summaryrefslogtreecommitdiff
path: root/kernel/datamods
diff options
context:
space:
mode:
authortzafrir <tzafrir@5390a7c7-147a-4af0-8ec9-7488f05a26cb>2008-02-04 23:00:48 +0000
committertzafrir <tzafrir@5390a7c7-147a-4af0-8ec9-7488f05a26cb>2008-02-04 23:00:48 +0000
commit7e068801fbf82413ac0a5e63e586c268bd457434 (patch)
tree9b61e9a4e07167e0b7d347e4336245724befa29c /kernel/datamods
parent29daeebad888269fa0ee2ca7e54e238c8498ca2d (diff)
Move kernel stuff to under kernel/
(merged branch /zaptel/team/tzafrir/move ) Closes issue #7117. git-svn-id: http://svn.digium.com/svn/zaptel/branches/1.4@3793 5390a7c7-147a-4af0-8ec9-7488f05a26cb
Diffstat (limited to 'kernel/datamods')
-rw-r--r--kernel/datamods/Makefile32
-rw-r--r--kernel/datamods/hdlc_cisco.c335
-rw-r--r--kernel/datamods/hdlc_fr.c1273
-rw-r--r--kernel/datamods/hdlc_generic.c355
-rw-r--r--kernel/datamods/hdlc_ppp.c114
-rw-r--r--kernel/datamods/hdlc_raw.c89
-rw-r--r--kernel/datamods/hdlc_raw_eth.c107
-rw-r--r--kernel/datamods/syncppp.c1485
8 files changed, 3790 insertions, 0 deletions
diff --git a/kernel/datamods/Makefile b/kernel/datamods/Makefile
new file mode 100644
index 0000000..310073e
--- /dev/null
+++ b/kernel/datamods/Makefile
@@ -0,0 +1,32 @@
+.EXPORT_ALL_VARIABLES:
+MODULES= \
+ hdlc_cisco hdlc_generic hdlc_raw syncppp \
+ hdlc_fr hdlc_ppp hdlc_raw_eth
+
+
+PWD=$(shell pwd)
+
+MODULESO:=$(MODULES:%=%.o)
+MODULESKO:=$(MODULES:%=%.ko)
+KMAKE = $(MAKE) -C $(KSRC) SUBDIRS=$(PWD)
+KMAKE_INST = $(KMAKE) \
+ INSTALL_MOD_PATH=$(INSTALL_PREFIX) INSTALL_MOD_DIR=misc modules_install
+
+obj-m := $(MODULESO)
+#obj-m:=hdlc_raw.o hdlc_cisco.o
+#obj-m := hdlc_cisco.o hdlc_cisco.mod.o hdlc_fr.o hdlc_generic.o hdlc_ppp.o hdlc_raw.o hdlc_raw_eth.o hdlc_raw.mod.o hdlc_x25.o
+
+all:
+ @echo "You don't want to do make here. Do it from up above"
+
+clean:
+ $(KMAKE) clean
+
+install: $(MODULESKO)
+ $(KMAKE_INST)
+
+datamods:
+ @echo "To build: $(obj-m)"
+ @echo $(KSRC)
+ @if [ -z "$(KSRC)" -o ! -d "$(KSRC)" ]; then echo "You do not appear to have the sources for the $(KVERS) kernel installed."; exit 1 ; fi
+ $(KMAKE) modules
diff --git a/kernel/datamods/hdlc_cisco.c b/kernel/datamods/hdlc_cisco.c
new file mode 100644
index 0000000..1fd0466
--- /dev/null
+++ b/kernel/datamods/hdlc_cisco.c
@@ -0,0 +1,335 @@
+/*
+ * Generic HDLC support routines for Linux
+ * Cisco HDLC support
+ *
+ * Copyright (C) 2000 - 2003 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/pkt_sched.h>
+#include <linux/inetdevice.h>
+#include <linux/lapb.h>
+#include <linux/rtnetlink.h>
+#include <linux/hdlc.h>
+
+#undef DEBUG_HARD_HEADER
+
+#define CISCO_MULTICAST 0x8F /* Cisco multicast address */
+#define CISCO_UNICAST 0x0F /* Cisco unicast address */
+#define CISCO_KEEPALIVE 0x8035 /* Cisco keepalive protocol */
+#define CISCO_SYS_INFO 0x2000 /* Cisco interface/system info */
+#define CISCO_ADDR_REQ 0 /* Cisco address request */
+#define CISCO_ADDR_REPLY 1 /* Cisco address reply */
+#define CISCO_KEEPALIVE_REQ 2 /* Cisco keepalive request */
+
+
+static int cisco_hard_header(struct sk_buff *skb, struct net_device *dev,
+ u16 type, void *daddr, void *saddr,
+ unsigned int len)
+{
+ hdlc_header *data;
+#ifdef DEBUG_HARD_HEADER
+ printk(KERN_DEBUG "%s: cisco_hard_header called\n", dev->name);
+#endif
+
+ skb_push(skb, sizeof(hdlc_header));
+ data = (hdlc_header*)skb->data;
+ if (type == CISCO_KEEPALIVE)
+ data->address = CISCO_MULTICAST;
+ else
+ data->address = CISCO_UNICAST;
+ data->control = 0;
+ data->protocol = htons(type);
+
+ return sizeof(hdlc_header);
+}
+
+
+
+static void cisco_keepalive_send(struct net_device *dev, u32 type,
+ u32 par1, u32 par2)
+{
+ struct sk_buff *skb;
+ cisco_packet *data;
+
+ skb = dev_alloc_skb(sizeof(hdlc_header) + sizeof(cisco_packet));
+ if (!skb) {
+ printk(KERN_WARNING
+ "%s: Memory squeeze on cisco_keepalive_send()\n",
+ dev->name);
+ return;
+ }
+ skb_reserve(skb, 4);
+ cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0);
+ data = (cisco_packet*)(skb->data + 4);
+
+ data->type = htonl(type);
+ data->par1 = htonl(par1);
+ data->par2 = htonl(par2);
+ data->rel = 0xFFFF;
+ /* we will need do_div here if 1000 % HZ != 0 */
+ data->time = htonl((jiffies - INITIAL_JIFFIES) * (1000 / HZ));
+
+ skb_put(skb, sizeof(cisco_packet));
+ skb->priority = TC_PRIO_CONTROL;
+ skb->dev = dev;
+ skb->nh.raw = skb->data;
+
+ dev_queue_xmit(skb);
+}
+
+
+
+static __be16 cisco_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+ hdlc_header *data = (hdlc_header*)skb->data;
+
+ if (skb->len < sizeof(hdlc_header))
+ return __constant_htons(ETH_P_HDLC);
+
+ if (data->address != CISCO_MULTICAST &&
+ data->address != CISCO_UNICAST)
+ return __constant_htons(ETH_P_HDLC);
+
+ switch(data->protocol) {
+ case __constant_htons(ETH_P_IP):
+ case __constant_htons(ETH_P_IPX):
+ case __constant_htons(ETH_P_IPV6):
+ skb_pull(skb, sizeof(hdlc_header));
+ return data->protocol;
+ default:
+ return __constant_htons(ETH_P_HDLC);
+ }
+}
+
+
+static int cisco_rx(struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ hdlc_header *data = (hdlc_header*)skb->data;
+ cisco_packet *cisco_data;
+ struct in_device *in_dev;
+ u32 addr, mask;
+
+ if (skb->len < sizeof(hdlc_header))
+ goto rx_error;
+
+ if (data->address != CISCO_MULTICAST &&
+ data->address != CISCO_UNICAST)
+ goto rx_error;
+
+ switch(ntohs(data->protocol)) {
+ case CISCO_SYS_INFO:
+ /* Packet is not needed, drop it. */
+ dev_kfree_skb_any(skb);
+ return NET_RX_SUCCESS;
+
+ case CISCO_KEEPALIVE:
+ if (skb->len != sizeof(hdlc_header) + CISCO_PACKET_LEN &&
+ skb->len != sizeof(hdlc_header) + CISCO_BIG_PACKET_LEN) {
+ printk(KERN_INFO "%s: Invalid length of Cisco "
+ "control packet (%d bytes)\n",
+ dev->name, skb->len);
+ goto rx_error;
+ }
+
+ cisco_data = (cisco_packet*)(skb->data + sizeof(hdlc_header));
+
+ switch(ntohl (cisco_data->type)) {
+ case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */
+ in_dev = dev->ip_ptr;
+ addr = 0;
+ mask = ~0; /* is the mask correct? */
+
+ if (in_dev != NULL) {
+ struct in_ifaddr **ifap = &in_dev->ifa_list;
+
+ while (*ifap != NULL) {
+ if (strcmp(dev->name,
+ (*ifap)->ifa_label) == 0) {
+ addr = (*ifap)->ifa_local;
+ mask = (*ifap)->ifa_mask;
+ break;
+ }
+ ifap = &(*ifap)->ifa_next;
+ }
+
+ cisco_keepalive_send(dev, CISCO_ADDR_REPLY,
+ addr, mask);
+ }
+ dev_kfree_skb_any(skb);
+ return NET_RX_SUCCESS;
+
+ case CISCO_ADDR_REPLY:
+ printk(KERN_INFO "%s: Unexpected Cisco IP address "
+ "reply\n", dev->name);
+ goto rx_error;
+
+ case CISCO_KEEPALIVE_REQ:
+ hdlc->state.cisco.rxseq = ntohl(cisco_data->par1);
+ if (hdlc->state.cisco.request_sent &&
+ ntohl(cisco_data->par2)==hdlc->state.cisco.txseq) {
+ hdlc->state.cisco.last_poll = jiffies;
+ if (!hdlc->state.cisco.up) {
+ u32 sec, min, hrs, days;
+ sec = ntohl(cisco_data->time) / 1000;
+ min = sec / 60; sec -= min * 60;
+ hrs = min / 60; min -= hrs * 60;
+ days = hrs / 24; hrs -= days * 24;
+ printk(KERN_INFO "%s: Link up (peer "
+ "uptime %ud%uh%um%us)\n",
+ dev->name, days, hrs,
+ min, sec);
+#if 0
+ netif_carrier_on(dev);
+#endif
+ hdlc->state.cisco.up = 1;
+ }
+ }
+
+ dev_kfree_skb_any(skb);
+ return NET_RX_SUCCESS;
+ } /* switch(keepalive type) */
+ } /* switch(protocol) */
+
+ printk(KERN_INFO "%s: Unsupported protocol %x\n", dev->name,
+ data->protocol);
+ dev_kfree_skb_any(skb);
+ return NET_RX_DROP;
+
+ rx_error:
+ hdlc->stats.rx_errors++; /* Mark error */
+ dev_kfree_skb_any(skb);
+ return NET_RX_DROP;
+}
+
+
+
+static void cisco_timer(unsigned long arg)
+{
+ struct net_device *dev = (struct net_device *)arg;
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+
+ if (hdlc->state.cisco.up &&
+ time_after(jiffies, hdlc->state.cisco.last_poll +
+ hdlc->state.cisco.settings.timeout * HZ)) {
+ hdlc->state.cisco.up = 0;
+ printk(KERN_INFO "%s: Link down\n", dev->name);
+#if 0
+ netif_carrier_off(dev);
+#endif
+ }
+
+ cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ,
+ ++hdlc->state.cisco.txseq,
+ hdlc->state.cisco.rxseq);
+ hdlc->state.cisco.request_sent = 1;
+ hdlc->state.cisco.timer.expires = jiffies +
+ hdlc->state.cisco.settings.interval * HZ;
+ hdlc->state.cisco.timer.function = cisco_timer;
+ hdlc->state.cisco.timer.data = arg;
+ add_timer(&hdlc->state.cisco.timer);
+}
+
+
+
+static void cisco_start(struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ hdlc->state.cisco.up = 0;
+ hdlc->state.cisco.request_sent = 0;
+ hdlc->state.cisco.txseq = hdlc->state.cisco.rxseq = 0;
+
+ init_timer(&hdlc->state.cisco.timer);
+ hdlc->state.cisco.timer.expires = jiffies + HZ; /*First poll after 1s*/
+ hdlc->state.cisco.timer.function = cisco_timer;
+ hdlc->state.cisco.timer.data = (unsigned long)dev;
+ add_timer(&hdlc->state.cisco.timer);
+}
+
+
+
+static void cisco_stop(struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ del_timer_sync(&hdlc->state.cisco.timer);
+#if 0
+ if (netif_carrier_ok(dev))
+ netif_carrier_off(dev);
+#endif
+ hdlc->state.cisco.up = 0;
+ hdlc->state.cisco.request_sent = 0;
+}
+
+
+
+int hdlc_cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
+{
+ cisco_proto __user *cisco_s = ifr->ifr_settings.ifs_ifsu.cisco;
+ const size_t size = sizeof(cisco_proto);
+ cisco_proto new_settings;
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ int result;
+
+ switch (ifr->ifr_settings.type) {
+ case IF_GET_PROTO:
+ ifr->ifr_settings.type = IF_PROTO_CISCO;
+ if (ifr->ifr_settings.size < size) {
+ ifr->ifr_settings.size = size; /* data size wanted */
+ return -ENOBUFS;
+ }
+ if (copy_to_user(cisco_s, &hdlc->state.cisco.settings, size))
+ return -EFAULT;
+ return 0;
+
+ case IF_PROTO_CISCO:
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if(dev->flags & IFF_UP)
+ return -EBUSY;
+
+ if (copy_from_user(&new_settings, cisco_s, size))
+ return -EFAULT;
+
+ if (new_settings.interval < 1 ||
+ new_settings.timeout < 2)
+ return -EINVAL;
+
+ result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
+
+ if (result)
+ return result;
+
+ hdlc_proto_detach(hdlc);
+ memcpy(&hdlc->state.cisco.settings, &new_settings, size);
+ memset(&hdlc->proto, 0, sizeof(hdlc->proto));
+
+ hdlc->proto.start = cisco_start;
+ hdlc->proto.stop = cisco_stop;
+ hdlc->proto.netif_rx = cisco_rx;
+ hdlc->proto.type_trans = cisco_type_trans;
+ hdlc->proto.id = IF_PROTO_CISCO;
+ dev->hard_start_xmit = hdlc->xmit;
+ dev->hard_header = cisco_hard_header;
+ dev->hard_header_cache = NULL;
+ dev->type = ARPHRD_CISCO;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->addr_len = 0;
+ return 0;
+ }
+
+ return -EINVAL;
+}
diff --git a/kernel/datamods/hdlc_fr.c b/kernel/datamods/hdlc_fr.c
new file mode 100644
index 0000000..523afe1
--- /dev/null
+++ b/kernel/datamods/hdlc_fr.c
@@ -0,0 +1,1273 @@
+/*
+ * Generic HDLC support routines for Linux
+ * Frame Relay support
+ *
+ * Copyright (C) 1999 - 2005 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+
+ Theory of PVC state
+
+ DCE mode:
+
+ (exist,new) -> 0,0 when "PVC create" or if "link unreliable"
+ 0,x -> 1,1 if "link reliable" when sending FULL STATUS
+ 1,1 -> 1,0 if received FULL STATUS ACK
+
+ (active) -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create"
+ -> 1 when "PVC up" and (exist,new) = 1,0
+
+ DTE mode:
+ (exist,new,active) = FULL STATUS if "link reliable"
+ = 0, 0, 0 if "link unreliable"
+ No LMI:
+ active = open and "link reliable"
+ exist = new = not used
+
+ CCITT LMI: ITU-T Q.933 Annex A
+ ANSI LMI: ANSI T1.617 Annex D
+ CISCO LMI: the original, aka "Gang of Four" LMI
+
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/pkt_sched.h>
+#include <linux/random.h>
+#include <linux/inetdevice.h>
+#include <linux/lapb.h>
+#include <linux/rtnetlink.h>
+#include <linux/etherdevice.h>
+#include <linux/hdlc.h>
+
+#undef DEBUG_PKT
+#undef DEBUG_ECN
+#undef DEBUG_LINK
+
+#define FR_UI 0x03
+#define FR_PAD 0x00
+
+#define NLPID_IP 0xCC
+#define NLPID_IPV6 0x8E
+#define NLPID_SNAP 0x80
+#define NLPID_PAD 0x00
+#define NLPID_CCITT_ANSI_LMI 0x08
+#define NLPID_CISCO_LMI 0x09
+
+
+#define LMI_CCITT_ANSI_DLCI 0 /* LMI DLCI */
+#define LMI_CISCO_DLCI 1023
+
+#define LMI_CALLREF 0x00 /* Call Reference */
+#define LMI_ANSI_LOCKSHIFT 0x95 /* ANSI locking shift */
+#define LMI_ANSI_CISCO_REPTYPE 0x01 /* report type */
+#define LMI_CCITT_REPTYPE 0x51
+#define LMI_ANSI_CISCO_ALIVE 0x03 /* keep alive */
+#define LMI_CCITT_ALIVE 0x53
+#define LMI_ANSI_CISCO_PVCSTAT 0x07 /* PVC status */
+#define LMI_CCITT_PVCSTAT 0x57
+
+#define LMI_FULLREP 0x00 /* full report */
+#define LMI_INTEGRITY 0x01 /* link integrity report */
+#define LMI_SINGLE 0x02 /* single PVC report */
+
+#define LMI_STATUS_ENQUIRY 0x75
+#define LMI_STATUS 0x7D /* reply */
+
+#define LMI_REPT_LEN 1 /* report type element length */
+#define LMI_INTEG_LEN 2 /* link integrity element length */
+
+#define LMI_CCITT_CISCO_LENGTH 13 /* LMI frame lengths */
+#define LMI_ANSI_LENGTH 14
+
+
+typedef struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ unsigned ea1: 1;
+ unsigned cr: 1;
+ unsigned dlcih: 6;
+
+ unsigned ea2: 1;
+ unsigned de: 1;
+ unsigned becn: 1;
+ unsigned fecn: 1;
+ unsigned dlcil: 4;
+#else
+ unsigned dlcih: 6;
+ unsigned cr: 1;
+ unsigned ea1: 1;
+
+ unsigned dlcil: 4;
+ unsigned fecn: 1;
+ unsigned becn: 1;
+ unsigned de: 1;
+ unsigned ea2: 1;
+#endif
+}__attribute__ ((packed)) fr_hdr;
+
+
+static inline u16 q922_to_dlci(u8 *hdr)
+{
+ return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4);
+}
+
+
+
+static inline void dlci_to_q922(u8 *hdr, u16 dlci)
+{
+ hdr[0] = (dlci >> 2) & 0xFC;
+ hdr[1] = ((dlci << 4) & 0xF0) | 0x01;
+}
+
+
+
+static inline pvc_device* find_pvc(hdlc_device *hdlc, u16 dlci)
+{
+ pvc_device *pvc = hdlc->state.fr.first_pvc;
+
+ while (pvc) {
+ if (pvc->dlci == dlci)
+ return pvc;
+ if (pvc->dlci > dlci)
+ return NULL; /* the listed is sorted */
+ pvc = pvc->next;
+ }
+
+ return NULL;
+}
+
+
+static inline pvc_device* add_pvc(struct net_device *dev, u16 dlci)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ pvc_device *pvc, **pvc_p = &hdlc->state.fr.first_pvc;
+
+ while (*pvc_p) {
+ if ((*pvc_p)->dlci == dlci)
+ return *pvc_p;
+ if ((*pvc_p)->dlci > dlci)
+ break; /* the list is sorted */
+ pvc_p = &(*pvc_p)->next;
+ }
+
+ pvc = kmalloc(sizeof(pvc_device), GFP_ATOMIC);
+ if (!pvc)
+ return NULL;
+
+ memset(pvc, 0, sizeof(pvc_device));
+ pvc->dlci = dlci;
+ pvc->master = dev;
+ pvc->next = *pvc_p; /* Put it in the chain */
+ *pvc_p = pvc;
+ return pvc;
+}
+
+
+static inline int pvc_is_used(pvc_device *pvc)
+{
+ return pvc->main != NULL || pvc->ether != NULL;
+}
+
+
+static inline void pvc_carrier(int on, pvc_device *pvc)
+{
+ if (on) {
+ if (pvc->main)
+ if (!netif_carrier_ok(pvc->main))
+ netif_carrier_on(pvc->main);
+ if (pvc->ether)
+ if (!netif_carrier_ok(pvc->ether))
+ netif_carrier_on(pvc->ether);
+ } else {
+ if (pvc->main)
+ if (netif_carrier_ok(pvc->main))
+ netif_carrier_off(pvc->main);
+ if (pvc->ether)
+ if (netif_carrier_ok(pvc->ether))
+ netif_carrier_off(pvc->ether);
+ }
+}
+
+
+static inline void delete_unused_pvcs(hdlc_device *hdlc)
+{
+ pvc_device **pvc_p = &hdlc->state.fr.first_pvc;
+
+ while (*pvc_p) {
+ if (!pvc_is_used(*pvc_p)) {
+ pvc_device *pvc = *pvc_p;
+ *pvc_p = pvc->next;
+ kfree(pvc);
+ continue;
+ }
+ pvc_p = &(*pvc_p)->next;
+ }
+}
+
+
+static inline struct net_device** get_dev_p(pvc_device *pvc, int type)
+{
+ if (type == ARPHRD_ETHER)
+ return &pvc->ether;
+ else
+ return &pvc->main;
+}
+
+
+static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
+{
+ u16 head_len;
+ struct sk_buff *skb = *skb_p;
+
+ switch (skb->protocol) {
+ case __constant_ntohs(NLPID_CCITT_ANSI_LMI):
+ head_len = 4;
+ skb_push(skb, head_len);
+ skb->data[3] = NLPID_CCITT_ANSI_LMI;
+ break;
+
+ case __constant_ntohs(NLPID_CISCO_LMI):
+ head_len = 4;
+ skb_push(skb, head_len);
+ skb->data[3] = NLPID_CISCO_LMI;
+ break;
+
+ case __constant_ntohs(ETH_P_IP):
+ head_len = 4;
+ skb_push(skb, head_len);
+ skb->data[3] = NLPID_IP;
+ break;
+
+ case __constant_ntohs(ETH_P_IPV6):
+ head_len = 4;
+ skb_push(skb, head_len);
+ skb->data[3] = NLPID_IPV6;
+ break;
+
+ case __constant_ntohs(ETH_P_802_3):
+ head_len = 10;
+ if (skb_headroom(skb) < head_len) {
+ struct sk_buff *skb2 = skb_realloc_headroom(skb,
+ head_len);
+ if (!skb2)
+ return -ENOBUFS;
+ dev_kfree_skb(skb);
+ skb = *skb_p = skb2;
+ }
+ skb_push(skb, head_len);
+ skb->data[3] = FR_PAD;
+ skb->data[4] = NLPID_SNAP;
+ skb->data[5] = FR_PAD;
+ skb->data[6] = 0x80;
+ skb->data[7] = 0xC2;
+ skb->data[8] = 0x00;
+ skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */
+ break;
+
+ default:
+ head_len = 10;
+ skb_push(skb, head_len);
+ skb->data[3] = FR_PAD;
+ skb->data[4] = NLPID_SNAP;
+ skb->data[5] = FR_PAD;
+ skb->data[6] = FR_PAD;
+ skb->data[7] = FR_PAD;
+ *(u16*)(skb->data + 8) = skb->protocol;
+ }
+
+ dlci_to_q922(skb->data, dlci);
+ skb->data[2] = FR_UI;
+ return 0;
+}
+
+
+
+static int pvc_open(struct net_device *dev)
+{
+ pvc_device *pvc = dev_to_pvc(dev);
+
+ if ((pvc->master->flags & IFF_UP) == 0)
+ return -EIO; /* Master must be UP in order to activate PVC */
+
+ if (pvc->open_count++ == 0) {
+ hdlc_device *hdlc = dev_to_hdlc(pvc->master);
+ if (hdlc->state.fr.settings.lmi == LMI_NONE)
+ pvc->state.active = hdlc->carrier;
+
+ pvc_carrier(pvc->state.active, pvc);
+ hdlc->state.fr.dce_changed = 1;
+ }
+ return 0;
+}
+
+
+
+static int pvc_close(struct net_device *dev)
+{
+ pvc_device *pvc = dev_to_pvc(dev);
+
+ if (--pvc->open_count == 0) {
+ hdlc_device *hdlc = dev_to_hdlc(pvc->master);
+ if (hdlc->state.fr.settings.lmi == LMI_NONE)
+ pvc->state.active = 0;
+
+ if (hdlc->state.fr.settings.dce) {
+ hdlc->state.fr.dce_changed = 1;
+ pvc->state.active = 0;
+ }
+ }
+ return 0;
+}
+
+
+
+static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ pvc_device *pvc = dev_to_pvc(dev);
+ fr_proto_pvc_info info;
+
+ if (ifr->ifr_settings.type == IF_GET_PROTO) {
+ if (dev->type == ARPHRD_ETHER)
+ ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC;
+ else
+ ifr->ifr_settings.type = IF_PROTO_FR_PVC;
+
+ if (ifr->ifr_settings.size < sizeof(info)) {
+ /* data size wanted */
+ ifr->ifr_settings.size = sizeof(info);
+ return -ENOBUFS;
+ }
+
+ info.dlci = pvc->dlci;
+ memcpy(info.master, pvc->master->name, IFNAMSIZ);
+ if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info,
+ &info, sizeof(info)))
+ return -EFAULT;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+
+static inline struct net_device_stats *pvc_get_stats(struct net_device *dev)
+{
+ return netdev_priv(dev);
+}
+
+
+
+static int pvc_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ pvc_device *pvc = dev_to_pvc(dev);
+ struct net_device_stats *stats = pvc_get_stats(dev);
+
+ if (pvc->state.active) {
+ if (dev->type == ARPHRD_ETHER) {
+ int pad = ETH_ZLEN - skb->len;
+ if (pad > 0) { /* Pad the frame with zeros */
+ int len = skb->len;
+ if (skb_tailroom(skb) < pad)
+ if (pskb_expand_head(skb, 0, pad,
+ GFP_ATOMIC)) {
+ stats->tx_dropped++;
+ dev_kfree_skb(skb);
+ return 0;
+ }
+ skb_put(skb, pad);
+ memset(skb->data + len, 0, pad);
+ }
+ skb->protocol = __constant_htons(ETH_P_802_3);
+ }
+ if (!fr_hard_header(&skb, pvc->dlci)) {
+ stats->tx_bytes += skb->len;
+ stats->tx_packets++;
+ if (pvc->state.fecn) /* TX Congestion counter */
+ stats->tx_compressed++;
+ skb->dev = pvc->master;
+ dev_queue_xmit(skb);
+ return 0;
+ }
+ }
+
+ stats->tx_dropped++;
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+
+
+static int pvc_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if ((new_mtu < 68) || (new_mtu > HDLC_MAX_MTU))
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+
+
+static inline void fr_log_dlci_active(pvc_device *pvc)
+{
+ printk(KERN_INFO "%s: DLCI %d [%s%s%s]%s %s\n",
+ pvc->master->name,
+ pvc->dlci,
+ pvc->main ? pvc->main->name : "",
+ pvc->main && pvc->ether ? " " : "",
+ pvc->ether ? pvc->ether->name : "",
+ pvc->state.new ? " new" : "",
+ !pvc->state.exist ? "deleted" :
+ pvc->state.active ? "active" : "inactive");
+}
+
+
+
+static inline u8 fr_lmi_nextseq(u8 x)
+{
+ x++;
+ return x ? x : 1;
+}
+
+
+
+static void fr_lmi_send(struct net_device *dev, int fullrep)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ struct sk_buff *skb;
+ pvc_device *pvc = hdlc->state.fr.first_pvc;
+ int lmi = hdlc->state.fr.settings.lmi;
+ int dce = hdlc->state.fr.settings.dce;
+ int len = lmi == LMI_ANSI ? LMI_ANSI_LENGTH : LMI_CCITT_CISCO_LENGTH;
+ int stat_len = (lmi == LMI_CISCO) ? 6 : 3;
+ u8 *data;
+ int i = 0;
+
+ if (dce && fullrep) {
+ len += hdlc->state.fr.dce_pvc_count * (2 + stat_len);
+ if (len > HDLC_MAX_MRU) {
+ printk(KERN_WARNING "%s: Too many PVCs while sending "
+ "LMI full report\n", dev->name);
+ return;
+ }
+ }
+
+ skb = dev_alloc_skb(len);
+ if (!skb) {
+ printk(KERN_WARNING "%s: Memory squeeze on fr_lmi_send()\n",
+ dev->name);
+ return;
+ }
+ memset(skb->data, 0, len);
+ skb_reserve(skb, 4);
+ if (lmi == LMI_CISCO) {
+ skb->protocol = __constant_htons(NLPID_CISCO_LMI);
+ fr_hard_header(&skb, LMI_CISCO_DLCI);
+ } else {
+ skb->protocol = __constant_htons(NLPID_CCITT_ANSI_LMI);
+ fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI);
+ }
+ data = skb->tail;
+ data[i++] = LMI_CALLREF;
+ data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY;
+ if (lmi == LMI_ANSI)
+ data[i++] = LMI_ANSI_LOCKSHIFT;
+ data[i++] = lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
+ LMI_ANSI_CISCO_REPTYPE;
+ data[i++] = LMI_REPT_LEN;
+ data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY;
+ data[i++] = lmi == LMI_CCITT ? LMI_CCITT_ALIVE : LMI_ANSI_CISCO_ALIVE;
+ data[i++] = LMI_INTEG_LEN;
+ data[i++] = hdlc->state.fr.txseq =fr_lmi_nextseq(hdlc->state.fr.txseq);
+ data[i++] = hdlc->state.fr.rxseq;
+
+ if (dce && fullrep) {
+ while (pvc) {
+ data[i++] = lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
+ LMI_ANSI_CISCO_PVCSTAT;
+ data[i++] = stat_len;
+
+ /* LMI start/restart */
+ if (hdlc->state.fr.reliable && !pvc->state.exist) {
+ pvc->state.exist = pvc->state.new = 1;
+ fr_log_dlci_active(pvc);
+ }
+
+ /* ifconfig PVC up */
+ if (pvc->open_count && !pvc->state.active &&
+ pvc->state.exist && !pvc->state.new) {
+ pvc_carrier(1, pvc);
+ pvc->state.active = 1;
+ fr_log_dlci_active(pvc);
+ }
+
+ if (lmi == LMI_CISCO) {
+ data[i] = pvc->dlci >> 8;
+ data[i + 1] = pvc->dlci & 0xFF;
+ } else {
+ data[i] = (pvc->dlci >> 4) & 0x3F;
+ data[i + 1] = ((pvc->dlci << 3) & 0x78) | 0x80;
+ data[i + 2] = 0x80;
+ }
+
+ if (pvc->state.new)
+ data[i + 2] |= 0x08;
+ else if (pvc->state.active)
+ data[i + 2] |= 0x02;
+
+ i += stat_len;
+ pvc = pvc->next;
+ }
+ }
+
+ skb_put(skb, i);
+ skb->priority = TC_PRIO_CONTROL;
+ skb->dev = dev;
+ skb->nh.raw = skb->data;
+
+ dev_queue_xmit(skb);
+}
+
+
+
+static void fr_set_link_state(int reliable, struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ pvc_device *pvc = hdlc->state.fr.first_pvc;
+
+ hdlc->state.fr.reliable = reliable;
+ if (reliable) {
+#if 0
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+#endif
+
+ hdlc->state.fr.n391cnt = 0; /* Request full status */
+ hdlc->state.fr.dce_changed = 1;
+
+ if (hdlc->state.fr.settings.lmi == LMI_NONE) {
+ while (pvc) { /* Activate all PVCs */
+ pvc_carrier(1, pvc);
+ pvc->state.exist = pvc->state.active = 1;
+ pvc->state.new = 0;
+ pvc = pvc->next;
+ }
+ }
+ } else {
+#if 0
+ if (netif_carrier_ok(dev))
+ netif_carrier_off(dev);
+#endif
+
+ while (pvc) { /* Deactivate all PVCs */
+ pvc_carrier(0, pvc);
+ pvc->state.exist = pvc->state.active = 0;
+ pvc->state.new = 0;
+ if (!hdlc->state.fr.settings.dce)
+ pvc->state.bandwidth = 0;
+ pvc = pvc->next;
+ }
+ }
+}
+
+
+
+static void fr_timer(unsigned long arg)
+{
+ struct net_device *dev = (struct net_device *)arg;
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ int i, cnt = 0, reliable;
+ u32 list;
+
+ if (hdlc->state.fr.settings.dce) {
+ reliable = hdlc->state.fr.request &&
+ time_before(jiffies, hdlc->state.fr.last_poll +
+ hdlc->state.fr.settings.t392 * HZ);
+ hdlc->state.fr.request = 0;
+ } else {
+ hdlc->state.fr.last_errors <<= 1; /* Shift the list */
+ if (hdlc->state.fr.request) {
+ if (hdlc->state.fr.reliable)
+ printk(KERN_INFO "%s: No LMI status reply "
+ "received\n", dev->name);
+ hdlc->state.fr.last_errors |= 1;
+ }
+
+ list = hdlc->state.fr.last_errors;
+ for (i = 0; i < hdlc->state.fr.settings.n393; i++, list >>= 1)
+ cnt += (list & 1); /* errors count */
+
+ reliable = (cnt < hdlc->state.fr.settings.n392);
+ }
+
+ if (hdlc->state.fr.reliable != reliable) {
+ printk(KERN_INFO "%s: Link %sreliable\n", dev->name,
+ reliable ? "" : "un");
+ fr_set_link_state(reliable, dev);
+ }
+
+ if (hdlc->state.fr.settings.dce)
+ hdlc->state.fr.timer.expires = jiffies +
+ hdlc->state.fr.settings.t392 * HZ;
+ else {
+ if (hdlc->state.fr.n391cnt)
+ hdlc->state.fr.n391cnt--;
+
+ fr_lmi_send(dev, hdlc->state.fr.n391cnt == 0);
+
+ hdlc->state.fr.last_poll = jiffies;
+ hdlc->state.fr.request = 1;
+ hdlc->state.fr.timer.expires = jiffies +
+ hdlc->state.fr.settings.t391 * HZ;
+ }
+
+ hdlc->state.fr.timer.function = fr_timer;
+ hdlc->state.fr.timer.data = arg;
+ add_timer(&hdlc->state.fr.timer);
+}
+
+
+
+static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ pvc_device *pvc;
+ u8 rxseq, txseq;
+ int lmi = hdlc->state.fr.settings.lmi;
+ int dce = hdlc->state.fr.settings.dce;
+ int stat_len = (lmi == LMI_CISCO) ? 6 : 3, reptype, error, no_ram, i;
+
+ if (skb->len < (lmi == LMI_ANSI ? LMI_ANSI_LENGTH :
+ LMI_CCITT_CISCO_LENGTH)) {
+ printk(KERN_INFO "%s: Short LMI frame\n", dev->name);
+ return 1;
+ }
+
+ if (skb->data[3] != (lmi == LMI_CISCO ? NLPID_CISCO_LMI :
+ NLPID_CCITT_ANSI_LMI)) {
+ printk(KERN_INFO "%s: Received non-LMI frame with LMI"
+ " DLCI\n", dev->name);
+ return 1;
+ }
+
+ if (skb->data[4] != LMI_CALLREF) {
+ printk(KERN_INFO "%s: Invalid LMI Call reference (0x%02X)\n",
+ dev->name, skb->data[4]);
+ return 1;
+ }
+
+ if (skb->data[5] != (dce ? LMI_STATUS_ENQUIRY : LMI_STATUS)) {
+ printk(KERN_INFO "%s: Invalid LMI Message type (0x%02X)\n",
+ dev->name, skb->data[5]);
+ return 1;
+ }
+
+ if (lmi == LMI_ANSI) {
+ if (skb->data[6] != LMI_ANSI_LOCKSHIFT) {
+ printk(KERN_INFO "%s: Not ANSI locking shift in LMI"
+ " message (0x%02X)\n", dev->name, skb->data[6]);
+ return 1;
+ }
+ i = 7;
+ } else
+ i = 6;
+
+ if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
+ LMI_ANSI_CISCO_REPTYPE)) {
+ printk(KERN_INFO "%s: Not an LMI Report type IE (0x%02X)\n",
+ dev->name, skb->data[i]);
+ return 1;
+ }
+
+ if (skb->data[++i] != LMI_REPT_LEN) {
+ printk(KERN_INFO "%s: Invalid LMI Report type IE length"
+ " (%u)\n", dev->name, skb->data[i]);
+ return 1;
+ }
+
+ reptype = skb->data[++i];
+ if (reptype != LMI_INTEGRITY && reptype != LMI_FULLREP) {
+ printk(KERN_INFO "%s: Unsupported LMI Report type (0x%02X)\n",
+ dev->name, reptype);
+ return 1;
+ }
+
+ if (skb->data[++i] != (lmi == LMI_CCITT ? LMI_CCITT_ALIVE :
+ LMI_ANSI_CISCO_ALIVE)) {
+ printk(KERN_INFO "%s: Not an LMI Link integrity verification"
+ " IE (0x%02X)\n", dev->name, skb->data[i]);
+ return 1;
+ }
+
+ if (skb->data[++i] != LMI_INTEG_LEN) {
+ printk(KERN_INFO "%s: Invalid LMI Link integrity verification"
+ " IE length (%u)\n", dev->name, skb->data[i]);
+ return 1;
+ }
+ i++;
+
+ hdlc->state.fr.rxseq = skb->data[i++]; /* TX sequence from peer */
+ rxseq = skb->data[i++]; /* Should confirm our sequence */
+
+ txseq = hdlc->state.fr.txseq;
+
+ if (dce)
+ hdlc->state.fr.last_poll = jiffies;
+
+ error = 0;
+ if (!hdlc->state.fr.reliable)
+ error = 1;
+
+ if (rxseq == 0 || rxseq != txseq) {
+ hdlc->state.fr.n391cnt = 0; /* Ask for full report next time */
+ error = 1;
+ }
+
+ if (dce) {
+ if (hdlc->state.fr.fullrep_sent && !error) {
+/* Stop sending full report - the last one has been confirmed by DTE */
+ hdlc->state.fr.fullrep_sent = 0;
+ pvc = hdlc->state.fr.first_pvc;
+ while (pvc) {
+ if (pvc->state.new) {
+ pvc->state.new = 0;
+
+/* Tell DTE that new PVC is now active */
+ hdlc->state.fr.dce_changed = 1;
+ }
+ pvc = pvc->next;
+ }
+ }
+
+ if (hdlc->state.fr.dce_changed) {
+ reptype = LMI_FULLREP;
+ hdlc->state.fr.fullrep_sent = 1;
+ hdlc->state.fr.dce_changed = 0;
+ }
+
+ hdlc->state.fr.request = 1; /* got request */
+ fr_lmi_send(dev, reptype == LMI_FULLREP ? 1 : 0);
+ return 0;
+ }
+
+ /* DTE */
+
+ hdlc->state.fr.request = 0; /* got response, no request pending */
+
+ if (error)
+ return 0;
+
+ if (reptype != LMI_FULLREP)
+ return 0;
+
+ pvc = hdlc->state.fr.first_pvc;
+
+ while (pvc) {
+ pvc->state.deleted = 1;
+ pvc = pvc->next;
+ }
+
+ no_ram = 0;
+ while (skb->len >= i + 2 + stat_len) {
+ u16 dlci;
+ u32 bw;
+ unsigned int active, new;
+
+ if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
+ LMI_ANSI_CISCO_PVCSTAT)) {
+ printk(KERN_INFO "%s: Not an LMI PVC status IE"
+ " (0x%02X)\n", dev->name, skb->data[i]);
+ return 1;
+ }
+
+ if (skb->data[++i] != stat_len) {
+ printk(KERN_INFO "%s: Invalid LMI PVC status IE length"
+ " (%u)\n", dev->name, skb->data[i]);
+ return 1;
+ }
+ i++;
+
+ new = !! (skb->data[i + 2] & 0x08);
+ active = !! (skb->data[i + 2] & 0x02);
+ if (lmi == LMI_CISCO) {
+ dlci = (skb->data[i] << 8) | skb->data[i + 1];
+ bw = (skb->data[i + 3] << 16) |
+ (skb->data[i + 4] << 8) |
+ (skb->data[i + 5]);
+ } else {
+ dlci = ((skb->data[i] & 0x3F) << 4) |
+ ((skb->data[i + 1] & 0x78) >> 3);
+ bw = 0;
+ }
+
+ pvc = add_pvc(dev, dlci);
+
+ if (!pvc && !no_ram) {
+ printk(KERN_WARNING
+ "%s: Memory squeeze on fr_lmi_recv()\n",
+ dev->name);
+ no_ram = 1;
+ }
+
+ if (pvc) {
+ pvc->state.exist = 1;
+ pvc->state.deleted = 0;
+ if (active != pvc->state.active ||
+ new != pvc->state.new ||
+ bw != pvc->state.bandwidth ||
+ !pvc->state.exist) {
+ pvc->state.new = new;
+ pvc->state.active = active;
+ pvc->state.bandwidth = bw;
+ pvc_carrier(active, pvc);
+ fr_log_dlci_active(pvc);
+ }
+ }
+
+ i += stat_len;
+ }
+
+ pvc = hdlc->state.fr.first_pvc;
+
+ while (pvc) {
+ if (pvc->state.deleted && pvc->state.exist) {
+ pvc_carrier(0, pvc);
+ pvc->state.active = pvc->state.new = 0;
+ pvc->state.exist = 0;
+ pvc->state.bandwidth = 0;
+ fr_log_dlci_active(pvc);
+ }
+ pvc = pvc->next;
+ }
+
+ /* Next full report after N391 polls */
+ hdlc->state.fr.n391cnt = hdlc->state.fr.settings.n391;
+
+ return 0;
+}
+
+
+
+static int fr_rx(struct sk_buff *skb)
+{
+ struct net_device *ndev = skb->dev;
+ hdlc_device *hdlc = dev_to_hdlc(ndev);
+ fr_hdr *fh = (fr_hdr*)skb->data;
+ u8 *data = skb->data;
+ u16 dlci;
+ pvc_device *pvc;
+ struct net_device *dev = NULL;
+
+ if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI)
+ goto rx_error;
+
+ dlci = q922_to_dlci(skb->data);
+
+ if ((dlci == LMI_CCITT_ANSI_DLCI &&
+ (hdlc->state.fr.settings.lmi == LMI_ANSI ||
+ hdlc->state.fr.settings.lmi == LMI_CCITT)) ||
+ (dlci == LMI_CISCO_DLCI &&
+ hdlc->state.fr.settings.lmi == LMI_CISCO)) {
+ if (fr_lmi_recv(ndev, skb))
+ goto rx_error;
+ dev_kfree_skb_any(skb);
+ return NET_RX_SUCCESS;
+ }
+
+ pvc = find_pvc(hdlc, dlci);
+ if (!pvc) {
+#ifdef DEBUG_PKT
+ printk(KERN_INFO "%s: No PVC for received frame's DLCI %d\n",
+ ndev->name, dlci);
+#endif
+ dev_kfree_skb_any(skb);
+ return NET_RX_DROP;
+ }
+
+ if (pvc->state.fecn != fh->fecn) {
+#ifdef DEBUG_ECN
+ printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", ndev->name,
+ dlci, fh->fecn ? "N" : "FF");
+#endif
+ pvc->state.fecn ^= 1;
+ }
+
+ if (pvc->state.becn != fh->becn) {
+#ifdef DEBUG_ECN
+ printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", ndev->name,
+ dlci, fh->becn ? "N" : "FF");
+#endif
+ pvc->state.becn ^= 1;
+ }
+
+
+ if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
+ hdlc->stats.rx_dropped++;
+ return NET_RX_DROP;
+ }
+
+ if (data[3] == NLPID_IP) {
+ skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
+ dev = pvc->main;
+ skb->protocol = htons(ETH_P_IP);
+
+ } else if (data[3] == NLPID_IPV6) {
+ skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
+ dev = pvc->main;
+ skb->protocol = htons(ETH_P_IPV6);
+
+ } else if (skb->len > 10 && data[3] == FR_PAD &&
+ data[4] == NLPID_SNAP && data[5] == FR_PAD) {
+ u16 oui = ntohs(*(u16*)(data + 6));
+ u16 pid = ntohs(*(u16*)(data + 8));
+ skb_pull(skb, 10);
+
+ switch ((((u32)oui) << 16) | pid) {
+ case ETH_P_ARP: /* routed frame with SNAP */
+ case ETH_P_IPX:
+ case ETH_P_IP: /* a long variant */
+ case ETH_P_IPV6:
+ dev = pvc->main;
+ skb->protocol = htons(pid);
+ break;
+
+ case 0x80C20007: /* bridged Ethernet frame */
+ if ((dev = pvc->ether) != NULL)
+ skb->protocol = eth_type_trans(skb, dev);
+ break;
+
+ default:
+ printk(KERN_INFO "%s: Unsupported protocol, OUI=%x "
+ "PID=%x\n", ndev->name, oui, pid);
+ dev_kfree_skb_any(skb);
+ return NET_RX_DROP;
+ }
+ } else {
+ printk(KERN_INFO "%s: Unsupported protocol, NLPID=%x "
+ "length = %i\n", ndev->name, data[3], skb->len);
+ dev_kfree_skb_any(skb);
+ return NET_RX_DROP;
+ }
+
+ if (dev) {
+ struct net_device_stats *stats = pvc_get_stats(dev);
+ stats->rx_packets++; /* PVC traffic */
+ stats->rx_bytes += skb->len;
+ if (pvc->state.becn)
+ stats->rx_compressed++;
+ skb->dev = dev;
+ netif_rx(skb);
+ return NET_RX_SUCCESS;
+ } else {
+ dev_kfree_skb_any(skb);
+ return NET_RX_DROP;
+ }
+
+ rx_error:
+ hdlc->stats.rx_errors++; /* Mark error */
+ dev_kfree_skb_any(skb);
+ return NET_RX_DROP;
+}
+
+
+
+static void fr_start(struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+#ifdef DEBUG_LINK
+ printk(KERN_DEBUG "fr_start\n");
+#endif
+ if (hdlc->state.fr.settings.lmi != LMI_NONE) {
+ hdlc->state.fr.reliable = 0;
+ hdlc->state.fr.dce_changed = 1;
+ hdlc->state.fr.request = 0;
+ hdlc->state.fr.fullrep_sent = 0;
+ hdlc->state.fr.last_errors = 0xFFFFFFFF;
+ hdlc->state.fr.n391cnt = 0;
+ hdlc->state.fr.txseq = hdlc->state.fr.rxseq = 0;
+
+ init_timer(&hdlc->state.fr.timer);
+ /* First poll after 1 s */
+ hdlc->state.fr.timer.expires = jiffies + HZ;
+ hdlc->state.fr.timer.function = fr_timer;
+ hdlc->state.fr.timer.data = (unsigned long)dev;
+ add_timer(&hdlc->state.fr.timer);
+ } else
+ fr_set_link_state(1, dev);
+}
+
+
+
+static void fr_stop(struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+#ifdef DEBUG_LINK
+ printk(KERN_DEBUG "fr_stop\n");
+#endif
+ if (hdlc->state.fr.settings.lmi != LMI_NONE)
+ del_timer_sync(&hdlc->state.fr.timer);
+ fr_set_link_state(0, dev);
+}
+
+
+
+static void fr_close(struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ pvc_device *pvc = hdlc->state.fr.first_pvc;
+
+ while (pvc) { /* Shutdown all PVCs for this FRAD */
+ if (pvc->main)
+ dev_close(pvc->main);
+ if (pvc->ether)
+ dev_close(pvc->ether);
+ pvc = pvc->next;
+ }
+}
+
+static void dlci_setup(struct net_device *dev)
+{
+ dev->type = ARPHRD_DLCI;
+ dev->flags = IFF_POINTOPOINT;
+ dev->hard_header_len = 10;
+ dev->addr_len = 2;
+}
+
+static int fr_add_pvc(struct net_device *master, unsigned int dlci, int type)
+{
+ hdlc_device *hdlc = dev_to_hdlc(master);
+ pvc_device *pvc = NULL;
+ struct net_device *dev;
+ int result, used;
+ char * prefix = "pvc%d";
+
+ if (type == ARPHRD_ETHER)
+ prefix = "pvceth%d";
+
+ if ((pvc = add_pvc(master, dlci)) == NULL) {
+ printk(KERN_WARNING "%s: Memory squeeze on fr_add_pvc()\n",
+ master->name);
+ return -ENOBUFS;
+ }
+
+ if (*get_dev_p(pvc, type))
+ return -EEXIST;
+
+ used = pvc_is_used(pvc);
+
+ if (type == ARPHRD_ETHER)
+ dev = alloc_netdev(sizeof(struct net_device_stats),
+ "pvceth%d", ether_setup);
+ else
+ dev = alloc_netdev(sizeof(struct net_device_stats),
+ "pvc%d", dlci_setup);
+
+ if (!dev) {
+ printk(KERN_WARNING "%s: Memory squeeze on fr_pvc()\n",
+ master->name);
+ delete_unused_pvcs(hdlc);
+ return -ENOBUFS;
+ }
+
+ if (type == ARPHRD_ETHER) {
+ memcpy(dev->dev_addr, "\x00\x01", 2);
+ get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2);
+ } else {
+ *(u16*)dev->dev_addr = htons(dlci);
+ dlci_to_q922(dev->broadcast, dlci);
+ }
+ dev->hard_start_xmit = pvc_xmit;
+ dev->get_stats = pvc_get_stats;
+ dev->open = pvc_open;
+ dev->stop = pvc_close;
+ dev->do_ioctl = pvc_ioctl;
+ dev->change_mtu = pvc_change_mtu;
+ dev->mtu = HDLC_MAX_MTU;
+ dev->tx_queue_len = 0;
+ dev->priv = pvc;
+
+ result = dev_alloc_name(dev, dev->name);
+ if (result < 0) {
+ free_netdev(dev);
+ delete_unused_pvcs(hdlc);
+ return result;
+ }
+
+ if (register_netdevice(dev) != 0) {
+ free_netdev(dev);
+ delete_unused_pvcs(hdlc);
+ return -EIO;
+ }
+
+ dev->destructor = free_netdev;
+ *get_dev_p(pvc, type) = dev;
+ if (!used) {
+ hdlc->state.fr.dce_changed = 1;
+ hdlc->state.fr.dce_pvc_count++;
+ }
+ return 0;
+}
+
+
+
+static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type)
+{
+ pvc_device *pvc;
+ struct net_device *dev;
+
+ if ((pvc = find_pvc(hdlc, dlci)) == NULL)
+ return -ENOENT;
+
+ if ((dev = *get_dev_p(pvc, type)) == NULL)
+ return -ENOENT;
+
+ if (dev->flags & IFF_UP)
+ return -EBUSY; /* PVC in use */
+
+ unregister_netdevice(dev); /* the destructor will free_netdev(dev) */
+ *get_dev_p(pvc, type) = NULL;
+
+ if (!pvc_is_used(pvc)) {
+ hdlc->state.fr.dce_pvc_count--;
+ hdlc->state.fr.dce_changed = 1;
+ }
+ delete_unused_pvcs(hdlc);
+ return 0;
+}
+
+
+
+static void fr_destroy(hdlc_device *hdlc)
+{
+ pvc_device *pvc;
+
+ pvc = hdlc->state.fr.first_pvc;
+ hdlc->state.fr.first_pvc = NULL; /* All PVCs destroyed */
+ hdlc->state.fr.dce_pvc_count = 0;
+ hdlc->state.fr.dce_changed = 1;
+
+ while (pvc) {
+ pvc_device *next = pvc->next;
+ /* destructors will free_netdev() main and ether */
+ if (pvc->main)
+ unregister_netdevice(pvc->main);
+
+ if (pvc->ether)
+ unregister_netdevice(pvc->ether);
+
+ kfree(pvc);
+ pvc = next;
+ }
+}
+
+
+
+int hdlc_fr_ioctl(struct net_device *dev, struct ifreq *ifr)
+{
+ fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr;
+ const size_t size = sizeof(fr_proto);
+ fr_proto new_settings;
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ fr_proto_pvc pvc;
+ int result;
+
+ switch (ifr->ifr_settings.type) {
+ case IF_GET_PROTO:
+ ifr->ifr_settings.type = IF_PROTO_FR;
+ if (ifr->ifr_settings.size < size) {
+ ifr->ifr_settings.size = size; /* data size wanted */
+ return -ENOBUFS;
+ }
+ if (copy_to_user(fr_s, &hdlc->state.fr.settings, size))
+ return -EFAULT;
+ return 0;
+
+ case IF_PROTO_FR:
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if(dev->flags & IFF_UP)
+ return -EBUSY;
+
+ if (copy_from_user(&new_settings, fr_s, size))
+ return -EFAULT;
+
+ if (new_settings.lmi == LMI_DEFAULT)
+ new_settings.lmi = LMI_ANSI;
+
+ if ((new_settings.lmi != LMI_NONE &&
+ new_settings.lmi != LMI_ANSI &&
+ new_settings.lmi != LMI_CCITT &&
+ new_settings.lmi != LMI_CISCO) ||
+ new_settings.t391 < 1 ||
+ new_settings.t392 < 2 ||
+ new_settings.n391 < 1 ||
+ new_settings.n392 < 1 ||
+ new_settings.n393 < new_settings.n392 ||
+ new_settings.n393 > 32 ||
+ (new_settings.dce != 0 &&
+ new_settings.dce != 1))
+ return -EINVAL;
+
+ result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
+ if (result)
+ return result;
+
+ if (hdlc->proto.id != IF_PROTO_FR) {
+ hdlc_proto_detach(hdlc);
+ hdlc->state.fr.first_pvc = NULL;
+ hdlc->state.fr.dce_pvc_count = 0;
+ }
+ memcpy(&hdlc->state.fr.settings, &new_settings, size);
+ memset(&hdlc->proto, 0, sizeof(hdlc->proto));
+
+ hdlc->proto.close = fr_close;
+ hdlc->proto.start = fr_start;
+ hdlc->proto.stop = fr_stop;
+ hdlc->proto.detach = fr_destroy;
+ hdlc->proto.netif_rx = fr_rx;
+ hdlc->proto.id = IF_PROTO_FR;
+ dev->hard_start_xmit = hdlc->xmit;
+ dev->hard_header = NULL;
+ dev->type = ARPHRD_FRAD;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->addr_len = 0;
+ return 0;
+
+ case IF_PROTO_FR_ADD_PVC:
+ case IF_PROTO_FR_DEL_PVC:
+ case IF_PROTO_FR_ADD_ETH_PVC:
+ case IF_PROTO_FR_DEL_ETH_PVC:
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(&pvc, ifr->ifr_settings.ifs_ifsu.fr_pvc,
+ sizeof(fr_proto_pvc)))
+ return -EFAULT;
+
+ if (pvc.dlci <= 0 || pvc.dlci >= 1024)
+ return -EINVAL; /* Only 10 bits, DLCI 0 reserved */
+
+ if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC ||
+ ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC)
+ result = ARPHRD_ETHER; /* bridged Ethernet device */
+ else
+ result = ARPHRD_DLCI;
+
+ if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC ||
+ ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC)
+ return fr_add_pvc(dev, pvc.dlci, result);
+ else
+ return fr_del_pvc(hdlc, pvc.dlci, result);
+ }
+
+ return -EINVAL;
+}
diff --git a/kernel/datamods/hdlc_generic.c b/kernel/datamods/hdlc_generic.c
new file mode 100644
index 0000000..46cef8f
--- /dev/null
+++ b/kernel/datamods/hdlc_generic.c
@@ -0,0 +1,355 @@
+/*
+ * Generic HDLC support routines for Linux
+ *
+ * Copyright (C) 1999 - 2005 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * Currently supported:
+ * * raw IP-in-HDLC
+ * * Cisco HDLC
+ * * Frame Relay with ANSI or CCITT LMI (both user and network side)
+ * * PPP
+ * * X.25
+ *
+ * Use sethdlc utility to set line parameters, protocol and PVCs
+ *
+ * How does it work:
+ * - proto.open(), close(), start(), stop() calls are serialized.
+ * The order is: open, [ start, stop ... ] close ...
+ * - proto.start() and stop() are called with spin_lock_irq held.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/pkt_sched.h>
+#include <linux/inetdevice.h>
+#include <linux/lapb.h>
+#include <linux/rtnetlink.h>
+#include <linux/hdlc.h>
+
+
+static const char* version = "HDLC support module revision 1.18";
+
+#undef DEBUG_LINK
+
+
+static int hdlc_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if ((new_mtu < 68) || (new_mtu > HDLC_MAX_MTU))
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+
+
+static struct net_device_stats *hdlc_get_stats(struct net_device *dev)
+{
+ return hdlc_stats(dev);
+}
+
+
+
+static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *p, struct net_device *orig_dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ if (hdlc->proto.netif_rx)
+ return hdlc->proto.netif_rx(skb);
+
+ hdlc->stats.rx_dropped++; /* Shouldn't happen */
+ dev_kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
+
+
+static void __hdlc_set_carrier_on(struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ if (hdlc->proto.start)
+ return hdlc->proto.start(dev);
+#if 0
+#ifdef DEBUG_LINK
+ if (netif_carrier_ok(dev))
+ printk(KERN_ERR "hdlc_set_carrier_on(): already on\n");
+#endif
+ netif_carrier_on(dev);
+#endif
+}
+
+
+
+static void __hdlc_set_carrier_off(struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ if (hdlc->proto.stop)
+ return hdlc->proto.stop(dev);
+
+#if 0
+#ifdef DEBUG_LINK
+ if (!netif_carrier_ok(dev))
+ printk(KERN_ERR "hdlc_set_carrier_off(): already off\n");
+#endif
+ netif_carrier_off(dev);
+#endif
+}
+
+
+
+void hdlc_set_carrier(int on, struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ unsigned long flags;
+ on = on ? 1 : 0;
+
+#ifdef DEBUG_LINK
+ printk(KERN_DEBUG "hdlc_set_carrier %i\n", on);
+#endif
+
+ spin_lock_irqsave(&hdlc->state_lock, flags);
+
+ if (hdlc->carrier == on)
+ goto carrier_exit; /* no change in DCD line level */
+
+#ifdef DEBUG_LINK
+ printk(KERN_INFO "%s: carrier %s\n", dev->name, on ? "ON" : "off");
+#endif
+ hdlc->carrier = on;
+
+ if (!hdlc->open)
+ goto carrier_exit;
+
+ if (hdlc->carrier) {
+ printk(KERN_INFO "%s: Carrier detected\n", dev->name);
+ __hdlc_set_carrier_on(dev);
+ } else {
+ printk(KERN_INFO "%s: Carrier lost\n", dev->name);
+ __hdlc_set_carrier_off(dev);
+ }
+
+carrier_exit:
+ spin_unlock_irqrestore(&hdlc->state_lock, flags);
+}
+
+
+
+/* Must be called by hardware driver when HDLC device is being opened */
+int hdlc_open(struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+#ifdef DEBUG_LINK
+ printk(KERN_DEBUG "hdlc_open() carrier %i open %i\n",
+ hdlc->carrier, hdlc->open);
+#endif
+
+ if (hdlc->proto.id == -1)
+ return -ENOSYS; /* no protocol attached */
+
+ if (hdlc->proto.open) {
+ int result = hdlc->proto.open(dev);
+ if (result)
+ return result;
+ }
+
+ spin_lock_irq(&hdlc->state_lock);
+
+ if (hdlc->carrier) {
+ printk(KERN_INFO "%s: Carrier detected\n", dev->name);
+ __hdlc_set_carrier_on(dev);
+ } else
+ printk(KERN_INFO "%s: No carrier\n", dev->name);
+
+ hdlc->open = 1;
+
+ spin_unlock_irq(&hdlc->state_lock);
+ return 0;
+}
+
+
+
+/* Must be called by hardware driver when HDLC device is being closed */
+void hdlc_close(struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+#ifdef DEBUG_LINK
+ printk(KERN_DEBUG "hdlc_close() carrier %i open %i\n",
+ hdlc->carrier, hdlc->open);
+#endif
+
+ spin_lock_irq(&hdlc->state_lock);
+
+ hdlc->open = 0;
+ if (hdlc->carrier)
+ __hdlc_set_carrier_off(dev);
+
+ spin_unlock_irq(&hdlc->state_lock);
+
+ if (hdlc->proto.close)
+ hdlc->proto.close(dev);
+}
+
+
+
+#ifndef CONFIG_HDLC_RAW
+#define hdlc_raw_ioctl(dev, ifr) -ENOSYS
+#endif
+
+#ifndef CONFIG_HDLC_RAW_ETH
+#define hdlc_raw_eth_ioctl(dev, ifr) -ENOSYS
+#endif
+
+#ifndef CONFIG_HDLC_PPP
+#define hdlc_ppp_ioctl(dev, ifr) -ENOSYS
+#endif
+
+#ifndef CONFIG_HDLC_CISCO
+#define hdlc_cisco_ioctl(dev, ifr) -ENOSYS
+#endif
+
+#ifndef CONFIG_HDLC_FR
+#define hdlc_fr_ioctl(dev, ifr) -ENOSYS
+#endif
+
+#ifndef CONFIG_HDLC_X25
+#define hdlc_x25_ioctl(dev, ifr) -ENOSYS
+#endif
+
+
+int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ unsigned int proto;
+
+ if (cmd != SIOCWANDEV)
+ return -EINVAL;
+
+ switch(ifr->ifr_settings.type) {
+ case IF_PROTO_HDLC:
+ case IF_PROTO_HDLC_ETH:
+ case IF_PROTO_PPP:
+ case IF_PROTO_CISCO:
+ case IF_PROTO_FR:
+ case IF_PROTO_X25:
+ proto = ifr->ifr_settings.type;
+ break;
+
+ default:
+ proto = hdlc->proto.id;
+ }
+
+ switch(proto) {
+ case IF_PROTO_HDLC: return hdlc_raw_ioctl(dev, ifr);
+ case IF_PROTO_HDLC_ETH: return hdlc_raw_eth_ioctl(dev, ifr);
+ case IF_PROTO_PPP: return hdlc_ppp_ioctl(dev, ifr);
+ case IF_PROTO_CISCO: return hdlc_cisco_ioctl(dev, ifr);
+ case IF_PROTO_FR: return hdlc_fr_ioctl(dev, ifr);
+ case IF_PROTO_X25: return hdlc_x25_ioctl(dev, ifr);
+ default: return -EINVAL;
+ }
+}
+
+static void hdlc_setup(struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+
+ dev->get_stats = hdlc_get_stats;
+ dev->change_mtu = hdlc_change_mtu;
+ dev->mtu = HDLC_MAX_MTU;
+
+ dev->type = ARPHRD_RAWHDLC;
+ dev->hard_header_len = 16;
+
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+
+ hdlc->proto.id = -1;
+ hdlc->proto.detach = NULL;
+ hdlc->carrier = 1;
+ hdlc->open = 0;
+ spin_lock_init(&hdlc->state_lock);
+}
+
+struct net_device *alloc_hdlcdev(void *priv)
+{
+ struct net_device *dev;
+ dev = alloc_netdev(sizeof(hdlc_device), "hdlc%d", hdlc_setup);
+ if (dev)
+ dev_to_hdlc(dev)->priv = priv;
+ return dev;
+}
+
+int register_hdlc_device(struct net_device *dev)
+{
+ int result = dev_alloc_name(dev, "hdlc%d");
+ if (result < 0)
+ return result;
+
+ result = register_netdev(dev);
+ if (result != 0)
+ return -EIO;
+
+#if 0
+ if (netif_carrier_ok(dev))
+ netif_carrier_off(dev); /* no carrier until DCD goes up */
+#endif
+
+ return 0;
+}
+
+
+
+void unregister_hdlc_device(struct net_device *dev)
+{
+ rtnl_lock();
+ hdlc_proto_detach(dev_to_hdlc(dev));
+ unregister_netdevice(dev);
+ rtnl_unlock();
+}
+
+
+
+MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
+MODULE_DESCRIPTION("HDLC support module");
+MODULE_LICENSE("GPL v2");
+
+EXPORT_SYMBOL(hdlc_open);
+EXPORT_SYMBOL(hdlc_close);
+EXPORT_SYMBOL(hdlc_set_carrier);
+EXPORT_SYMBOL(hdlc_ioctl);
+EXPORT_SYMBOL(alloc_hdlcdev);
+EXPORT_SYMBOL(register_hdlc_device);
+EXPORT_SYMBOL(unregister_hdlc_device);
+
+static struct packet_type hdlc_packet_type = {
+ .type = __constant_htons(ETH_P_HDLC),
+ .func = hdlc_rcv,
+};
+
+
+static int __init hdlc_module_init(void)
+{
+ printk(KERN_INFO "%s\n", version);
+ dev_add_pack(&hdlc_packet_type);
+ return 0;
+}
+
+
+
+static void __exit hdlc_module_exit(void)
+{
+ dev_remove_pack(&hdlc_packet_type);
+}
+
+
+module_init(hdlc_module_init);
+module_exit(hdlc_module_exit);
diff --git a/kernel/datamods/hdlc_ppp.c b/kernel/datamods/hdlc_ppp.c
new file mode 100644
index 0000000..b81263e
--- /dev/null
+++ b/kernel/datamods/hdlc_ppp.c
@@ -0,0 +1,114 @@
+/*
+ * Generic HDLC support routines for Linux
+ * Point-to-point protocol support
+ *
+ * Copyright (C) 1999 - 2003 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/pkt_sched.h>
+#include <linux/inetdevice.h>
+#include <linux/lapb.h>
+#include <linux/rtnetlink.h>
+#include <linux/hdlc.h>
+
+
+static int ppp_open(struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ void *old_ioctl;
+ int result;
+
+ dev->priv = &hdlc->state.ppp.syncppp_ptr;
+ hdlc->state.ppp.syncppp_ptr = &hdlc->state.ppp.pppdev;
+ hdlc->state.ppp.pppdev.dev = dev;
+
+ old_ioctl = dev->do_ioctl;
+ hdlc->state.ppp.old_change_mtu = dev->change_mtu;
+ sppp_attach(&hdlc->state.ppp.pppdev);
+ /* sppp_attach nukes them. We don't need syncppp's ioctl */
+ dev->do_ioctl = old_ioctl;
+ hdlc->state.ppp.pppdev.sppp.pp_flags &= ~PP_CISCO;
+ dev->type = ARPHRD_PPP;
+ result = sppp_open(dev);
+ if (result) {
+ sppp_detach(dev);
+ return result;
+ }
+
+ return 0;
+}
+
+
+
+static void ppp_close(struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+
+ sppp_close(dev);
+ sppp_detach(dev);
+ dev->rebuild_header = NULL;
+ dev->change_mtu = hdlc->state.ppp.old_change_mtu;
+ dev->mtu = HDLC_MAX_MTU;
+ dev->hard_header_len = 16;
+}
+
+
+
+static __be16 ppp_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+ return __constant_htons(ETH_P_WAN_PPP);
+}
+
+
+
+int hdlc_ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ int result;
+
+ switch (ifr->ifr_settings.type) {
+ case IF_GET_PROTO:
+ ifr->ifr_settings.type = IF_PROTO_PPP;
+ return 0; /* return protocol only, no settable parameters */
+
+ case IF_PROTO_PPP:
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if(dev->flags & IFF_UP)
+ return -EBUSY;
+
+ /* no settable parameters */
+
+ result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
+ if (result)
+ return result;
+
+ hdlc_proto_detach(hdlc);
+ memset(&hdlc->proto, 0, sizeof(hdlc->proto));
+
+ hdlc->proto.open = ppp_open;
+ hdlc->proto.close = ppp_close;
+ hdlc->proto.type_trans = ppp_type_trans;
+ hdlc->proto.id = IF_PROTO_PPP;
+ dev->hard_start_xmit = hdlc->xmit;
+ dev->hard_header = NULL;
+ dev->type = ARPHRD_PPP;
+ dev->addr_len = 0;
+ return 0;
+ }
+
+ return -EINVAL;
+}
diff --git a/kernel/datamods/hdlc_raw.c b/kernel/datamods/hdlc_raw.c
new file mode 100644
index 0000000..9456d31
--- /dev/null
+++ b/kernel/datamods/hdlc_raw.c
@@ -0,0 +1,89 @@
+/*
+ * Generic HDLC support routines for Linux
+ * HDLC support
+ *
+ * Copyright (C) 1999 - 2003 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/pkt_sched.h>
+#include <linux/inetdevice.h>
+#include <linux/lapb.h>
+#include <linux/rtnetlink.h>
+#include <linux/hdlc.h>
+
+
+static __be16 raw_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+ return __constant_htons(ETH_P_IP);
+}
+
+
+
+int hdlc_raw_ioctl(struct net_device *dev, struct ifreq *ifr)
+{
+ raw_hdlc_proto __user *raw_s = ifr->ifr_settings.ifs_ifsu.raw_hdlc;
+ const size_t size = sizeof(raw_hdlc_proto);
+ raw_hdlc_proto new_settings;
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ int result;
+
+ switch (ifr->ifr_settings.type) {
+ case IF_GET_PROTO:
+ ifr->ifr_settings.type = IF_PROTO_HDLC;
+ if (ifr->ifr_settings.size < size) {
+ ifr->ifr_settings.size = size; /* data size wanted */
+ return -ENOBUFS;
+ }
+ if (copy_to_user(raw_s, &hdlc->state.raw_hdlc.settings, size))
+ return -EFAULT;
+ return 0;
+
+ case IF_PROTO_HDLC:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ if (copy_from_user(&new_settings, raw_s, size))
+ return -EFAULT;
+
+ if (new_settings.encoding == ENCODING_DEFAULT)
+ new_settings.encoding = ENCODING_NRZ;
+
+ if (new_settings.parity == PARITY_DEFAULT)
+ new_settings.parity = PARITY_CRC16_PR1_CCITT;
+
+ result = hdlc->attach(dev, new_settings.encoding,
+ new_settings.parity);
+ if (result)
+ return result;
+
+ hdlc_proto_detach(hdlc);
+ memcpy(&hdlc->state.raw_hdlc.settings, &new_settings, size);
+ memset(&hdlc->proto, 0, sizeof(hdlc->proto));
+
+ hdlc->proto.type_trans = raw_type_trans;
+ hdlc->proto.id = IF_PROTO_HDLC;
+ dev->hard_start_xmit = hdlc->xmit;
+ dev->hard_header = NULL;
+ dev->type = ARPHRD_RAWHDLC;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->addr_len = 0;
+ return 0;
+ }
+
+ return -EINVAL;
+}
diff --git a/kernel/datamods/hdlc_raw_eth.c b/kernel/datamods/hdlc_raw_eth.c
new file mode 100644
index 0000000..b1285cc
--- /dev/null
+++ b/kernel/datamods/hdlc_raw_eth.c
@@ -0,0 +1,107 @@
+/*
+ * Generic HDLC support routines for Linux
+ * HDLC Ethernet emulation support
+ *
+ * Copyright (C) 2002-2003 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/pkt_sched.h>
+#include <linux/random.h>
+#include <linux/inetdevice.h>
+#include <linux/lapb.h>
+#include <linux/rtnetlink.h>
+#include <linux/etherdevice.h>
+#include <linux/hdlc.h>
+
+
+static int eth_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ int pad = ETH_ZLEN - skb->len;
+ if (pad > 0) { /* Pad the frame with zeros */
+ int len = skb->len;
+ if (skb_tailroom(skb) < pad)
+ if (pskb_expand_head(skb, 0, pad, GFP_ATOMIC)) {
+ hdlc_stats(dev)->tx_dropped++;
+ dev_kfree_skb(skb);
+ return 0;
+ }
+ skb_put(skb, pad);
+ memset(skb->data + len, 0, pad);
+ }
+ return dev_to_hdlc(dev)->xmit(skb, dev);
+}
+
+
+int hdlc_raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
+{
+ raw_hdlc_proto __user *raw_s = ifr->ifr_settings.ifs_ifsu.raw_hdlc;
+ const size_t size = sizeof(raw_hdlc_proto);
+ raw_hdlc_proto new_settings;
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ int result;
+ void *old_ch_mtu;
+ int old_qlen;
+
+ switch (ifr->ifr_settings.type) {
+ case IF_GET_PROTO:
+ ifr->ifr_settings.type = IF_PROTO_HDLC_ETH;
+ if (ifr->ifr_settings.size < size) {
+ ifr->ifr_settings.size = size; /* data size wanted */
+ return -ENOBUFS;
+ }
+ if (copy_to_user(raw_s, &hdlc->state.raw_hdlc.settings, size))
+ return -EFAULT;
+ return 0;
+
+ case IF_PROTO_HDLC_ETH:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ if (copy_from_user(&new_settings, raw_s, size))
+ return -EFAULT;
+
+ if (new_settings.encoding == ENCODING_DEFAULT)
+ new_settings.encoding = ENCODING_NRZ;
+
+ if (new_settings.parity == PARITY_DEFAULT)
+ new_settings.parity = PARITY_CRC16_PR1_CCITT;
+
+ result = hdlc->attach(dev, new_settings.encoding,
+ new_settings.parity);
+ if (result)
+ return result;
+
+ hdlc_proto_detach(hdlc);
+ memcpy(&hdlc->state.raw_hdlc.settings, &new_settings, size);
+ memset(&hdlc->proto, 0, sizeof(hdlc->proto));
+
+ hdlc->proto.type_trans = eth_type_trans;
+ hdlc->proto.id = IF_PROTO_HDLC_ETH;
+ dev->hard_start_xmit = eth_tx;
+ old_ch_mtu = dev->change_mtu;
+ old_qlen = dev->tx_queue_len;
+ ether_setup(dev);
+ dev->change_mtu = old_ch_mtu;
+ dev->tx_queue_len = old_qlen;
+ memcpy(dev->dev_addr, "\x00\x01", 2);
+ get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2);
+ return 0;
+ }
+
+ return -EINVAL;
+}
diff --git a/kernel/datamods/syncppp.c b/kernel/datamods/syncppp.c
new file mode 100644
index 0000000..5ca283a
--- /dev/null
+++ b/kernel/datamods/syncppp.c
@@ -0,0 +1,1485 @@
+/*
+ * NET3: A (fairly minimal) implementation of synchronous PPP for Linux
+ * as well as a CISCO HDLC implementation. See the copyright
+ * message below for the original source.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the license, or (at your option) any later version.
+ *
+ * Note however. This code is also used in a different form by FreeBSD.
+ * Therefore when making any non OS specific change please consider
+ * contributing it back to the original author under the terms
+ * below in addition.
+ * -- Alan
+ *
+ * Port for Linux-2.1 by Jan "Yenya" Kasprzak <kas@fi.muni.cz>
+ */
+
+/*
+ * Synchronous PPP/Cisco link level subroutines.
+ * Keepalive protocol implemented in both Cisco and PPP modes.
+ *
+ * Copyright (C) 1994 Cronyx Ltd.
+ * Author: Serge Vakulenko, <vak@zebub.msk.su>
+ *
+ * This software is distributed with NO WARRANTIES, not even the implied
+ * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Authors grant any other persons or organisations permission to use
+ * or modify this software as long as this message is kept with the software,
+ * all derivative works or modified versions.
+ *
+ * Version 1.9, Wed Oct 4 18:58:15 MSK 1995
+ *
+ * $Id$
+ */
+#undef DEBUG
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/if_arp.h>
+#include <linux/skbuff.h>
+#include <linux/route.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/random.h>
+#include <linux/pkt_sched.h>
+#include <linux/spinlock.h>
+#include <linux/rcupdate.h>
+
+#include <net/syncppp.h>
+
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+
+#define MAXALIVECNT 6 /* max. alive packets */
+
+#define PPP_ALLSTATIONS 0xff /* All-Stations broadcast address */
+#define PPP_UI 0x03 /* Unnumbered Information */
+#define PPP_IP 0x0021 /* Internet Protocol */
+#define PPP_ISO 0x0023 /* ISO OSI Protocol */
+#define PPP_XNS 0x0025 /* Xerox NS Protocol */
+#define PPP_IPX 0x002b /* Novell IPX Protocol */
+#define PPP_LCP 0xc021 /* Link Control Protocol */
+#define PPP_IPCP 0x8021 /* Internet Protocol Control Protocol */
+
+#define LCP_CONF_REQ 1 /* PPP LCP configure request */
+#define LCP_CONF_ACK 2 /* PPP LCP configure acknowledge */
+#define LCP_CONF_NAK 3 /* PPP LCP configure negative ack */
+#define LCP_CONF_REJ 4 /* PPP LCP configure reject */
+#define LCP_TERM_REQ 5 /* PPP LCP terminate request */
+#define LCP_TERM_ACK 6 /* PPP LCP terminate acknowledge */
+#define LCP_CODE_REJ 7 /* PPP LCP code reject */
+#define LCP_PROTO_REJ 8 /* PPP LCP protocol reject */
+#define LCP_ECHO_REQ 9 /* PPP LCP echo request */
+#define LCP_ECHO_REPLY 10 /* PPP LCP echo reply */
+#define LCP_DISC_REQ 11 /* PPP LCP discard request */
+
+#define LCP_OPT_MRU 1 /* maximum receive unit */
+#define LCP_OPT_ASYNC_MAP 2 /* async control character map */
+#define LCP_OPT_AUTH_PROTO 3 /* authentication protocol */
+#define LCP_OPT_QUAL_PROTO 4 /* quality protocol */
+#define LCP_OPT_MAGIC 5 /* magic number */
+#define LCP_OPT_RESERVED 6 /* reserved */
+#define LCP_OPT_PROTO_COMP 7 /* protocol field compression */
+#define LCP_OPT_ADDR_COMP 8 /* address/control field compression */
+
+#define IPCP_CONF_REQ LCP_CONF_REQ /* PPP IPCP configure request */
+#define IPCP_CONF_ACK LCP_CONF_ACK /* PPP IPCP configure acknowledge */
+#define IPCP_CONF_NAK LCP_CONF_NAK /* PPP IPCP configure negative ack */
+#define IPCP_CONF_REJ LCP_CONF_REJ /* PPP IPCP configure reject */
+#define IPCP_TERM_REQ LCP_TERM_REQ /* PPP IPCP terminate request */
+#define IPCP_TERM_ACK LCP_TERM_ACK /* PPP IPCP terminate acknowledge */
+#define IPCP_CODE_REJ LCP_CODE_REJ /* PPP IPCP code reject */
+
+#define CISCO_MULTICAST 0x8f /* Cisco multicast address */
+#define CISCO_UNICAST 0x0f /* Cisco unicast address */
+#define CISCO_KEEPALIVE 0x8035 /* Cisco keepalive protocol */
+#define CISCO_ADDR_REQ 0 /* Cisco address request */
+#define CISCO_ADDR_REPLY 1 /* Cisco address reply */
+#define CISCO_KEEPALIVE_REQ 2 /* Cisco keepalive request */
+
+struct ppp_header {
+ u8 address;
+ u8 control;
+ u16 protocol;
+};
+#define PPP_HEADER_LEN sizeof (struct ppp_header)
+
+struct lcp_header {
+ u8 type;
+ u8 ident;
+ u16 len;
+};
+#define LCP_HEADER_LEN sizeof (struct lcp_header)
+
+struct cisco_packet {
+ u32 type;
+ u32 par1;
+ u32 par2;
+ u16 rel;
+ u16 time0;
+ u16 time1;
+};
+#define CISCO_PACKET_LEN 18
+#define CISCO_BIG_PACKET_LEN 20
+
+static struct sppp *spppq;
+static struct timer_list sppp_keepalive_timer;
+static DEFINE_SPINLOCK(spppq_lock);
+
+/* global xmit queue for sending packets while spinlock is held */
+static struct sk_buff_head tx_queue;
+
+static void sppp_keepalive (unsigned long dummy);
+static void sppp_cp_send (struct sppp *sp, u16 proto, u8 type,
+ u8 ident, u16 len, void *data);
+static void sppp_cisco_send (struct sppp *sp, int type, long par1, long par2);
+static void sppp_lcp_input (struct sppp *sp, struct sk_buff *m);
+static void sppp_cisco_input (struct sppp *sp, struct sk_buff *m);
+static void sppp_ipcp_input (struct sppp *sp, struct sk_buff *m);
+static void sppp_lcp_open (struct sppp *sp);
+static void sppp_ipcp_open (struct sppp *sp);
+static int sppp_lcp_conf_parse_options (struct sppp *sp, struct lcp_header *h,
+ int len, u32 *magic);
+static void sppp_cp_timeout (unsigned long arg);
+static char *sppp_lcp_type_name (u8 type);
+static char *sppp_ipcp_type_name (u8 type);
+static void sppp_print_bytes (u8 *p, u16 len);
+
+static int debug;
+
+/* Flush global outgoing packet queue to dev_queue_xmit().
+ *
+ * dev_queue_xmit() must be called with interrupts enabled
+ * which means it can't be called with spinlocks held.
+ * If a packet needs to be sent while a spinlock is held,
+ * then put the packet into tx_queue, and call sppp_flush_xmit()
+ * after spinlock is released.
+ */
+static void sppp_flush_xmit(void)
+{
+ struct sk_buff *skb;
+ while ((skb = skb_dequeue(&tx_queue)) != NULL)
+ dev_queue_xmit(skb);
+}
+
+/*
+ * Interface down stub
+ */
+
+static void if_down(struct net_device *dev)
+{
+ struct sppp *sp = (struct sppp *)sppp_of(dev);
+
+ sp->pp_link_state=SPPP_LINK_DOWN;
+}
+
+/*
+ * Timeout routine activations.
+ */
+
+static void sppp_set_timeout(struct sppp *p,int s)
+{
+ if (! (p->pp_flags & PP_TIMO))
+ {
+ init_timer(&p->pp_timer);
+ p->pp_timer.function=sppp_cp_timeout;
+ p->pp_timer.expires=jiffies+s*HZ;
+ p->pp_timer.data=(unsigned long)p;
+ p->pp_flags |= PP_TIMO;
+ add_timer(&p->pp_timer);
+ }
+}
+
+static void sppp_clear_timeout(struct sppp *p)
+{
+ if (p->pp_flags & PP_TIMO)
+ {
+ del_timer(&p->pp_timer);
+ p->pp_flags &= ~PP_TIMO;
+ }
+}
+
+/**
+ * sppp_input - receive and process a WAN PPP frame
+ * @skb: The buffer to process
+ * @dev: The device it arrived on
+ *
+ * This can be called directly by cards that do not have
+ * timing constraints but is normally called from the network layer
+ * after interrupt servicing to process frames queued via netif_rx().
+ *
+ * We process the options in the card. If the frame is destined for
+ * the protocol stacks then it requeues the frame for the upper level
+ * protocol. If it is a control from it is processed and discarded
+ * here.
+ */
+
+static void sppp_input (struct net_device *dev, struct sk_buff *skb)
+{
+ struct ppp_header *h;
+ struct sppp *sp = (struct sppp *)sppp_of(dev);
+ unsigned long flags;
+
+ skb->dev=dev;
+ skb->mac.raw=skb->data;
+
+ if (dev->flags & IFF_RUNNING)
+ {
+ /* Count received bytes, add FCS and one flag */
+ sp->ibytes+= skb->len + 3;
+ sp->ipkts++;
+ }
+
+ if (!pskb_may_pull(skb, PPP_HEADER_LEN)) {
+ /* Too small packet, drop it. */
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_DEBUG "%s: input packet is too small, %d bytes\n",
+ dev->name, skb->len);
+ kfree_skb(skb);
+ return;
+ }
+
+ /* Get PPP header. */
+ h = (struct ppp_header *)skb->data;
+ skb_pull(skb,sizeof(struct ppp_header));
+
+ spin_lock_irqsave(&sp->lock, flags);
+
+ switch (h->address) {
+ default: /* Invalid PPP packet. */
+ goto invalid;
+ case PPP_ALLSTATIONS:
+ if (h->control != PPP_UI)
+ goto invalid;
+ if (sp->pp_flags & PP_CISCO) {
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_WARNING "%s: PPP packet in Cisco mode <0x%x 0x%x 0x%x>\n",
+ dev->name,
+ h->address, h->control, ntohs (h->protocol));
+ goto drop;
+ }
+ switch (ntohs (h->protocol)) {
+ default:
+ if (sp->lcp.state == LCP_STATE_OPENED)
+ sppp_cp_send (sp, PPP_LCP, LCP_PROTO_REJ,
+ ++sp->pp_seq, skb->len + 2,
+ &h->protocol);
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_WARNING "%s: invalid input protocol <0x%x 0x%x 0x%x>\n",
+ dev->name,
+ h->address, h->control, ntohs (h->protocol));
+ goto drop;
+ case PPP_LCP:
+ sppp_lcp_input (sp, skb);
+ goto drop;
+ case PPP_IPCP:
+ if (sp->lcp.state == LCP_STATE_OPENED)
+ sppp_ipcp_input (sp, skb);
+ else
+ printk(KERN_DEBUG "IPCP when still waiting LCP finish.\n");
+ goto drop;
+ case PPP_IP:
+ if (sp->ipcp.state == IPCP_STATE_OPENED) {
+ if(sp->pp_flags&PP_DEBUG)
+ printk(KERN_DEBUG "Yow an IP frame.\n");
+ skb->protocol=htons(ETH_P_IP);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ goto done;
+ }
+ break;
+#ifdef IPX
+ case PPP_IPX:
+ /* IPX IPXCP not implemented yet */
+ if (sp->lcp.state == LCP_STATE_OPENED) {
+ skb->protocol=htons(ETH_P_IPX);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ goto done;
+ }
+ break;
+#endif
+ }
+ break;
+ case CISCO_MULTICAST:
+ case CISCO_UNICAST:
+ /* Don't check the control field here (RFC 1547). */
+ if (! (sp->pp_flags & PP_CISCO)) {
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_WARNING "%s: Cisco packet in PPP mode <0x%x 0x%x 0x%x>\n",
+ dev->name,
+ h->address, h->control, ntohs (h->protocol));
+ goto drop;
+ }
+ switch (ntohs (h->protocol)) {
+ default:
+ goto invalid;
+ case CISCO_KEEPALIVE:
+ sppp_cisco_input (sp, skb);
+ goto drop;
+#ifdef CONFIG_INET
+ case ETH_P_IP:
+ skb->protocol=htons(ETH_P_IP);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ goto done;
+#endif
+#ifdef CONFIG_IPX
+ case ETH_P_IPX:
+ skb->protocol=htons(ETH_P_IPX);
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ goto done;
+#endif
+ }
+ break;
+ }
+ goto drop;
+
+invalid:
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_WARNING "%s: invalid input packet <0x%x 0x%x 0x%x>\n",
+ dev->name, h->address, h->control, ntohs (h->protocol));
+drop:
+ kfree_skb(skb);
+done:
+ spin_unlock_irqrestore(&sp->lock, flags);
+ sppp_flush_xmit();
+ return;
+}
+
+/*
+ * Handle transmit packets.
+ */
+
+static int sppp_hard_header(struct sk_buff *skb, struct net_device *dev, __u16 type,
+ void *daddr, void *saddr, unsigned int len)
+{
+ struct sppp *sp = (struct sppp *)sppp_of(dev);
+ struct ppp_header *h;
+ skb_push(skb,sizeof(struct ppp_header));
+ h=(struct ppp_header *)skb->data;
+ if(sp->pp_flags&PP_CISCO)
+ {
+ h->address = CISCO_UNICAST;
+ h->control = 0;
+ }
+ else
+ {
+ h->address = PPP_ALLSTATIONS;
+ h->control = PPP_UI;
+ }
+ if(sp->pp_flags & PP_CISCO)
+ {
+ h->protocol = htons(type);
+ }
+ else switch(type)
+ {
+ case ETH_P_IP:
+ h->protocol = htons(PPP_IP);
+ break;
+ case ETH_P_IPX:
+ h->protocol = htons(PPP_IPX);
+ break;
+ }
+ return sizeof(struct ppp_header);
+}
+
+static int sppp_rebuild_header(struct sk_buff *skb)
+{
+ return 0;
+}
+
+/*
+ * Send keepalive packets, every 10 seconds.
+ */
+
+static void sppp_keepalive (unsigned long dummy)
+{
+ struct sppp *sp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&spppq_lock, flags);
+
+ for (sp=spppq; sp; sp=sp->pp_next)
+ {
+ struct net_device *dev = sp->pp_if;
+
+ /* Keepalive mode disabled or channel down? */
+ if (! (sp->pp_flags & PP_KEEPALIVE) ||
+ ! (dev->flags & IFF_UP))
+ continue;
+
+ spin_lock(&sp->lock);
+
+ /* No keepalive in PPP mode if LCP not opened yet. */
+ if (! (sp->pp_flags & PP_CISCO) &&
+ sp->lcp.state != LCP_STATE_OPENED) {
+ spin_unlock(&sp->lock);
+ continue;
+ }
+
+ if (sp->pp_alivecnt == MAXALIVECNT) {
+ /* No keepalive packets got. Stop the interface. */
+ printk (KERN_WARNING "%s: protocol down\n", dev->name);
+ if_down (dev);
+ if (! (sp->pp_flags & PP_CISCO)) {
+ /* Shut down the PPP link. */
+ sp->lcp.magic = jiffies;
+ sp->lcp.state = LCP_STATE_CLOSED;
+ sp->ipcp.state = IPCP_STATE_CLOSED;
+ sppp_clear_timeout (sp);
+ /* Initiate negotiation. */
+ sppp_lcp_open (sp);
+ }
+ }
+ if (sp->pp_alivecnt <= MAXALIVECNT)
+ ++sp->pp_alivecnt;
+ if (sp->pp_flags & PP_CISCO)
+ sppp_cisco_send (sp, CISCO_KEEPALIVE_REQ, ++sp->pp_seq,
+ sp->pp_rseq);
+ else if (sp->lcp.state == LCP_STATE_OPENED) {
+ long nmagic = htonl (sp->lcp.magic);
+ sp->lcp.echoid = ++sp->pp_seq;
+ sppp_cp_send (sp, PPP_LCP, LCP_ECHO_REQ,
+ sp->lcp.echoid, 4, &nmagic);
+ }
+
+ spin_unlock(&sp->lock);
+ }
+ spin_unlock_irqrestore(&spppq_lock, flags);
+ sppp_flush_xmit();
+ sppp_keepalive_timer.expires=jiffies+10*HZ;
+ add_timer(&sppp_keepalive_timer);
+}
+
+/*
+ * Handle incoming PPP Link Control Protocol packets.
+ */
+
+static void sppp_lcp_input (struct sppp *sp, struct sk_buff *skb)
+{
+ struct lcp_header *h;
+ struct net_device *dev = sp->pp_if;
+ int len = skb->len;
+ u8 *p, opt[6];
+ u32 rmagic;
+
+ if (!pskb_may_pull(skb, sizeof(struct lcp_header))) {
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_WARNING "%s: invalid lcp packet length: %d bytes\n",
+ dev->name, len);
+ return;
+ }
+ h = (struct lcp_header *)skb->data;
+ skb_pull(skb,sizeof(struct lcp_header *));
+
+ if (sp->pp_flags & PP_DEBUG)
+ {
+ char state = '?';
+ switch (sp->lcp.state) {
+ case LCP_STATE_CLOSED: state = 'C'; break;
+ case LCP_STATE_ACK_RCVD: state = 'R'; break;
+ case LCP_STATE_ACK_SENT: state = 'S'; break;
+ case LCP_STATE_OPENED: state = 'O'; break;
+ }
+ printk (KERN_WARNING "%s: lcp input(%c): %d bytes <%s id=%xh len=%xh",
+ dev->name, state, len,
+ sppp_lcp_type_name (h->type), h->ident, ntohs (h->len));
+ if (len > 4)
+ sppp_print_bytes ((u8*) (h+1), len-4);
+ printk (">\n");
+ }
+ if (len > ntohs (h->len))
+ len = ntohs (h->len);
+ switch (h->type) {
+ default:
+ /* Unknown packet type -- send Code-Reject packet. */
+ sppp_cp_send (sp, PPP_LCP, LCP_CODE_REJ, ++sp->pp_seq,
+ skb->len, h);
+ break;
+ case LCP_CONF_REQ:
+ if (len < 4) {
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_DEBUG"%s: invalid lcp configure request packet length: %d bytes\n",
+ dev->name, len);
+ break;
+ }
+ if (len>4 && !sppp_lcp_conf_parse_options (sp, h, len, &rmagic))
+ goto badreq;
+ if (rmagic == sp->lcp.magic) {
+ /* Local and remote magics equal -- loopback? */
+ if (sp->pp_loopcnt >= MAXALIVECNT*5) {
+ printk (KERN_WARNING "%s: loopback\n",
+ dev->name);
+ sp->pp_loopcnt = 0;
+ if (dev->flags & IFF_UP) {
+ if_down (dev);
+ }
+ } else if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_DEBUG "%s: conf req: magic glitch\n",
+ dev->name);
+ ++sp->pp_loopcnt;
+
+ /* MUST send Conf-Nack packet. */
+ rmagic = ~sp->lcp.magic;
+ opt[0] = LCP_OPT_MAGIC;
+ opt[1] = sizeof (opt);
+ opt[2] = rmagic >> 24;
+ opt[3] = rmagic >> 16;
+ opt[4] = rmagic >> 8;
+ opt[5] = rmagic;
+ sppp_cp_send (sp, PPP_LCP, LCP_CONF_NAK,
+ h->ident, sizeof (opt), &opt);
+badreq:
+ switch (sp->lcp.state) {
+ case LCP_STATE_OPENED:
+ /* Initiate renegotiation. */
+ sppp_lcp_open (sp);
+ /* fall through... */
+ case LCP_STATE_ACK_SENT:
+ /* Go to closed state. */
+ sp->lcp.state = LCP_STATE_CLOSED;
+ sp->ipcp.state = IPCP_STATE_CLOSED;
+ }
+ break;
+ }
+ /* Send Configure-Ack packet. */
+ sp->pp_loopcnt = 0;
+ if (sp->lcp.state != LCP_STATE_OPENED) {
+ sppp_cp_send (sp, PPP_LCP, LCP_CONF_ACK,
+ h->ident, len-4, h+1);
+ }
+ /* Change the state. */
+ switch (sp->lcp.state) {
+ case LCP_STATE_CLOSED:
+ sp->lcp.state = LCP_STATE_ACK_SENT;
+ break;
+ case LCP_STATE_ACK_RCVD:
+ sp->lcp.state = LCP_STATE_OPENED;
+ sppp_ipcp_open (sp);
+ break;
+ case LCP_STATE_OPENED:
+ /* Remote magic changed -- close session. */
+ sp->lcp.state = LCP_STATE_CLOSED;
+ sp->ipcp.state = IPCP_STATE_CLOSED;
+ /* Initiate renegotiation. */
+ sppp_lcp_open (sp);
+ /* Send ACK after our REQ in attempt to break loop */
+ sppp_cp_send (sp, PPP_LCP, LCP_CONF_ACK,
+ h->ident, len-4, h+1);
+ sp->lcp.state = LCP_STATE_ACK_SENT;
+ break;
+ }
+ break;
+ case LCP_CONF_ACK:
+ if (h->ident != sp->lcp.confid)
+ break;
+ sppp_clear_timeout (sp);
+ if ((sp->pp_link_state != SPPP_LINK_UP) &&
+ (dev->flags & IFF_UP)) {
+ /* Coming out of loopback mode. */
+ sp->pp_link_state=SPPP_LINK_UP;
+ printk (KERN_INFO "%s: protocol up\n", dev->name);
+ }
+ switch (sp->lcp.state) {
+ case LCP_STATE_CLOSED:
+ sp->lcp.state = LCP_STATE_ACK_RCVD;
+ sppp_set_timeout (sp, 5);
+ break;
+ case LCP_STATE_ACK_SENT:
+ sp->lcp.state = LCP_STATE_OPENED;
+ sppp_ipcp_open (sp);
+ break;
+ }
+ break;
+ case LCP_CONF_NAK:
+ if (h->ident != sp->lcp.confid)
+ break;
+ p = (u8*) (h+1);
+ if (len>=10 && p[0] == LCP_OPT_MAGIC && p[1] >= 4) {
+ rmagic = (u32)p[2] << 24 |
+ (u32)p[3] << 16 | p[4] << 8 | p[5];
+ if (rmagic == ~sp->lcp.magic) {
+ int newmagic;
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_DEBUG "%s: conf nak: magic glitch\n",
+ dev->name);
+ get_random_bytes(&newmagic, sizeof(newmagic));
+ sp->lcp.magic += newmagic;
+ } else
+ sp->lcp.magic = rmagic;
+ }
+ if (sp->lcp.state != LCP_STATE_ACK_SENT) {
+ /* Go to closed state. */
+ sp->lcp.state = LCP_STATE_CLOSED;
+ sp->ipcp.state = IPCP_STATE_CLOSED;
+ }
+ /* The link will be renegotiated after timeout,
+ * to avoid endless req-nack loop. */
+ sppp_clear_timeout (sp);
+ sppp_set_timeout (sp, 2);
+ break;
+ case LCP_CONF_REJ:
+ if (h->ident != sp->lcp.confid)
+ break;
+ sppp_clear_timeout (sp);
+ /* Initiate renegotiation. */
+ sppp_lcp_open (sp);
+ if (sp->lcp.state != LCP_STATE_ACK_SENT) {
+ /* Go to closed state. */
+ sp->lcp.state = LCP_STATE_CLOSED;
+ sp->ipcp.state = IPCP_STATE_CLOSED;
+ }
+ break;
+ case LCP_TERM_REQ:
+ sppp_clear_timeout (sp);
+ /* Send Terminate-Ack packet. */
+ sppp_cp_send (sp, PPP_LCP, LCP_TERM_ACK, h->ident, 0, NULL);
+ /* Go to closed state. */
+ sp->lcp.state = LCP_STATE_CLOSED;
+ sp->ipcp.state = IPCP_STATE_CLOSED;
+ /* Initiate renegotiation. */
+ sppp_lcp_open (sp);
+ break;
+ case LCP_TERM_ACK:
+ case LCP_CODE_REJ:
+ case LCP_PROTO_REJ:
+ /* Ignore for now. */
+ break;
+ case LCP_DISC_REQ:
+ /* Discard the packet. */
+ break;
+ case LCP_ECHO_REQ:
+ if (sp->lcp.state != LCP_STATE_OPENED)
+ break;
+ if (len < 8) {
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_WARNING "%s: invalid lcp echo request packet length: %d bytes\n",
+ dev->name, len);
+ break;
+ }
+ if (ntohl (*(long*)(h+1)) == sp->lcp.magic) {
+ /* Line loopback mode detected. */
+ printk (KERN_WARNING "%s: loopback\n", dev->name);
+ if_down (dev);
+
+ /* Shut down the PPP link. */
+ sp->lcp.state = LCP_STATE_CLOSED;
+ sp->ipcp.state = IPCP_STATE_CLOSED;
+ sppp_clear_timeout (sp);
+ /* Initiate negotiation. */
+ sppp_lcp_open (sp);
+ break;
+ }
+ *(long*)(h+1) = htonl (sp->lcp.magic);
+ sppp_cp_send (sp, PPP_LCP, LCP_ECHO_REPLY, h->ident, len-4, h+1);
+ break;
+ case LCP_ECHO_REPLY:
+ if (h->ident != sp->lcp.echoid)
+ break;
+ if (len < 8) {
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_WARNING "%s: invalid lcp echo reply packet length: %d bytes\n",
+ dev->name, len);
+ break;
+ }
+ if (ntohl (*(long*)(h+1)) != sp->lcp.magic)
+ sp->pp_alivecnt = 0;
+ break;
+ }
+}
+
+/*
+ * Handle incoming Cisco keepalive protocol packets.
+ */
+
+static void sppp_cisco_input (struct sppp *sp, struct sk_buff *skb)
+{
+ struct cisco_packet *h;
+ struct net_device *dev = sp->pp_if;
+
+ if (!pskb_may_pull(skb, sizeof(struct cisco_packet))
+ || (skb->len != CISCO_PACKET_LEN
+ && skb->len != CISCO_BIG_PACKET_LEN)) {
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_WARNING "%s: invalid cisco packet length: %d bytes\n",
+ dev->name, skb->len);
+ return;
+ }
+ h = (struct cisco_packet *)skb->data;
+ skb_pull(skb, sizeof(struct cisco_packet*));
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_WARNING "%s: cisco input: %d bytes <%xh %xh %xh %xh %xh-%xh>\n",
+ dev->name, skb->len,
+ ntohl (h->type), h->par1, h->par2, h->rel,
+ h->time0, h->time1);
+ switch (ntohl (h->type)) {
+ default:
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_WARNING "%s: unknown cisco packet type: 0x%x\n",
+ dev->name, ntohl (h->type));
+ break;
+ case CISCO_ADDR_REPLY:
+ /* Reply on address request, ignore */
+ break;
+ case CISCO_KEEPALIVE_REQ:
+ sp->pp_alivecnt = 0;
+ sp->pp_rseq = ntohl (h->par1);
+ if (sp->pp_seq == sp->pp_rseq) {
+ /* Local and remote sequence numbers are equal.
+ * Probably, the line is in loopback mode. */
+ int newseq;
+ if (sp->pp_loopcnt >= MAXALIVECNT) {
+ printk (KERN_WARNING "%s: loopback\n",
+ dev->name);
+ sp->pp_loopcnt = 0;
+ if (dev->flags & IFF_UP) {
+ if_down (dev);
+ }
+ }
+ ++sp->pp_loopcnt;
+
+ /* Generate new local sequence number */
+ get_random_bytes(&newseq, sizeof(newseq));
+ sp->pp_seq ^= newseq;
+ break;
+ }
+ sp->pp_loopcnt = 0;
+ if (sp->pp_link_state==SPPP_LINK_DOWN &&
+ (dev->flags & IFF_UP)) {
+ sp->pp_link_state=SPPP_LINK_UP;
+ printk (KERN_INFO "%s: protocol up\n", dev->name);
+ }
+ break;
+ case CISCO_ADDR_REQ:
+ /* Stolen from net/ipv4/devinet.c -- SIOCGIFADDR ioctl */
+ {
+ struct in_device *in_dev;
+ struct in_ifaddr *ifa;
+ u32 addr = 0, mask = ~0; /* FIXME: is the mask correct? */
+#ifdef CONFIG_INET
+ rcu_read_lock();
+ if ((in_dev = __in_dev_get_rcu(dev)) != NULL)
+ {
+ for (ifa=in_dev->ifa_list; ifa != NULL;
+ ifa=ifa->ifa_next) {
+ if (strcmp(dev->name, ifa->ifa_label) == 0)
+ {
+ addr = ifa->ifa_local;
+ mask = ifa->ifa_mask;
+ break;
+ }
+ }
+ }
+ rcu_read_unlock();
+#endif
+ /* I hope both addr and mask are in the net order */
+ sppp_cisco_send (sp, CISCO_ADDR_REPLY, addr, mask);
+ break;
+ }
+ }
+}
+
+
+/*
+ * Send PPP LCP packet.
+ */
+
+static void sppp_cp_send (struct sppp *sp, u16 proto, u8 type,
+ u8 ident, u16 len, void *data)
+{
+ struct ppp_header *h;
+ struct lcp_header *lh;
+ struct sk_buff *skb;
+ struct net_device *dev = sp->pp_if;
+
+ skb=alloc_skb(dev->hard_header_len+PPP_HEADER_LEN+LCP_HEADER_LEN+len,
+ GFP_ATOMIC);
+ if (skb==NULL)
+ return;
+
+ skb_reserve(skb,dev->hard_header_len);
+
+ h = (struct ppp_header *)skb_put(skb, sizeof(struct ppp_header));
+ h->address = PPP_ALLSTATIONS; /* broadcast address */
+ h->control = PPP_UI; /* Unnumbered Info */
+ h->protocol = htons (proto); /* Link Control Protocol */
+
+ lh = (struct lcp_header *)skb_put(skb, sizeof(struct lcp_header));
+ lh->type = type;
+ lh->ident = ident;
+ lh->len = htons (LCP_HEADER_LEN + len);
+
+ if (len)
+ memcpy(skb_put(skb,len),data, len);
+
+ if (sp->pp_flags & PP_DEBUG) {
+ printk (KERN_WARNING "%s: %s output <%s id=%xh len=%xh",
+ dev->name,
+ proto==PPP_LCP ? "lcp" : "ipcp",
+ proto==PPP_LCP ? sppp_lcp_type_name (lh->type) :
+ sppp_ipcp_type_name (lh->type), lh->ident,
+ ntohs (lh->len));
+ if (len)
+ sppp_print_bytes ((u8*) (lh+1), len);
+ printk (">\n");
+ }
+ sp->obytes += skb->len;
+ /* Control is high priority so it doesn't get queued behind data */
+ skb->priority=TC_PRIO_CONTROL;
+ skb->dev = dev;
+ skb_queue_tail(&tx_queue, skb);
+}
+
+/*
+ * Send Cisco keepalive packet.
+ */
+
+static void sppp_cisco_send (struct sppp *sp, int type, long par1, long par2)
+{
+ struct ppp_header *h;
+ struct cisco_packet *ch;
+ struct sk_buff *skb;
+ struct net_device *dev = sp->pp_if;
+ u32 t = jiffies * 1000/HZ;
+
+ skb=alloc_skb(dev->hard_header_len+PPP_HEADER_LEN+CISCO_PACKET_LEN,
+ GFP_ATOMIC);
+
+ if(skb==NULL)
+ return;
+
+ skb_reserve(skb, dev->hard_header_len);
+ h = (struct ppp_header *)skb_put (skb, sizeof(struct ppp_header));
+ h->address = CISCO_MULTICAST;
+ h->control = 0;
+ h->protocol = htons (CISCO_KEEPALIVE);
+
+ ch = (struct cisco_packet*)skb_put(skb, CISCO_PACKET_LEN);
+ ch->type = htonl (type);
+ ch->par1 = htonl (par1);
+ ch->par2 = htonl (par2);
+ ch->rel = -1;
+ ch->time0 = htons ((u16) (t >> 16));
+ ch->time1 = htons ((u16) t);
+
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_WARNING "%s: cisco output: <%xh %xh %xh %xh %xh-%xh>\n",
+ dev->name, ntohl (ch->type), ch->par1,
+ ch->par2, ch->rel, ch->time0, ch->time1);
+ sp->obytes += skb->len;
+ skb->priority=TC_PRIO_CONTROL;
+ skb->dev = dev;
+ skb_queue_tail(&tx_queue, skb);
+}
+
+/**
+ * sppp_close - close down a synchronous PPP or Cisco HDLC link
+ * @dev: The network device to drop the link of
+ *
+ * This drops the logical interface to the channel. It is not
+ * done politely as we assume we will also be dropping DTR. Any
+ * timeouts are killed.
+ */
+
+int sppp_close (struct net_device *dev)
+{
+ struct sppp *sp = (struct sppp *)sppp_of(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sp->lock, flags);
+ sp->pp_link_state = SPPP_LINK_DOWN;
+ sp->lcp.state = LCP_STATE_CLOSED;
+ sp->ipcp.state = IPCP_STATE_CLOSED;
+ sppp_clear_timeout (sp);
+ spin_unlock_irqrestore(&sp->lock, flags);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(sppp_close);
+
+/**
+ * sppp_open - open a synchronous PPP or Cisco HDLC link
+ * @dev: Network device to activate
+ *
+ * Close down any existing synchronous session and commence
+ * from scratch. In the PPP case this means negotiating LCP/IPCP
+ * and friends, while for Cisco HDLC we simply need to start sending
+ * keepalives
+ */
+
+int sppp_open (struct net_device *dev)
+{
+ struct sppp *sp = (struct sppp *)sppp_of(dev);
+ unsigned long flags;
+
+ sppp_close(dev);
+
+ spin_lock_irqsave(&sp->lock, flags);
+ if (!(sp->pp_flags & PP_CISCO)) {
+ sppp_lcp_open (sp);
+ }
+ sp->pp_link_state = SPPP_LINK_DOWN;
+ spin_unlock_irqrestore(&sp->lock, flags);
+ sppp_flush_xmit();
+
+ return 0;
+}
+
+EXPORT_SYMBOL(sppp_open);
+
+/**
+ * sppp_reopen - notify of physical link loss
+ * @dev: Device that lost the link
+ *
+ * This function informs the synchronous protocol code that
+ * the underlying link died (for example a carrier drop on X.21)
+ *
+ * We increment the magic numbers to ensure that if the other end
+ * failed to notice we will correctly start a new session. It happens
+ * do to the nature of telco circuits is that you can lose carrier on
+ * one endonly.
+ *
+ * Having done this we go back to negotiating. This function may
+ * be called from an interrupt context.
+ */
+
+int sppp_reopen (struct net_device *dev)
+{
+ struct sppp *sp = (struct sppp *)sppp_of(dev);
+ unsigned long flags;
+
+ sppp_close(dev);
+
+ spin_lock_irqsave(&sp->lock, flags);
+ if (!(sp->pp_flags & PP_CISCO))
+ {
+ sp->lcp.magic = jiffies;
+ ++sp->pp_seq;
+ sp->lcp.state = LCP_STATE_CLOSED;
+ sp->ipcp.state = IPCP_STATE_CLOSED;
+ /* Give it a moment for the line to settle then go */
+ sppp_set_timeout (sp, 1);
+ }
+ sp->pp_link_state=SPPP_LINK_DOWN;
+ spin_unlock_irqrestore(&sp->lock, flags);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(sppp_reopen);
+
+/**
+ * sppp_change_mtu - Change the link MTU
+ * @dev: Device to change MTU on
+ * @new_mtu: New MTU
+ *
+ * Change the MTU on the link. This can only be called with
+ * the link down. It returns an error if the link is up or
+ * the mtu is out of range.
+ */
+
+static int sppp_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if(new_mtu<128||new_mtu>PPP_MTU||(dev->flags&IFF_UP))
+ return -EINVAL;
+ dev->mtu=new_mtu;
+ return 0;
+}
+
+/**
+ * sppp_do_ioctl - Ioctl handler for ppp/hdlc
+ * @dev: Device subject to ioctl
+ * @ifr: Interface request block from the user
+ * @cmd: Command that is being issued
+ *
+ * This function handles the ioctls that may be issued by the user
+ * to control the settings of a PPP/HDLC link. It does both busy
+ * and security checks. This function is intended to be wrapped by
+ * callers who wish to add additional ioctl calls of their own.
+ */
+
+int sppp_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct sppp *sp = (struct sppp *)sppp_of(dev);
+
+ if(dev->flags&IFF_UP)
+ return -EBUSY;
+
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ switch(cmd)
+ {
+ case SPPPIOCCISCO:
+ sp->pp_flags|=PP_CISCO;
+ dev->type = ARPHRD_HDLC;
+ break;
+ case SPPPIOCPPP:
+ sp->pp_flags&=~PP_CISCO;
+ dev->type = ARPHRD_PPP;
+ break;
+ case SPPPIOCDEBUG:
+ sp->pp_flags&=~PP_DEBUG;
+ if(ifr->ifr_flags)
+ sp->pp_flags|=PP_DEBUG;
+ break;
+ case SPPPIOCGFLAGS:
+ if(copy_to_user(ifr->ifr_data, &sp->pp_flags, sizeof(sp->pp_flags)))
+ return -EFAULT;
+ break;
+ case SPPPIOCSFLAGS:
+ if(copy_from_user(&sp->pp_flags, ifr->ifr_data, sizeof(sp->pp_flags)))
+ return -EFAULT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+EXPORT_SYMBOL(sppp_do_ioctl);
+
+/**
+ * sppp_attach - attach synchronous PPP/HDLC to a device
+ * @pd: PPP device to initialise
+ *
+ * This initialises the PPP/HDLC support on an interface. At the
+ * time of calling the dev element must point to the network device
+ * that this interface is attached to. The interface should not yet
+ * be registered.
+ */
+
+void sppp_attach(struct ppp_device *pd)
+{
+ struct net_device *dev = pd->dev;
+ struct sppp *sp = &pd->sppp;
+ unsigned long flags;
+
+ /* Make sure embedding is safe for sppp_of */
+ BUG_ON(sppp_of(dev) != sp);
+
+ spin_lock_irqsave(&spppq_lock, flags);
+ /* Initialize keepalive handler. */
+ if (! spppq)
+ {
+ init_timer(&sppp_keepalive_timer);
+ sppp_keepalive_timer.expires=jiffies+10*HZ;
+ sppp_keepalive_timer.function=sppp_keepalive;
+ add_timer(&sppp_keepalive_timer);
+ }
+ /* Insert new entry into the keepalive list. */
+ sp->pp_next = spppq;
+ spppq = sp;
+ spin_unlock_irqrestore(&spppq_lock, flags);
+
+ sp->pp_loopcnt = 0;
+ sp->pp_alivecnt = 0;
+ sp->pp_seq = 0;
+ sp->pp_rseq = 0;
+ sp->pp_flags = PP_KEEPALIVE|PP_CISCO|debug;/*PP_DEBUG;*/
+ sp->lcp.magic = 0;
+ sp->lcp.state = LCP_STATE_CLOSED;
+ sp->ipcp.state = IPCP_STATE_CLOSED;
+ sp->pp_if = dev;
+ spin_lock_init(&sp->lock);
+
+ /*
+ * Device specific setup. All but interrupt handler and
+ * hard_start_xmit.
+ */
+
+ dev->hard_header = sppp_hard_header;
+ dev->rebuild_header = sppp_rebuild_header;
+ dev->tx_queue_len = 10;
+ dev->type = ARPHRD_HDLC;
+ dev->addr_len = 0;
+ dev->hard_header_len = sizeof(struct ppp_header);
+ dev->mtu = PPP_MTU;
+ /*
+ * These 4 are callers but MUST also call sppp_ functions
+ */
+ dev->do_ioctl = sppp_do_ioctl;
+#if 0
+ dev->get_stats = NULL; /* Let the driver override these */
+ dev->open = sppp_open;
+ dev->stop = sppp_close;
+#endif
+ dev->change_mtu = sppp_change_mtu;
+ dev->hard_header_cache = NULL;
+ dev->header_cache_update = NULL;
+ dev->flags = IFF_MULTICAST|IFF_POINTOPOINT|IFF_NOARP;
+}
+
+EXPORT_SYMBOL(sppp_attach);
+
+/**
+ * sppp_detach - release PPP resources from a device
+ * @dev: Network device to release
+ *
+ * Stop and free up any PPP/HDLC resources used by this
+ * interface. This must be called before the device is
+ * freed.
+ */
+
+void sppp_detach (struct net_device *dev)
+{
+ struct sppp **q, *p, *sp = (struct sppp *)sppp_of(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&spppq_lock, flags);
+ /* Remove the entry from the keepalive list. */
+ for (q = &spppq; (p = *q); q = &p->pp_next)
+ if (p == sp) {
+ *q = p->pp_next;
+ break;
+ }
+
+ /* Stop keepalive handler. */
+ if (! spppq)
+ del_timer(&sppp_keepalive_timer);
+ sppp_clear_timeout (sp);
+ spin_unlock_irqrestore(&spppq_lock, flags);
+}
+
+EXPORT_SYMBOL(sppp_detach);
+
+/*
+ * Analyze the LCP Configure-Request options list
+ * for the presence of unknown options.
+ * If the request contains unknown options, build and
+ * send Configure-reject packet, containing only unknown options.
+ */
+static int
+sppp_lcp_conf_parse_options (struct sppp *sp, struct lcp_header *h,
+ int len, u32 *magic)
+{
+ u8 *buf, *r, *p;
+ int rlen;
+
+ len -= 4;
+ buf = r = kmalloc (len, GFP_ATOMIC);
+ if (! buf)
+ return (0);
+
+ p = (void*) (h+1);
+ for (rlen=0; len>1 && p[1]; len-=p[1], p+=p[1]) {
+ switch (*p) {
+ case LCP_OPT_MAGIC:
+ /* Magic number -- extract. */
+ if (len >= 6 && p[1] == 6) {
+ *magic = (u32)p[2] << 24 |
+ (u32)p[3] << 16 | p[4] << 8 | p[5];
+ continue;
+ }
+ break;
+ case LCP_OPT_ASYNC_MAP:
+ /* Async control character map -- check to be zero. */
+ if (len >= 6 && p[1] == 6 && ! p[2] && ! p[3] &&
+ ! p[4] && ! p[5])
+ continue;
+ break;
+ case LCP_OPT_MRU:
+ /* Maximum receive unit -- always OK. */
+ continue;
+ default:
+ /* Others not supported. */
+ break;
+ }
+ /* Add the option to rejected list. */
+ memcpy(r, p, p[1]);
+ r += p[1];
+ rlen += p[1];
+ }
+ if (rlen)
+ sppp_cp_send (sp, PPP_LCP, LCP_CONF_REJ, h->ident, rlen, buf);
+ kfree(buf);
+ return (rlen == 0);
+}
+
+static void sppp_ipcp_input (struct sppp *sp, struct sk_buff *skb)
+{
+ struct lcp_header *h;
+ struct net_device *dev = sp->pp_if;
+ int len = skb->len;
+
+ if (!pskb_may_pull(skb, sizeof(struct lcp_header))) {
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_WARNING "%s: invalid ipcp packet length: %d bytes\n",
+ dev->name, len);
+ return;
+ }
+ h = (struct lcp_header *)skb->data;
+ skb_pull(skb,sizeof(struct lcp_header));
+ if (sp->pp_flags & PP_DEBUG) {
+ printk (KERN_WARNING "%s: ipcp input: %d bytes <%s id=%xh len=%xh",
+ dev->name, len,
+ sppp_ipcp_type_name (h->type), h->ident, ntohs (h->len));
+ if (len > 4)
+ sppp_print_bytes ((u8*) (h+1), len-4);
+ printk (">\n");
+ }
+ if (len > ntohs (h->len))
+ len = ntohs (h->len);
+ switch (h->type) {
+ default:
+ /* Unknown packet type -- send Code-Reject packet. */
+ sppp_cp_send (sp, PPP_IPCP, IPCP_CODE_REJ, ++sp->pp_seq, len, h);
+ break;
+ case IPCP_CONF_REQ:
+ if (len < 4) {
+ if (sp->pp_flags & PP_DEBUG)
+ printk (KERN_WARNING "%s: invalid ipcp configure request packet length: %d bytes\n",
+ dev->name, len);
+ return;
+ }
+ if (len > 4) {
+ sppp_cp_send (sp, PPP_IPCP, LCP_CONF_REJ, h->ident,
+ len-4, h+1);
+
+ switch (sp->ipcp.state) {
+ case IPCP_STATE_OPENED:
+ /* Initiate renegotiation. */
+ sppp_ipcp_open (sp);
+ /* fall through... */
+ case IPCP_STATE_ACK_SENT:
+ /* Go to closed state. */
+ sp->ipcp.state = IPCP_STATE_CLOSED;
+ }
+ } else {
+ /* Send Configure-Ack packet. */
+ sppp_cp_send (sp, PPP_IPCP, IPCP_CONF_ACK, h->ident,
+ 0, NULL);
+ /* Change the state. */
+ if (sp->ipcp.state == IPCP_STATE_ACK_RCVD)
+ sp->ipcp.state = IPCP_STATE_OPENED;
+ else
+ sp->ipcp.state = IPCP_STATE_ACK_SENT;
+ }
+ break;
+ case IPCP_CONF_ACK:
+ if (h->ident != sp->ipcp.confid)
+ break;
+ sppp_clear_timeout (sp);
+ switch (sp->ipcp.state) {
+ case IPCP_STATE_CLOSED:
+ sp->ipcp.state = IPCP_STATE_ACK_RCVD;
+ sppp_set_timeout (sp, 5);
+ break;
+ case IPCP_STATE_ACK_SENT:
+ sp->ipcp.state = IPCP_STATE_OPENED;
+ break;
+ }
+ break;
+ case IPCP_CONF_NAK:
+ case IPCP_CONF_REJ:
+ if (h->ident != sp->ipcp.confid)
+ break;
+ sppp_clear_timeout (sp);
+ /* Initiate renegotiation. */
+ sppp_ipcp_open (sp);
+ if (sp->ipcp.state != IPCP_STATE_ACK_SENT)
+ /* Go to closed state. */
+ sp->ipcp.state = IPCP_STATE_CLOSED;
+ break;
+ case IPCP_TERM_REQ:
+ /* Send Terminate-Ack packet. */
+ sppp_cp_send (sp, PPP_IPCP, IPCP_TERM_ACK, h->ident, 0, NULL);
+ /* Go to closed state. */
+ sp->ipcp.state = IPCP_STATE_CLOSED;
+ /* Initiate renegotiation. */
+ sppp_ipcp_open (sp);
+ break;
+ case IPCP_TERM_ACK:
+ /* Ignore for now. */
+ case IPCP_CODE_REJ:
+ /* Ignore for now. */
+ break;
+ }
+}
+
+static void sppp_lcp_open (struct sppp *sp)
+{
+ char opt[6];
+
+ if (! sp->lcp.magic)
+ sp->lcp.magic = jiffies;
+ opt[0] = LCP_OPT_MAGIC;
+ opt[1] = sizeof (opt);
+ opt[2] = sp->lcp.magic >> 24;
+ opt[3] = sp->lcp.magic >> 16;
+ opt[4] = sp->lcp.magic >> 8;
+ opt[5] = sp->lcp.magic;
+ sp->lcp.confid = ++sp->pp_seq;
+ sppp_cp_send (sp, PPP_LCP, LCP_CONF_REQ, sp->lcp.confid,
+ sizeof (opt), &opt);
+ sppp_set_timeout (sp, 2);
+}
+
+static void sppp_ipcp_open (struct sppp *sp)
+{
+ sp->ipcp.confid = ++sp->pp_seq;
+ sppp_cp_send (sp, PPP_IPCP, IPCP_CONF_REQ, sp->ipcp.confid, 0, NULL);
+ sppp_set_timeout (sp, 2);
+}
+
+/*
+ * Process PPP control protocol timeouts.
+ */
+
+static void sppp_cp_timeout (unsigned long arg)
+{
+ struct sppp *sp = (struct sppp*) arg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sp->lock, flags);
+
+ sp->pp_flags &= ~PP_TIMO;
+ if (! (sp->pp_if->flags & IFF_UP) || (sp->pp_flags & PP_CISCO)) {
+ spin_unlock_irqrestore(&sp->lock, flags);
+ return;
+ }
+ switch (sp->lcp.state) {
+ case LCP_STATE_CLOSED:
+ /* No ACK for Configure-Request, retry. */
+ sppp_lcp_open (sp);
+ break;
+ case LCP_STATE_ACK_RCVD:
+ /* ACK got, but no Configure-Request for peer, retry. */
+ sppp_lcp_open (sp);
+ sp->lcp.state = LCP_STATE_CLOSED;
+ break;
+ case LCP_STATE_ACK_SENT:
+ /* ACK sent but no ACK for Configure-Request, retry. */
+ sppp_lcp_open (sp);
+ break;
+ case LCP_STATE_OPENED:
+ /* LCP is already OK, try IPCP. */
+ switch (sp->ipcp.state) {
+ case IPCP_STATE_CLOSED:
+ /* No ACK for Configure-Request, retry. */
+ sppp_ipcp_open (sp);
+ break;
+ case IPCP_STATE_ACK_RCVD:
+ /* ACK got, but no Configure-Request for peer, retry. */
+ sppp_ipcp_open (sp);
+ sp->ipcp.state = IPCP_STATE_CLOSED;
+ break;
+ case IPCP_STATE_ACK_SENT:
+ /* ACK sent but no ACK for Configure-Request, retry. */
+ sppp_ipcp_open (sp);
+ break;
+ case IPCP_STATE_OPENED:
+ /* IPCP is OK. */
+ break;
+ }
+ break;
+ }
+ spin_unlock_irqrestore(&sp->lock, flags);
+ sppp_flush_xmit();
+}
+
+static char *sppp_lcp_type_name (u8 type)
+{
+ static char buf [8];
+ switch (type) {
+ case LCP_CONF_REQ: return ("conf-req");
+ case LCP_CONF_ACK: return ("conf-ack");
+ case LCP_CONF_NAK: return ("conf-nack");
+ case LCP_CONF_REJ: return ("conf-rej");
+ case LCP_TERM_REQ: return ("term-req");
+ case LCP_TERM_ACK: return ("term-ack");
+ case LCP_CODE_REJ: return ("code-rej");
+ case LCP_PROTO_REJ: return ("proto-rej");
+ case LCP_ECHO_REQ: return ("echo-req");
+ case LCP_ECHO_REPLY: return ("echo-reply");
+ case LCP_DISC_REQ: return ("discard-req");
+ }
+ sprintf (buf, "%xh", type);
+ return (buf);
+}
+
+static char *sppp_ipcp_type_name (u8 type)
+{
+ static char buf [8];
+ switch (type) {
+ case IPCP_CONF_REQ: return ("conf-req");
+ case IPCP_CONF_ACK: return ("conf-ack");
+ case IPCP_CONF_NAK: return ("conf-nack");
+ case IPCP_CONF_REJ: return ("conf-rej");
+ case IPCP_TERM_REQ: return ("term-req");
+ case IPCP_TERM_ACK: return ("term-ack");
+ case IPCP_CODE_REJ: return ("code-rej");
+ }
+ sprintf (buf, "%xh", type);
+ return (buf);
+}
+
+static void sppp_print_bytes (u_char *p, u16 len)
+{
+ printk (" %x", *p++);
+ while (--len > 0)
+ printk ("-%x", *p++);
+}
+
+/**
+ * sppp_rcv - receive and process a WAN PPP frame
+ * @skb: The buffer to process
+ * @dev: The device it arrived on
+ * @p: Unused
+ * @orig_dev: Unused
+ *
+ * Protocol glue. This drives the deferred processing mode the poorer
+ * cards use. This can be called directly by cards that do not have
+ * timing constraints but is normally called from the network layer
+ * after interrupt servicing to process frames queued via netif_rx.
+ */
+
+static int sppp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *p, struct net_device *orig_dev)
+{
+ if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+ return NET_RX_DROP;
+ sppp_input(dev,skb);
+ return 0;
+}
+
+static struct packet_type sppp_packet_type = {
+ .type = __constant_htons(ETH_P_WAN_PPP),
+ .func = sppp_rcv,
+};
+
+static char banner[] __initdata =
+ KERN_INFO "Cronyx Ltd, Synchronous PPP and CISCO HDLC (c) 1994\n"
+ KERN_INFO "Linux port (c) 1998 Building Number Three Ltd & "
+ "Jan \"Yenya\" Kasprzak.\n";
+
+static int __init sync_ppp_init(void)
+{
+ if(debug)
+ debug=PP_DEBUG;
+ printk(banner);
+ skb_queue_head_init(&tx_queue);
+ dev_add_pack(&sppp_packet_type);
+ return 0;
+}
+
+
+static void __exit sync_ppp_cleanup(void)
+{
+ dev_remove_pack(&sppp_packet_type);
+}
+
+module_init(sync_ppp_init);
+module_exit(sync_ppp_cleanup);
+module_param(debug, int, 0);
+MODULE_LICENSE("GPL");
+