/*
 * Dynamic Span Interface for Zaptel (Multi-Span Ethernet Interface)
 *
 * Modifications by Brett Carrington
 * Copyright (C) 2007-2008, Redfone Communications, LLC.
 *
 * Written by Joseph Benden
 * Copyright (C) 2007, Thralling Penguin LLC
 *
 * Based on code written by Mark Spencer
 * Copyright (C) 2001, Linux Support Services, Inc.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/version.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/crc32.h>

#if 0
#ifdef CONFIG_DEVFS_FS
#include <linux/devfs_fs_kernel.h>
#endif
#endif

#ifdef STANDALONE_ZAPATA
#include "zaptel.h"
#else
#include <zaptel/zaptel.h>
#endif

#define ETH_P_ZTDETH			0xd00d
#define ETHMF_FLAG_IGNORE_CHAN0		(1 << 3)

struct ztdeth_header {
	unsigned short subaddr;
};

/* Timer for enabling spans - used to combat a Zaptel lock problem */
static struct timer_list timer;

/* Variable used by the timer to correctly deduce the amount of time passed */
static int timer_counter;

/* We take the raw message, put it in an Ethernet frame, and add a two-byte
   addressing header at the top for future use */
#ifdef DEFINE_SPINLOCK
static DEFINE_SPINLOCK(zlock);
#else
static spinlock_t zlock = SPIN_LOCK_UNLOCKED;
#endif

static struct sk_buff_head skbs;

static struct ztdeth {
	unsigned char addr[ETH_ALEN];
	unsigned int addr_hash;
	unsigned short subaddr;		/* Network byte order */
	struct zt_span *span;
	char ethdev[IFNAMSIZ];
	struct net_device *dev;
	struct ztdeth *next;
	unsigned char *msgbuf;
	int msgbuf_len;
	int ready;
	int delay;
	unsigned char *rcvbuf;
	spinlock_t rblock;
	int real_channels;
	int no_front_padding;
} *zdevs = NULL;

/*
 * Return the ztdeth structure for a given MultiFrame zt_span
 */
struct ztdeth *ztdethmf_getz(struct zt_span *span)
{
	unsigned long flags;
	struct ztdeth *z = NULL, *found = NULL;

	spin_lock_irqsave(&zlock, flags);
	z = zdevs;
	while (z) {
		if (z->span == span)
			found = z;
		z = z->next;
	}
	spin_unlock_irqrestore(&zlock, flags);
	return found;
}

struct zt_span *ztdethmf_getspan(unsigned char *addr, unsigned short subaddr)
{
	unsigned long flags;
	struct ztdeth *z;
	struct zt_span *span = NULL;

	spin_lock_irqsave(&zlock, flags);
	z = zdevs;
	while (z) {
		if (!z->delay) {
			if (!memcmp(addr, z->addr, ETH_ALEN) &&
			    z->subaddr == subaddr)
				break;
		}
		z = z->next;
	}
	if (z)
		span = z->span;
	spin_unlock_irqrestore(&zlock, flags);
	return span;
}

#if defined(LINUX26) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
static int ztdethmf_rcv(struct sk_buff *skb, struct net_device *dev,
		struct packet_type *pt, struct net_device *orig_dev)
#else
static int ztdethmf_rcv(struct sk_buff *skb, struct net_device *dev,
		struct packet_type *pt)
#endif
{
	int num_spans = 0, span_index = 0;
	struct ztdeth_header *zh;
	unsigned char *data;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
	zh = (struct ztdeth_header *) skb_network_header(skb);
#else
	zh = (struct ztdeth_header *) skb->nh.raw;
#endif
	/* The high order bit denotes that this is a multiframe span
	 * and not a regular ztd-eth span. */
	if (unlikely(!(ntohs(zh->subaddr) & 0x8000))) {
		kfree_skb(skb);
		return 0;
	}

	/* got a multi-span frame */
	num_spans = ntohs(zh->subaddr) & 0xFF;

	/* Currently max of 4 spans supported */
	if (num_spans > 4) {
		kfree_skb(skb);
		return 0;
	}

	skb_pull(skb, sizeof(struct ztdeth_header));
	data = (unsigned char *) skb->data;

	do {
		struct zt_span *span;
		struct ztdeth *z;
		unsigned int samples, channels, rbslen, flags;
		unsigned int skip = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
		span = ztdethmf_getspan(eth_hdr(skb)->h_source,
				htons(span_index));
#else
		span = ztdethmf_getspan(skb->mac.ethernet->h_source,
				htons(span_index));
#endif
		if (!span) {
			kfree_skb(skb);
			return 0;
		}

		samples = data[span_index * 6] & 0xFF;
		flags = data[span_index * 6 + 1] & 0xFF;
		channels = data[span_index * 6 + 5] & 0xFF;

		/* Precomputed defaults for most typical values */
		if (channels == 24) {
			rbslen = 12;
		} else if (channels == 31) {
			rbslen = 16;
		} else {
			rbslen = ((channels + 3) / 4) * 2;
		}

		if (unlikely(!(samples == 8 && channels < 32))) {
			kfree_skb(skb);
			return 0;
		}

		/* Find the correct ethmf span */
		z = ztdethmf_getz(span);
		if (!z) {
			kfree_skb(skb);
			return 0;
		}

		spin_lock(&z->rblock);

		/* TDM Header */
		memcpy(z->rcvbuf, data + 6 * span_index, 6);

		/*
		 * If we ignore channel zero we must skip the first eight bytes
		 * and ensure that ztdynamic doesn't get confused by this new
		 * flag
		 */
		if (flags & ETHMF_FLAG_IGNORE_CHAN0) {
			skip = 8;

			/* Remove this flag since ztdynamic may not understand it */
			z->rcvbuf[1] = flags & ~(ETHMF_FLAG_IGNORE_CHAN0);

			/* Additionally, now we will transmit with front padding */
			z->no_front_padding = 0;
		} else {
			/* Disable front padding if we recv'd a packet without it */
			z->no_front_padding = 1;
		}

		/* RBS Header */
		memcpy(z->rcvbuf + 6, data + 6 * num_spans + 16 * span_index,
			rbslen);

		/* Payload: 256 == 32*8 -- if padding lengths change, this
		   must be modified */
		memcpy(z->rcvbuf + 6 + rbslen, data + 6 * num_spans +
			16 * num_spans + (256) * span_index + skip,
			channels * 8);

		zt_dynamic_receive(span, z->rcvbuf, 6 + rbslen + channels * 8);
		spin_unlock(&z->rblock);
		span_index++;
	} while (span_index < num_spans);

	kfree_skb(skb);
	return 0;
}

static int ztdethmf_notifier(struct notifier_block *block, unsigned long event,
		void *ptr)
{
	struct net_device *dev = ptr;
	struct ztdeth *z;
	unsigned long flags;

	switch (event) {
	case NETDEV_GOING_DOWN:
	case NETDEV_DOWN:
		spin_lock_irqsave(&zlock, flags);
		z = zdevs;
		while (z) {
			/* Note that the device no longer exists */
			if (z->dev == dev)
				z->dev = NULL;
			z = z->next;
		}
		spin_unlock_irqrestore(&zlock, flags);
		break;
	case NETDEV_UP:
		spin_lock_irqsave(&zlock, flags);
		z = zdevs;
		while (z) {
			/* Now that the device exists again, use it */
			if (!strcmp(z->ethdev, dev->name))
				z->dev = dev;
			z = z->next;
		}
		spin_unlock_irqrestore(&zlock, flags);
		break;
	}
	return 0;
}

static int ztdethmf_transmit(void *pvt, unsigned char *msg, int msglen)
{
	struct ztdeth *z, *t, *ready_spans[8];
	struct sk_buff *skb;
	struct ztdeth_header *zh;
	unsigned long flags;
	struct net_device *dev;
	unsigned char addr[ETH_ALEN];
	unsigned short tmp_subaddr;
	unsigned short span_count = 0, spans_ready = 0, index = 0;

	spin_lock_irqsave(&zlock, flags);
	z = pvt;

	if (unlikely(!z->dev)) {
		spin_unlock_irqrestore(&zlock, flags);
		return 0;
	}

	/* Store the present data in the buffers */
	memcpy(z->msgbuf, msg, msglen);
	z->msgbuf_len = msglen;
	z->ready = 1;

	/* Are all available spans ready for transmit? */
	t = zdevs;
	while (t) {
		if (!t->delay) {
			/*
			 * Profiling showed that this hash compare is faster
			 * than the previous memcmp
			 */
			if (t->addr_hash == z->addr_hash) {
				++span_count;
				if (t->ready) {
					ready_spans[ntohs(t->subaddr)] = t;
					++spans_ready;
				}
			}
		}
		t = t->next;
	}

	/* Copy fields to local variables to release the spinlock ASAP */
	dev = z->dev;
	memcpy(addr, z->addr, sizeof(z->addr));
	spin_unlock_irqrestore(&zlock, flags);

	/* ready_spans[8] forces us to check spans_ready < 8 */
	if (span_count && spans_ready && span_count == spans_ready &&
			spans_ready < 8) {
		int pad[spans_ready], rbs[spans_ready];

		for (index = 0; index < spans_ready; index++) {
			int chan = ready_spans[index]->real_channels;

			/* By default we pad to 32 channels, but if
			 * no_front_padding is false then we have an 8-byte pad
			 * in the front, so this implies one less channel */
			if (ready_spans[index]->no_front_padding)
				pad[index] = (32 - chan) * 8;
			else
				pad[index] = (31 - chan) * 8;

			if (chan == 24) {
				rbs[index] = 12;
			} else if (chan == 31) {
				rbs[index] = 16;
			} else {
				rbs[index] = ((chan + 3) / 4) * 2;
			}
		}

		/* Standard size for 32-chan frame */
		skb = dev_alloc_skb(1112 + dev->hard_header_len
			+ sizeof(struct ztdeth_header) + 32);
		if (!skb) {
			if (printk_ratelimit())
				printk(KERN_WARNING "ztd-ethmf: Unable to allocate memory for transmission!\n");
			return 0;
		}

		/* Reserve header space */
		skb_reserve(skb, dev->hard_header_len +
			sizeof(struct ztdeth_header));

		for (index = 0; index < spans_ready; index++) {
			if (!ready_spans[index]->no_front_padding)
				ready_spans[index]->msgbuf[1] |=
					ETHMF_FLAG_IGNORE_CHAN0;
			memcpy(skb_put(skb, 6), ready_spans[index]->msgbuf, 6);
		}

		for (index = 0; index < spans_ready; index++) {
			memcpy(skb_put(skb, 16),
				ready_spans[index]->msgbuf + 6, rbs[index]);
		}

		for (index = 0; index < spans_ready; index++) {
			int chan = ready_spans[index]->real_channels;

			if (!ready_spans[index]->no_front_padding) {
				/* This adds an additional (padded) channel */
				memset(skb_put(skb, 8), 0xA5, 8); /* ETHMF_IGNORE_CHAN0 */
			}
			memcpy(skb_put(skb, chan * 8),
				ready_spans[index]->msgbuf + (6 + rbs[index]),
				chan * 8);
			if (pad[index] > 0)
				memset(skb_put(skb, pad[index]), 0xDD, pad[index]);

			/* We're done with this span */
			ready_spans[index]->ready = 0;
		}

		/* Throw on header */
		zh = (struct ztdeth_header *) skb_push(skb,
				sizeof(struct ztdeth_header));
		tmp_subaddr = (0x8000 | (span_count & 0xFF));
		zh->subaddr = htons(tmp_subaddr);

		/* Setup protocol and such */
		skb->protocol = __constant_htons(ETH_P_ZTDETH);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
		skb_set_network_header(skb, 0);
#else
		skb->nh.raw = skb->data;
#endif
		skb->dev = dev;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
		dev_hard_header(skb, dev, ETH_P_ZTDETH, addr, dev->dev_addr,
			skb->len);
#else
		if (dev->hard_header)
			dev->hard_header(skb, dev, ETH_P_ZTDETH, addr,
				dev->dev_addr, skb->len);
#endif
		skb_queue_tail(&skbs, skb);
	}
	return 0;
}

static int ztdethmf_flush(void)
{
	struct sk_buff *skb;

	/* Handle all transmissions now */
	while ((skb = skb_dequeue(&skbs))) {
		dev_queue_xmit(skb);
	}
	return 0;
}

static struct packet_type ztdethmf_ptype = {
	type: __constant_htons(ETH_P_ZTDETH),	/* Protocol */
	dev: NULL,				/* Device (NULL = wildcard) */
	func: ztdethmf_rcv			/* Recv func */
};

static int digit2int(char d)
{
	switch (d) {
	case 'F':
	case 'E':
	case 'D':
	case 'C':
	case 'B':
	case 'A':
		return d - 'A' + 10;
	case 'f':
	case 'e':
	case 'd':
	case 'c':
	case 'b':
	case 'a':
		return d - 'a' + 10;
	case '9':
	case '8':
	case '7':
	case '6':
	case '5':
	case '4':
	case '3':
	case '2':
	case '1':
	case '0':
		return d - '0';
	}
	return -1;
}

static int hex2int(char *s)
{
	int res;
	int tmp;

	/* Gotta be at least one digit */
	if (strlen(s) < 1)
		return -1;
	/* Can't be more than two */
	if (strlen(s) > 2)
		return -1;
	/* Grab the first digit */
	res = digit2int(s[0]);
	if (res < 0)
		return -1;
	tmp = res;
	/* Grab the next */
	if (strlen(s) > 1) {
		res = digit2int(s[1]);
		if (res < 0)
			return -1;
		tmp = tmp * 16 + res;
	}
	return tmp;
}

static void ztdethmf_destroy(void *pvt)
{
	struct ztdeth *z = pvt;
	unsigned long flags;
	struct ztdeth *prev = NULL, *cur;

	spin_lock_irqsave(&zlock, flags);
	cur = zdevs;
	while (cur) {
		if (cur == z) {
			if (prev)
				prev->next = cur->next;
			else
				zdevs = cur->next;
			break;
		}
		prev = cur;
		cur = cur->next;
	}
	spin_unlock_irqrestore(&zlock, flags);

	if (cur == z) {
		/* Successfully removed */
		printk(KERN_INFO "TDMoEmf: Removed interface for %s\n",
			z->span->name);
		kfree(z->msgbuf);
		kfree(z);
#ifndef LINUX26
		MOD_DEC_USE_COUNT;
#else
		module_put(THIS_MODULE);
#endif
	} else {
		if (z && z->span && z->span->name) {
			printk(KERN_INFO "TDMoEmf: Cannot find interface for %s\n",
				z->span->name);
		}
	}
}

static void *ztdethmf_create(struct zt_span *span, char *addr)
{
	struct ztdeth *z;
	char src[256];
	char tmp[256], *tmp2, *tmp3, *tmp4 = NULL;
	int res, x, bufsize;
	unsigned long flags;

	z = kmalloc(sizeof(struct ztdeth), GFP_KERNEL);
	if (!z) {
		printk(KERN_ERR "TDMoEmf: Unable to allocate memory for new span!\n");
		return z;
	}

	/* Zero it out */
	memset(z, 0, sizeof(struct ztdeth));

	/* set a delay for xmit/recv to work around Zaptel problems */
	z->delay = 8;

	/* create a msg buffer */
	/* MAX OF 31 CHANNELS!!!! */
	bufsize = 31 * ZT_CHUNKSIZE + 31 / 4 + 48;
	z->msgbuf = kmalloc(bufsize, GFP_KERNEL);
	z->rcvbuf = kmalloc(bufsize, GFP_KERNEL);
	z->rblock = SPIN_LOCK_UNLOCKED;

	/* Address should be <device>/<macaddr>[/subaddr] */
	zap_copy_string(tmp, addr, sizeof(tmp));
	tmp2 = strchr(tmp, '/');
	if (tmp2) {
		*tmp2 = '\0';
		tmp2++;
		zap_copy_string(z->ethdev, tmp, sizeof(z->ethdev));
	} else {
		printk(KERN_ERR "Invalid TDMoEmf address (no device) '%s'\n", addr);
		kfree(z);
		return NULL;
	}

	if (tmp2) {
		tmp4 = strchr(tmp2 + 1, '/');
		if (tmp4) {
			*tmp4 = '\0';
			tmp4++;
		}
		/* We don't have SSCANF :(  Gotta do this the hard way */
		tmp3 = strchr(tmp2, ':');
		for (x = 0; x < 6; x++) {
			if (tmp2) {
				if (tmp3) {
					*tmp3 = '\0';
					tmp3++;
				}
				res = hex2int(tmp2);
				if (res < 0)
					break;
				z->addr[x] = res & 0xff;
			} else
				break;
			if ((tmp2 = tmp3))
				tmp3 = strchr(tmp2, ':');
		}
		if (x != 6) {
			printk(KERN_ERR "TDMoEmf: Invalid MAC address in: %s\n", addr);
			kfree(z);
			return NULL;
		}
	} else {
		printk(KERN_ERR "TDMoEmf: Missing MAC address\n");
		kfree(z);
		return NULL;
	}

	if (tmp4) {
		int sub = 0;
		int mul = 1;

		/* We have a subaddr */
		tmp3 = tmp4 + strlen(tmp4) - 1;
		while (tmp3 >= tmp4) {
			if (*tmp3 >= '0' && *tmp3 <= '9') {
				sub += (*tmp3 - '0') * mul;
			} else {
				printk(KERN_ERR "TDMoEmf: Invalid subaddress\n");
				kfree(z);
				return NULL;
			}
			mul *= 10;
			tmp3--;
		}
		z->subaddr = htons(sub);
	}

	z->dev = dev_get_by_name(
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
		&init_net,
#endif
		z->ethdev);
	if (!z->dev) {
		printk(KERN_ERR "TDMoEmf: Invalid device '%s'\n", z->ethdev);
		kfree(z);
		return NULL;
	}

	z->span = span;
	z->addr_hash = crc32_le(0, z->addr, ETH_ALEN); /* Set up hashed MAC address */
	z->real_channels = span->channels;

	src[0] = '\0';
	for (x = 0; x < 5; x++)
		snprintf(src + strlen(src), 3, "%02x:", z->dev->dev_addr[x]);
	snprintf(src + strlen(src), 2, "%02x", z->dev->dev_addr[5]);

	printk(KERN_INFO "TDMoEmf: Added new interface for %s at %s (addr=%s, src=%s, subaddr=%d)\n",
		span->name, z->dev->name, addr, src,
		ntohs(z->subaddr));

	spin_lock_irqsave(&zlock, flags);
	z->next = zdevs;
	zdevs = z;
	spin_unlock_irqrestore(&zlock, flags);

#ifndef LINUX26
	MOD_INC_USE_COUNT;
#else
	if (!try_module_get(THIS_MODULE))
		printk(KERN_ERR "TDMoEmf: Unable to increment module use count\n");
#endif

	/* enable the timer for enabling the spans */
#if 0
	timer.expires = jiffies + 1;
	if (!timer_pending(&timer))
		add_timer(&timer);
#else
	mod_timer(&timer, jiffies + 1);
#endif
	return z;
}

static struct zt_dynamic_driver ztd_ethmf = {
	"ethmf",
	"Ethernet",
	ztdethmf_create,
	ztdethmf_destroy,
	ztdethmf_transmit,
	ztdethmf_flush
};

static struct notifier_block ztdethmf_nblock = {
	notifier_call: ztdethmf_notifier,
};

static void timer_callback(unsigned long param)
{
	unsigned long flags;
	struct ztdeth *z;
	int count_zero = 0, count_nonzero = 0;

	/* loop spans */
	++timer_counter;
	if (timer_counter >= HZ) {
		timer_counter -= HZ;

		spin_lock_irqsave(&zlock, flags);
		z = zdevs;
		while (z) {
			if (z->delay) {
				z->delay--;
				// ++count_nonzero;
				if (!z->delay) {
					printk(KERN_INFO "TDMoEmf: Span %s is ready for use.\n",
						z->span->name);
				} else {
					++count_nonzero;
				}
			} else {
				++count_zero;
			}
			z = z->next;
		}
		spin_unlock_irqrestore(&zlock, flags);

		if (count_nonzero /* && count_zero && count_nonzero != count_zero */) {
			/* re-add the timer event */
			timer.expires = jiffies + 1;
			add_timer(&timer);
		} else {
			printk(KERN_INFO "TDMoEmf: All spans are active - removing delay timer.\n");
			del_timer(&timer);
		}
	} else {
		timer.expires = jiffies + 1;
		add_timer(&timer);
	}
}

static int __init ztdethmf_init(void)
{
	timer_counter = 0;
	init_timer(&timer);
	timer.expires = jiffies + 1;
	timer.function = &timer_callback;
	if (!timer_pending(&timer))
		add_timer(&timer);

	dev_add_pack(&ztdethmf_ptype);
	register_netdevice_notifier(&ztdethmf_nblock);
	zt_dynamic_register(&ztd_ethmf);

	skb_queue_head_init(&skbs);

	return 0;
}

static void __exit ztdethmf_exit(void)
{
	del_timer(&timer);
	dev_remove_pack(&ztdethmf_ptype);
	unregister_netdevice_notifier(&ztdethmf_nblock);
	zt_dynamic_unregister(&ztd_ethmf);
}

MODULE_DESCRIPTION("Zaptel Dynamic TDMoEmf Support");
MODULE_AUTHOR("Joseph Benden");
#ifdef MODULE_LICENSE
MODULE_LICENSE("GPL");
#endif

module_init(ztdethmf_init);
module_exit(ztdethmf_exit);