author     Shaun Ruffell <sruffell@digium.com>    2010-02-25 19:10:02 +0000
committer  Shaun Ruffell <sruffell@digium.com>    2010-02-25 19:10:02 +0000
commit     9e42acddf0f25ca9c17bf370bb008be0799fc44f (patch)
tree       a8b8b40eb01eee47fe29583af71edd2ebe73ceac
parent     feef9550b093994d23a6e3eecd96db1708be408d (diff)
dahdi_dynamic: Add TDMoE Multi-Frame support.
Add TDMoE Multi-Frame support as described in the article at the following URL: http://www.thrallingpenguin.com/articles/tdmoe-mf.htm  TDMoE-MF is known to be implemented in hardware solutions from Redfone Communications.

This patch additionally implements RCU within dahdi_dynamic to decrease lock contention, latency, and context switching. Because of the RCU locking, all previously known issues with loading and unloading the modules are resolved, provided the spans are shut down with "dahdi_cfg -s".

It also contains a working fix for the kernel API change to skb_linearize(). Keying the check off the kernel version number does not work with SuSE SLES 10, as they appear to have backported the 2.6.18 change into their 2.6.16 kernel, so the build system detects the old signature directly instead.

This merges in the work Jbenden did at:
http://svn.digium.com/svn/dahdi/team/jbenden/tdmoe-mf@8102

(issue #13483)
Patch by: JBenden
Reported by: JBenden
Tested by: JBenden

git-svn-id: http://svn.asterisk.org/svn/dahdi/linux/trunk@8103 a0bf4364-ded3-4de4-8d8a-66a801d63aff
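For readers unfamiliar with the reader/writer pattern this commit switches to, the following is a minimal sketch of how the RCU-protected span list is used below. The names struct dahdi_dynamic, dspan_list, dspan_lock, and dynamic_destroy() match the patch; the wrapper function itself is illustrative only, not code from the commit.

    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>

    static void example_remove_span(struct dahdi_dynamic *victim)
    {
            struct dahdi_dynamic *z;
            unsigned long flags;

            /* Readers walk the list locklessly under rcu_read_lock();
             * they must not sleep or modify the list inside the section. */
            rcu_read_lock();
            list_for_each_entry_rcu(z, &dspan_list, list) {
                    /* inspect z */
            }
            rcu_read_unlock();

            /* Writers serialize against each other with a spinlock, unlink
             * the entry with list_del_rcu(), and only free it once
             * synchronize_rcu() guarantees no reader still sees it. */
            spin_lock_irqsave(&dspan_lock, flags);
            list_del_rcu(&victim->list);
            spin_unlock_irqrestore(&dspan_lock, flags);
            synchronize_rcu();
            dynamic_destroy(victim);
    }

This is the same sequence destroy_dynamic() and dahdi_dynamic_unregister() follow in the diff, which is why the old dlock/drvlock spinlocks around the hot receive path could be dropped.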
-rw-r--r--   drivers/dahdi/Kbuild                  |   9
-rw-r--r--   drivers/dahdi/Kconfig                 |  13
-rw-r--r--   drivers/dahdi/dahdi_dynamic.c         | 286
-rw-r--r--   drivers/dahdi/dahdi_dynamic_eth.c     |   8
-rw-r--r--   drivers/dahdi/dahdi_dynamic_ethmf.c   | 823
-rw-r--r--   include/dahdi/kernel.h                |   2
6 files changed, 970 insertions, 171 deletions
diff --git a/drivers/dahdi/Kbuild b/drivers/dahdi/Kbuild
index 9335be8..f0af285 100644
--- a/drivers/dahdi/Kbuild
+++ b/drivers/dahdi/Kbuild
@@ -3,6 +3,7 @@ obj-$(DAHDI_BUILD_ALL)$(CONFIG_DAHDI) += dahdi.o
obj-$(DAHDI_BUILD_ALL)$(CONFIG_DAHDI_DYNAMIC) += dahdi_dynamic.o
obj-$(DAHDI_BUILD_ALL)$(CONFIG_DAHDI_DYNAMIC_LOC) += dahdi_dynamic_loc.o
obj-$(DAHDI_BUILD_ALL)$(CONFIG_DAHDI_DYNAMIC_ETH) += dahdi_dynamic_eth.o
+obj-$(DAHDI_BUILD_ALL)$(CONFIG_DAHDI_DYNAMIC_ETHMF) += dahdi_dynamic_ethmf.o
obj-$(DAHDI_BUILD_ALL)$(CONFIG_DAHDI_TRANSCODE) += dahdi_transcode.o
obj-$(DAHDI_BUILD_ALL)$(CONFIG_DAHDI_WCT4XXP) += wct4xxp/
@@ -63,6 +64,14 @@ ifeq (1,$(shell fgrep -q ' hrtimer_set_expires' include/linux/hrtimer.h 2>/dev/n
EXTRA_CFLAGS+=-DHAVE_HRTIMER_ACCESSORS=1
endif
+# In 2.6.18 skb_linearize changed; however, some distros backported the change
+ifneq (,$(wildcard $(srctree)/include/linux/skbuff.h))
+ifeq ($(shell grep "skb_linearize.*(.*, .* gfp)" $(srctree)/include/linux/skbuff.h),)
+CFLAGS_dahdi_dynamic_eth.o := -DNEW_SKB_LINEARIZE
+CFLAGS_dahdi_dynamic_ethmf.o := -DNEW_SKB_LINEARIZE
+endif
+endif
+
dahdi-objs := dahdi-base.o
###############################################################################
diff --git a/drivers/dahdi/Kconfig b/drivers/dahdi/Kconfig
index 4986ccc..6952c6a 100644
--- a/drivers/dahdi/Kconfig
+++ b/drivers/dahdi/Kconfig
@@ -225,6 +225,19 @@ config DAHDI_DYNAMIC_ETH
If unsure, say Y.
+config DAHDI_DYNAMIC_ETHMF
+ tristate "Ethernet (TDMoE) Multi-Frame Span Support"
+ depends on DAHDI && DAHDI_DYNAMIC
+ default DAHDI
+ ---help---
+ This module provides support for spans over Ethernet,
+ using the TDMoE-Multi-Frame protocol.
+
+ To compile this driver as a module, choose M here: the
+ module will be called dahdi_dynamic_ethmf.
+
+ If unsure, say Y.
+
config DAHDI_DYNAMIC_LOC
tristate "Local (loopback) Span Support"
depends on DAHDI && DAHDI_DYNAMIC
diff --git a/drivers/dahdi/dahdi_dynamic.c b/drivers/dahdi/dahdi_dynamic.c
index e83fb80..e499344 100644
--- a/drivers/dahdi/dahdi_dynamic.c
+++ b/drivers/dahdi/dahdi_dynamic.c
@@ -31,7 +31,6 @@
#include <linux/kmod.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
-#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <dahdi/kernel.h>
@@ -70,16 +69,19 @@
#define ZTD_FLAG_YELLOW_ALARM (1 << 0)
#define ZTD_FLAG_SIGBITS_PRESENT (1 << 1)
-#define ZTD_FLAG_LOOPBACK (1 << 2)
+#define ZTD_FLAG_LOOPBACK (1 << 2)
-#define ERR_NSAMP (1 << 16)
-#define ERR_NCHAN (1 << 17)
-#define ERR_LEN (1 << 18)
+#define ERR_NSAMP (1 << 16)
+#define ERR_NCHAN (1 << 17)
+#define ERR_LEN (1 << 18)
EXPORT_SYMBOL(dahdi_dynamic_register);
EXPORT_SYMBOL(dahdi_dynamic_unregister);
EXPORT_SYMBOL(dahdi_dynamic_receive);
+static int ztdynamic_init(void);
+static void ztdynamic_cleanup(void);
+
#ifdef ENABLE_TASKLETS
static int taskletrun;
static int taskletsched;
@@ -91,8 +93,7 @@ static struct tasklet_struct ztd_tlet;
static void ztd_tasklet(unsigned long data);
#endif
-
-static struct dahdi_dynamic {
+struct dahdi_dynamic {
char addr[40];
char dname[20];
int err;
@@ -103,40 +104,34 @@ static struct dahdi_dynamic {
unsigned short rxcnt;
struct dahdi_span span;
struct dahdi_chan *chans[DAHDI_DYNAMIC_MAX_CHANS];
- struct dahdi_dynamic *next;
struct dahdi_dynamic_driver *driver;
void *pvt;
int timing;
int master;
unsigned char *msgbuf;
-} *dspans;
-static struct dahdi_dynamic_driver *drivers = NULL;
+ struct list_head list;
+};
+
+static DEFINE_SPINLOCK(dspan_lock);
+static LIST_HEAD(dspan_list);
+
+static DEFINE_SPINLOCK(driver_lock);
+static LIST_HEAD(driver_list);
static int debug = 0;
static int hasmaster = 0;
-#ifdef DEFINE_SPINLOCK
-static DEFINE_SPINLOCK(dlock);
-#else
-static spinlock_t dlock = SPIN_LOCK_UNLOCKED;
-#endif
-
-#ifdef DEFINE_RWLOCK
-static DEFINE_RWLOCK(drvlock);
-#else
-static rwlock_t drvlock = RW_LOCK_UNLOCKED;
-#endif
static void checkmaster(void)
{
- unsigned long flags;
int newhasmaster=0;
int best = 9999999;
struct dahdi_dynamic *z, *master=NULL;
- spin_lock_irqsave(&dlock, flags);
- z = dspans;
- while(z) {
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(z, &dspan_list, list) {
if (z->timing) {
z->master = 0;
if (!(z->span.alarms & DAHDI_ALARM_RED) &&
@@ -148,13 +143,15 @@ static void checkmaster(void)
newhasmaster = 1;
}
}
- z = z->next;
}
+
hasmaster = newhasmaster;
/* Mark the new master if there is one */
if (master)
master->master = 1;
- spin_unlock_irqrestore(&dlock, flags);
+
+ rcu_read_unlock();
+
if (master)
printk(KERN_INFO "TDMoX: New master: %s\n", master->span.name);
else
@@ -223,15 +220,13 @@ static void ztd_sendmessage(struct dahdi_dynamic *z)
static void __ztdynamic_run(void)
{
- unsigned long flags;
struct dahdi_dynamic *z;
struct dahdi_dynamic_driver *drv;
int y;
- spin_lock_irqsave(&dlock, flags);
- z = dspans;
- while(z) {
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(z, &dspan_list, list) {
if (!z->dead) {
- /* Ignore dead spans */
for (y=0;y<z->span.channels;y++) {
/* Echo cancel double buffered data */
dahdi_ec_chunk(z->span.chans[y], z->span.chans[y]->readchunk, z->span.chans[y]->writechunk);
@@ -239,30 +234,23 @@ static void __ztdynamic_run(void)
dahdi_receive(&z->span);
dahdi_transmit(&z->span);
/* Handle all transmissions now */
- spin_unlock_irqrestore(&dlock, flags);
ztd_sendmessage(z);
- spin_lock_irqsave(&dlock, flags);
}
- z = z->next;
}
- spin_unlock_irqrestore(&dlock, flags);
- read_lock(&drvlock);
- drv = drivers;
- while(drv) {
+ list_for_each_entry_rcu(drv, &driver_list, list) {
/* Flush any traffic still pending in the driver */
if (drv->flush) {
drv->flush();
}
- drv = drv->next;
}
- read_unlock(&drvlock);
+ rcu_read_unlock();
}
#ifdef ENABLE_TASKLETS
static void ztdynamic_run(void)
{
- if (!taskletpending) {
+ if (likely(!taskletpending)) {
taskletpending = 1;
taskletsched++;
tasklet_hi_schedule(&ztd_tlet);
@@ -278,18 +266,17 @@ void dahdi_dynamic_receive(struct dahdi_span *span, unsigned char *msg, int msgl
{
struct dahdi_dynamic *ztd = span->pvt;
int newerr=0;
- unsigned long flags;
int sflags;
int xlen;
int x, bits, sig;
int nchans, master;
int newalarm;
unsigned short rxpos, rxcnt;
-
-
- spin_lock_irqsave(&dlock, flags);
- if (msglen < 6) {
- spin_unlock_irqrestore(&dlock, flags);
+
+ rcu_read_lock();
+
+ if (unlikely(msglen < 6)) {
+ rcu_read_unlock();
newerr = ERR_LEN;
if (newerr != ztd->err) {
printk(KERN_NOTICE "Span %s: Insufficient samples for header (only %d)\n", span->name, msglen);
@@ -299,8 +286,8 @@ void dahdi_dynamic_receive(struct dahdi_span *span, unsigned char *msg, int msgl
}
/* First, check the chunksize */
- if (*msg != DAHDI_CHUNKSIZE) {
- spin_unlock_irqrestore(&dlock, flags);
+ if (unlikely(*msg != DAHDI_CHUNKSIZE)) {
+ rcu_read_unlock();
newerr = ERR_NSAMP | msg[0];
if (newerr != ztd->err) {
printk(KERN_NOTICE "Span %s: Expected %d samples, but receiving %d\n", span->name, DAHDI_CHUNKSIZE, msg[0]);
@@ -311,14 +298,14 @@ void dahdi_dynamic_receive(struct dahdi_span *span, unsigned char *msg, int msgl
msg++;
sflags = *msg;
msg++;
-
+
rxpos = ntohs(*((unsigned short *)msg));
msg++;
msg++;
-
+
nchans = ntohs(*((unsigned short *)msg));
- if (nchans != span->channels) {
- spin_unlock_irqrestore(&dlock, flags);
+ if (unlikely(nchans != span->channels)) {
+ rcu_read_unlock();
newerr = ERR_NCHAN | nchans;
if (newerr != ztd->err) {
printk(KERN_NOTICE "Span %s: Expected %d channels, but receiving %d\n", span->name, span->channels, nchans);
@@ -328,7 +315,7 @@ void dahdi_dynamic_receive(struct dahdi_span *span, unsigned char *msg, int msgl
}
msg++;
msg++;
-
+
/* Okay now we've accepted the header, lets check our message
length... */
@@ -341,9 +328,9 @@ void dahdi_dynamic_receive(struct dahdi_span *span, unsigned char *msg, int msgl
/* Account for sigbits -- one short per 4 channels*/
xlen += ((nchans + 3) / 4) * 2;
}
-
- if (xlen != msglen) {
- spin_unlock_irqrestore(&dlock, flags);
+
+ if (unlikely(xlen != msglen)) {
+ rcu_read_unlock();
newerr = ERR_LEN | xlen;
if (newerr != ztd->err) {
printk(KERN_NOTICE "Span %s: Expected message size %d, but was %d instead\n", span->name, xlen, msglen);
@@ -351,9 +338,9 @@ void dahdi_dynamic_receive(struct dahdi_span *span, unsigned char *msg, int msgl
ztd->err = newerr;
return;
}
-
+
bits = 0;
-
+
/* Record sigbits if present */
if (sflags & ZTD_FLAG_SIGBITS_PRESENT) {
for (x=0;x<nchans;x++) {
@@ -385,8 +372,11 @@ void dahdi_dynamic_receive(struct dahdi_span *span, unsigned char *msg, int msgl
rxcnt = ztd->rxcnt;
ztd->rxcnt = rxpos+1;
- spin_unlock_irqrestore(&dlock, flags);
-
+ /* Keep track of last received packet */
+ ztd->rxjif = jiffies;
+
+ rcu_read_unlock();
+
/* Check for Yellow alarm */
newalarm = span->alarms & ~(DAHDI_ALARM_YELLOW | DAHDI_ALARM_RED);
if (sflags & ZTD_FLAG_YELLOW_ALARM)
@@ -397,18 +387,14 @@ void dahdi_dynamic_receive(struct dahdi_span *span, unsigned char *msg, int msgl
dahdi_alarm_notify(span);
checkmaster();
}
-
- /* Keep track of last received packet */
- ztd->rxjif = jiffies;
/* note if we had a missing packet */
- if (rxpos != rxcnt)
+ if (unlikely(rxpos != rxcnt))
printk(KERN_NOTICE "Span %s: Expected seq no %d, but received %d instead\n", span->name, rxcnt, rxpos);
/* If this is our master span, then run everything */
if (master)
ztdynamic_run();
-
}
static void dynamic_destroy(struct dahdi_dynamic *z)
@@ -440,64 +426,61 @@ static void dynamic_destroy(struct dahdi_dynamic *z)
static struct dahdi_dynamic *find_dynamic(struct dahdi_dynamic_span *zds)
{
- struct dahdi_dynamic *z;
- z = dspans;
- while(z) {
+ struct dahdi_dynamic *z = NULL, *found = NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(z, &dspan_list, list) {
if (!strcmp(z->dname, zds->driver) &&
- !strcmp(z->addr, zds->addr))
+ !strcmp(z->addr, zds->addr)) {
+ found = z;
break;
- z = z->next;
+ }
}
- return z;
+ rcu_read_unlock();
+
+ return found;
}
static struct dahdi_dynamic_driver *find_driver(char *name)
{
- struct dahdi_dynamic_driver *ztd;
- ztd = drivers;
- while(ztd) {
+ struct dahdi_dynamic_driver *ztd, *found = NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ztd, &driver_list, list) {
/* here's our driver */
- if (!strcmp(name, ztd->name))
+ if (!strcmp(name, ztd->name)) {
+ found = ztd;
break;
- ztd = ztd->next;
+ }
}
- return ztd;
+ rcu_read_unlock();
+
+ return found;
}
static int destroy_dynamic(struct dahdi_dynamic_span *zds)
{
unsigned long flags;
- struct dahdi_dynamic *z, *cur, *prev=NULL;
- spin_lock_irqsave(&dlock, flags);
+ struct dahdi_dynamic *z;
+
z = find_dynamic(zds);
- if (!z) {
- spin_unlock_irqrestore(&dlock, flags);
+ if (unlikely(!z)) {
return -EINVAL;
}
- /* Don't destroy span until it is in use */
+
if (z->usecount) {
- spin_unlock_irqrestore(&dlock, flags);
printk(KERN_NOTICE "Attempt to destroy dynamic span while it is in use\n");
return -EBUSY;
}
- /* Unlink it */
- cur = dspans;
- while(cur) {
- if (cur == z) {
- if (prev)
- prev->next = z->next;
- else
- dspans = z->next;
- break;
- }
- prev = cur;
- cur = cur->next;
- }
- spin_unlock_irqrestore(&dlock, flags);
+
+ spin_lock_irqsave(&dspan_lock, flags);
+ list_del_rcu(&z->list);
+ spin_unlock_irqrestore(&dspan_lock, flags);
+ synchronize_rcu();
/* Destroy it */
dynamic_destroy(z);
-
+
return 0;
}
@@ -511,8 +494,8 @@ static int ztd_open(struct dahdi_chan *chan)
{
struct dahdi_dynamic *z;
z = chan->span->pvt;
- if (z) {
- if (z->dead)
+ if (likely(z)) {
+ if (unlikely(z->dead))
return -ENODEV;
z->usecount++;
}
@@ -528,10 +511,11 @@ static int ztd_close(struct dahdi_chan *chan)
{
struct dahdi_dynamic *z;
z = chan->span->pvt;
- if (z)
+ if (z) {
z->usecount--;
- if (z->dead && !z->usecount)
- dynamic_destroy(z);
+ if (z->dead && !z->usecount)
+ dynamic_destroy(z);
+ }
return 0;
}
@@ -552,21 +536,13 @@ static int create_dynamic(struct dahdi_dynamic_span *zds)
return -EINVAL;
}
- spin_lock_irqsave(&dlock, flags);
z = find_dynamic(zds);
- spin_unlock_irqrestore(&dlock, flags);
if (z)
return -EEXIST;
- /* XXX There is a silly race here. We check it doesn't exist, but
- someone could create it between now and then and we'd end up
- with two of them. We don't want to hold the spinlock
- for *too* long though, especially not if there is a possibility
- of kmalloc. XXX */
-
-
/* Allocate memory */
- if (!(z = kmalloc(sizeof(*z), GFP_KERNEL))) {
+ z = (struct dahdi_dynamic *) kmalloc(sizeof(struct dahdi_dynamic), GFP_KERNEL);
+ if (!z) {
return -ENOMEM;
}
@@ -621,24 +597,20 @@ static int create_dynamic(struct dahdi_dynamic_span *zds)
z->chans[x]->pvt = z;
}
- spin_lock_irqsave(&dlock, flags);
ztd = find_driver(zds->driver);
if (!ztd) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,70)
char fn[80];
#endif
- spin_unlock_irqrestore(&dlock, flags);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,70)
request_module("dahdi_dynamic_%s", zds->driver);
#else
sprintf(fn, "dahdi_dynamic_%s", zds->driver);
request_module(fn);
#endif
- spin_lock_irqsave(&dlock, flags);
ztd = find_driver(zds->driver);
}
- spin_unlock_irqrestore(&dlock, flags);
/* Another race -- should let the module get unloaded while we
@@ -667,11 +639,9 @@ static int create_dynamic(struct dahdi_dynamic_span *zds)
return -EINVAL;
}
- /* Okay, created and registered. add it to the list */
- spin_lock_irqsave(&dlock, flags);
- z->next = dspans;
- dspans = z;
- spin_unlock_irqrestore(&dlock, flags);
+ spin_lock_irqsave(&dspan_lock, flags);
+ list_add_rcu(&z->list, &dspan_list);
+ spin_unlock_irqrestore(&dspan_lock, flags);
checkmaster();
@@ -732,70 +702,52 @@ int dahdi_dynamic_register(struct dahdi_dynamic_driver *dri)
{
unsigned long flags;
int res = 0;
- write_lock_irqsave(&drvlock, flags);
- if (find_driver(dri->name))
+
+ if (find_driver(dri->name)) {
res = -1;
- else {
- dri->next = drivers;
- drivers = dri;
+ } else {
+ spin_lock_irqsave(&driver_lock, flags);
+ list_add_rcu(&dri->list, &driver_list);
+ spin_unlock_irqrestore(&driver_lock, flags);
}
- write_unlock_irqrestore(&drvlock, flags);
return res;
}
void dahdi_dynamic_unregister(struct dahdi_dynamic_driver *dri)
{
- struct dahdi_dynamic_driver *cur, *prev=NULL;
- struct dahdi_dynamic *z, *zp, *zn;
+ struct dahdi_dynamic *z;
unsigned long flags;
- write_lock_irqsave(&drvlock, flags);
- cur = drivers;
- while(cur) {
- if (cur == dri) {
- if (prev)
- prev->next = cur->next;
- else
- drivers = cur->next;
- break;
- }
- prev = cur;
- cur = cur->next;
- }
- write_unlock_irqrestore(&drvlock, flags);
- spin_lock_irqsave(&dlock, flags);
- z = dspans;
- zp = NULL;
- while(z) {
- zn = z->next;
+
+ spin_lock_irqsave(&driver_lock, flags);
+ list_del_rcu(&dri->list);
+ spin_unlock_irqrestore(&driver_lock, flags);
+ synchronize_rcu();
+
+ list_for_each_entry(z, &dspan_list, list) {
if (z->driver == dri) {
- /* Unlink */
- if (zp)
- zp->next = z->next;
- else
- dspans = z->next;
+ spin_lock_irqsave(&dspan_lock, flags);
+ list_del_rcu(&z->list);
+ spin_unlock_irqrestore(&dspan_lock, flags);
+ synchronize_rcu();
+
if (!z->usecount)
dynamic_destroy(z);
else
z->dead = 1;
- } else {
- zp = z;
}
- z = zn;
}
- spin_unlock_irqrestore(&dlock, flags);
}
static struct timer_list alarmcheck;
static void check_for_red_alarm(unsigned long ignored)
{
- unsigned long flags;
int newalarm;
int alarmchanged = 0;
struct dahdi_dynamic *z;
- spin_lock_irqsave(&dlock, flags);
- z = dspans;
- while(z) {
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(z, &dspan_list, list) {
newalarm = z->span.alarms & ~DAHDI_ALARM_RED;
/* If nothing received for a second, consider that RED ALARM */
if ((jiffies - z->rxjif) > 1 * HZ) {
@@ -806,20 +758,20 @@ static void check_for_red_alarm(unsigned long ignored)
alarmchanged++;
}
}
- z = z->next;
}
- spin_unlock_irqrestore(&dlock, flags);
+ rcu_read_unlock();
+
if (alarmchanged)
checkmaster();
/* Do the next one */
mod_timer(&alarmcheck, jiffies + 1 * HZ);
-
}
static int ztdynamic_init(void)
{
dahdi_set_dynamic_ioctl(ztdynamic_ioctl);
+
/* Start process to check for RED ALARM */
init_timer(&alarmcheck);
alarmcheck.expires = 0;
diff --git a/drivers/dahdi/dahdi_dynamic_eth.c b/drivers/dahdi/dahdi_dynamic_eth.c
index ff26420..449a971 100644
--- a/drivers/dahdi/dahdi_dynamic_eth.c
+++ b/drivers/dahdi/dahdi_dynamic_eth.c
@@ -98,10 +98,12 @@ static int ztdeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packet
#endif
if (span) {
skb_pull(skb, sizeof(struct ztdeth_header));
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
- skb_linearize(skb);
+#ifdef NEW_SKB_LINEARIZE
+ if (skb_is_nonlinear(skb))
+ skb_linearize(skb);
#else
- skb_linearize(skb, GFP_KERNEL);
+ if (skb_is_nonlinear(skb))
+ skb_linearize(skb, GFP_KERNEL);
#endif
dahdi_dynamic_receive(span, (unsigned char *)skb->data, skb->len);
}
diff --git a/drivers/dahdi/dahdi_dynamic_ethmf.c b/drivers/dahdi/dahdi_dynamic_ethmf.c
new file mode 100644
index 0000000..e8245d7
--- /dev/null
+++ b/drivers/dahdi/dahdi_dynamic_ethmf.c
@@ -0,0 +1,823 @@
+/*
+ * Dynamic Span Interface for DAHDI (Multi-Span Ethernet Interface)
+ *
+ * Written by Joseph Benden <joe@thrallingpenguin.com>
+ *
+ * Copyright (C) 2007-2010, Thralling Penguin LLC.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/kmod.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/notifier.h>
+#include <linux/crc32.h>
+
+/**
+ * Undefine USE_PROC_FS, if you do not want the /proc/dahdi/dynamic-ethmf
+ * support. Undefining this would give a slight performance increase.
+ */
+#define USE_PROC_FS
+
+#ifdef USE_PROC_FS
+# include <linux/proc_fs.h>
+# include <asm/atomic.h>
+#endif
+
+#ifdef CONFIG_DEVFS_FS
+# include <linux/devfs_fs_kernel.h>
+#endif
+
+#include <dahdi/kernel.h>
+#include <dahdi/user.h>
+
+#define ETH_P_ZTDETH 0xd00d
+#define ETHMF_MAX_PER_SPAN_GROUP 8
+#define ETHMF_MAX_GROUPS 16
+#define ETHMF_FLAG_IGNORE_CHAN0 (1 << 3)
+#define ETHMF_MAX_SPANS 4
+
+struct ztdeth_header {
+ unsigned short subaddr;
+};
+
+/* Timer for enabling spans - used to combat a lock problem */
+static struct timer_list timer;
+
+/* Whether or not the timer has been deleted */
+static atomic_t timer_deleted = ATOMIC_INIT(0);
+
+/* Global error counter */
+static atomic_t errcount = ATOMIC_INIT(0);
+
+/* Whether or not we are in shutdown */
+static atomic_t shutdown = ATOMIC_INIT(0);
+
+static struct sk_buff_head skbs;
+
+#ifdef USE_PROC_FS
+struct ethmf_group {
+ unsigned int hash_addr;
+ atomic_t spans;
+ atomic_t rxframecount;
+ atomic_t txframecount;
+ atomic_t rxbytecount;
+ atomic_t txbytecount;
+ atomic_t devupcount;
+ atomic_t devdowncount;
+};
+static struct ethmf_group ethmf_groups[ETHMF_MAX_GROUPS];
+#endif
+
+struct ztdeth {
+ /* Destination MAC address */
+ unsigned char addr[ETH_ALEN];
+ /* Destination MAC address hash value */
+ unsigned int addr_hash;
+ /* span sub-address, in network byte order */
+ unsigned short subaddr;
+ /* DAHDI span associated with this TDMoE-mf span */
+ struct dahdi_span *span;
+ /* Ethernet interface name */
+ char ethdev[IFNAMSIZ];
+ /* Ethernet device reference */
+ struct net_device *dev;
+ /* trx buffer */
+ unsigned char *msgbuf;
+ /* trx buffer length */
+ int msgbuf_len;
+ /* wether or not this frame is ready for trx */
+ atomic_t ready;
+ /* delay counter, to ensure all spans are added, prior to usage */
+ atomic_t delay;
+ /* rvc buffer */
+ unsigned char *rcvbuf;
+ /* the number of channels in this span */
+ int real_channels;
+ /* use padding if 1, else no padding */
+ atomic_t no_front_padding;
+ /* counter to pseudo lock the rcvbuf */
+ atomic_t refcnt;
+
+ struct list_head list;
+};
+
+/**
+ * Lock for adding and removing items in ethmf_list
+ */
+static DEFINE_SPINLOCK(ethmf_lock);
+
+/**
+ * The active list of all running spans
+ */
+static LIST_HEAD(ethmf_list);
+
+static inline void ethmf_errors_inc(void)
+{
+#ifdef USE_PROC_FS
+ atomic_inc(&errcount);
+#endif
+}
+
+#ifdef USE_PROC_FS
+static inline int hashaddr_to_index(unsigned int hash_addr)
+{
+ int i, z = -1;
+ for (i = 0; i < ETHMF_MAX_GROUPS; ++i) {
+ if (z == -1 && ethmf_groups[i].hash_addr == 0)
+ z = i;
+ if (ethmf_groups[i].hash_addr == hash_addr)
+ return i;
+ }
+ if (z != -1) {
+ ethmf_groups[z].hash_addr = hash_addr;
+ }
+ return z;
+}
+#endif
+
+/**
+ * Find the Ztdeth Struct and DAHDI span for a given MAC address and subaddr.
+ *
+ * NOTE: RCU read lock must already be held.
+ */
+static inline void find_ethmf(const unsigned char *addr,
+ const unsigned short subaddr, struct ztdeth **ze,
+ struct dahdi_span **span)
+{
+ struct ztdeth *z;
+
+ list_for_each_entry_rcu(z, &ethmf_list, list) {
+ if (!atomic_read(&z->delay)) {
+ if (!memcmp(addr, z->addr, ETH_ALEN)
+ && z->subaddr == subaddr) {
+ *ze = z;
+ *span = z->span;
+ return;
+ }
+ }
+ }
+
+ /* no results */
+ *ze = NULL;
+ *span = NULL;
+}
+
+/**
+ * Determines if all spans are ready for transmit. If all spans are ready,
+ * we return the number of spans which indeed are ready and populate the
+ * array of pointers to those spans..
+ *
+ * NOTE: RCU read lock must already be held.
+ */
+static inline int ethmf_trx_spans_ready(unsigned int addr_hash, struct ztdeth *(*ready_spans)[ETHMF_MAX_PER_SPAN_GROUP])
+{
+ struct ztdeth *t;
+ int span_count = 0, spans_ready = 0;
+
+ list_for_each_entry_rcu(t, &ethmf_list, list) {
+ if (!atomic_read(&t->delay) && t->addr_hash == addr_hash) {
+ ++span_count;
+ if (atomic_read(&t->ready)) {
+ short subaddr = ntohs(t->subaddr);
+ if (subaddr < ETHMF_MAX_PER_SPAN_GROUP) {
+ (*ready_spans)[subaddr] = t;
+ ++spans_ready;
+ } else {
+ printk(KERN_ERR "More than %d spans per multi-frame group are not currently supported.",
+ ETHMF_MAX_PER_SPAN_GROUP);
+ }
+ }
+ }
+ }
+
+ if (span_count && spans_ready && span_count == spans_ready) {
+ return spans_ready;
+ }
+ return 0;
+}
+
+/**
+ * Ethernet receiving side processing function.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)
+static int ztdethmf_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
+#else
+static int ztdethmf_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt)
+#endif
+{
+ int num_spans = 0, span_index = 0;
+ unsigned char *data;
+ struct dahdi_span *span;
+ struct ztdeth *z = NULL;
+ struct ztdeth_header *zh;
+ unsigned int samples, channels, rbslen, flags;
+ unsigned int skip = 0;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
+ zh = (struct ztdeth_header *) skb_network_header(skb);
+#else
+ zh = (struct ztdeth_header *) skb->nh.raw;
+#endif
+ if (ntohs(zh->subaddr) & 0x8000) {
+ /* got a multi-span frame */
+ num_spans = ntohs(zh->subaddr) & 0xFF;
+
+ /* Currently max of 4 spans supported */
+ if (unlikely(num_spans > ETHMF_MAX_SPANS)) {
+ kfree_skb(skb);
+ return 0;
+ }
+
+ skb_pull(skb, sizeof(struct ztdeth_header));
+#ifdef NEW_SKB_LINEARIZE
+ if (skb_is_nonlinear(skb))
+ skb_linearize(skb);
+#else
+ if (skb_is_nonlinear(skb))
+ skb_linearize(skb, GFP_KERNEL);
+#endif
+ data = (unsigned char *) skb->data;
+
+ rcu_read_lock();
+ do {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
+ find_ethmf(eth_hdr(skb)->h_source,
+ htons(span_index), &z, &span);
+#else
+ find_ethmf(skb->mac.ethernet->h_source,
+ htons(span_index), &z, &span);
+#endif
+ if (unlikely(!z || !span)) {
+ /* The recv'd span does not belong to us */
+ /* ethmf_errors_inc(); */
+ ++span_index;
+ continue;
+ }
+
+ samples = data[(span_index * 6)] & 0xFF;
+ flags = data[((span_index * 6) + 1)] & 0xFF;
+ channels = data[((span_index * 6) + 5)] & 0xFF;
+
+ /* Precomputed defaults for most typical values */
+ if (channels == 24)
+ rbslen = 12;
+ else if (channels == 31)
+ rbslen = 16;
+ else
+ rbslen = ((channels + 3) / 4) * 2;
+
+ if (unlikely(samples != 8 || channels >= 32 || channels == 0)) {
+ ethmf_errors_inc();
+ ++span_index;
+ continue;
+ }
+
+ if (atomic_dec_and_test(&z->refcnt) == 0) {
+ memcpy(z->rcvbuf, data + 6*span_index, 6); /* TDM Header */
+ /*
+ * If we ignore channel zero we must skip the first eight bytes and
+ * ensure that ztdynamic doesn't get confused by this new flag
+ */
+ if (flags & ETHMF_FLAG_IGNORE_CHAN0) {
+ skip = 8;
+
+ /* Remove this flag since ztdynamic may not understand it */
+ z->rcvbuf[1] = flags & ~(ETHMF_FLAG_IGNORE_CHAN0);
+
+ /* Additionally, now we will transmit with front padding */
+ atomic_set(&z->no_front_padding, 0);
+ } else {
+ /* Disable front padding if we recv'd a packet without it */
+ atomic_set(&z->no_front_padding, 1);
+ }
+ memcpy(z->rcvbuf + 6, data + 6*num_spans + 16
+ *span_index, rbslen); /* RBS Header */
+
+ /* 256 == 32*8; if padding lengths change, this must be modified */
+ memcpy(z->rcvbuf + 6 + rbslen, data + 6*num_spans + 16
+ *num_spans + (256)*span_index + skip, channels
+ * 8); /* Payload */
+
+ dahdi_dynamic_receive(span, z->rcvbuf, 6 + rbslen
+ + channels*8);
+ } else {
+ ethmf_errors_inc();
+ printk(KERN_INFO "TDMoE span overflow detected. Span %d was dropped.", span_index);
+ }
+ atomic_inc(&z->refcnt);
+
+#ifdef USE_PROC_FS
+ if (span_index == 0) {
+ atomic_inc(&(ethmf_groups[hashaddr_to_index(z->addr_hash)].rxframecount));
+ atomic_add(skb->len + z->dev->hard_header_len +
+ sizeof(struct ztdeth_header),
+ &(ethmf_groups[hashaddr_to_index(z->addr_hash)].rxbytecount));
+ }
+#endif
+ ++span_index;
+ } while (!atomic_read(&shutdown) && span_index < num_spans);
+ rcu_read_unlock();
+ }
+
+ kfree_skb(skb);
+ return 0;
+}
+
+static int ztdethmf_notifier(struct notifier_block *block, unsigned long event,
+ void *ptr)
+{
+ struct net_device *dev = ptr;
+ struct ztdeth *z;
+
+ switch (event) {
+ case NETDEV_GOING_DOWN:
+ case NETDEV_DOWN:
+ rcu_read_lock();
+ list_for_each_entry_rcu(z, &ethmf_list, list) {
+ /* Note that the device no longer exists */
+ if (z->dev == dev) {
+ z->dev = NULL;
+#ifdef USE_PROC_FS
+ atomic_inc(&(ethmf_groups[hashaddr_to_index(z->addr_hash)].devdowncount));
+#endif
+ }
+ }
+ rcu_read_unlock();
+ break;
+ case NETDEV_UP:
+ rcu_read_lock();
+ list_for_each_entry_rcu(z, &ethmf_list, list) {
+ /* Now that the device exists again, use it */
+ if (!strcmp(z->ethdev, dev->name)) {
+ z->dev = dev;
+#ifdef USE_PROC_FS
+ atomic_inc(&(ethmf_groups[hashaddr_to_index(z->addr_hash)].devupcount));
+#endif
+ }
+ }
+ rcu_read_unlock();
+ break;
+ }
+ return 0;
+}
+
+static int ztdethmf_transmit(void *pvt, unsigned char *msg, int msglen)
+{
+ struct ztdeth *z = pvt, *ready_spans[ETHMF_MAX_PER_SPAN_GROUP];
+ struct sk_buff *skb;
+ struct ztdeth_header *zh;
+ struct net_device *dev;
+ unsigned char addr[ETH_ALEN];
+ int spans_ready = 0, index = 0;
+
+ if (atomic_read(&shutdown))
+ return 0;
+
+ rcu_read_lock();
+
+ if (unlikely(!z || !z->dev)) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ if (!atomic_read(&z->ready)) {
+ if (atomic_inc_return(&z->ready) == 1) {
+ memcpy(z->msgbuf, msg, msglen);
+ z->msgbuf_len = msglen;
+ }
+ }
+
+ if ((spans_ready = ethmf_trx_spans_ready(z->addr_hash, &ready_spans))) {
+ int pad[ETHMF_MAX_SPANS], rbs[ETHMF_MAX_SPANS];
+
+ dev = z->dev;
+ memcpy(addr, z->addr, sizeof(z->addr));
+
+ for (index = 0; index < spans_ready; index++) {
+ int chan = ready_spans[index]->real_channels;
+ /* By default we pad to 32 channels, but if
+ * no_front_padding is false then we have a pad
+ * in the front of 8 bytes, so this implies one
+ * less channel
+ */
+ if (atomic_read(&(ready_spans[index]->no_front_padding)))
+ pad[index] = (32 - chan)*8;
+ else
+ pad[index] = (31 - chan)*8;
+
+ if (chan == 24)
+ rbs[index] = 12;
+ else if (chan == 31)
+ rbs[index] = 16;
+ else
+ // Shouldn't this be index, not spans_ready?
+ rbs[spans_ready] = ((chan + 3) / 4) * 2;
+ }
+
+ /* Allocate the standard size for a 32-chan frame */
+ skb = dev_alloc_skb(1112 + dev->hard_header_len
+ + sizeof(struct ztdeth_header) + 32);
+ if (unlikely(!skb)) {
+ rcu_read_unlock();
+ ethmf_errors_inc();
+ return 0;
+ }
+
+ /* Reserve header space */
+ skb_reserve(skb, dev->hard_header_len
+ + sizeof(struct ztdeth_header));
+ /* copy each spans header */
+ for (index = 0; index < spans_ready; index++) {
+ if (!atomic_read(&(ready_spans[index]->no_front_padding)))
+ ready_spans[index]->msgbuf[1]
+ |= ETHMF_FLAG_IGNORE_CHAN0;
+
+ memcpy(skb_put(skb, 6), ready_spans[index]->msgbuf, 6);
+ }
+
+ /* copy each spans RBS payload */
+ for (index = 0; index < spans_ready; index++) {
+ memcpy(skb_put(skb, 16), ready_spans[index]->msgbuf + 6,
+ rbs[index]);
+ }
+
+ /* copy each spans data/voice payload */
+ for (index = 0; index < spans_ready; index++) {
+ int chan = ready_spans[index]->real_channels;
+ if (!atomic_read(&(ready_spans[index]->no_front_padding))) {
+ /* This adds an additional (padded) channel to our total */
+ memset(skb_put(skb, 8), 0xA5, 8); /* ETHMF_IGNORE_CHAN0 */
+ }
+ memcpy(skb_put(skb, chan*8), ready_spans[index]->msgbuf
+ + (6 + rbs[index]), chan*8);
+ if (pad[index] > 0) {
+ memset(skb_put(skb, pad[index]), 0xDD, pad[index]);
+ }
+
+ /* mark span as ready for new data/voice */
+ atomic_set(&(ready_spans[index]->ready), 0);
+ }
+
+ /* Throw on header */
+ zh = (struct ztdeth_header *)skb_push(skb,
+ sizeof(struct ztdeth_header));
+ zh->subaddr = htons((unsigned short)(0x8000 | (unsigned char)(spans_ready & 0xFF)));
+
+ /* Setup protocol type */
+ skb->protocol = __constant_htons(ETH_P_ZTDETH);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
+ skb_set_network_header(skb, 0);
+#else
+ skb->nh.raw = skb->data;
+#endif
+ skb->dev = dev;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+ dev_hard_header(skb, dev, ETH_P_ZTDETH, addr, dev->dev_addr, skb->len);
+#else
+ if (dev->hard_header)
+ dev->hard_header(skb, dev, ETH_P_ZTDETH, addr,
+ dev->dev_addr, skb->len);
+#endif
+ /* queue frame for delivery */
+ if (dev) {
+ skb_queue_tail(&skbs, skb);
+ }
+#ifdef USE_PROC_FS
+ atomic_inc(&(ethmf_groups[hashaddr_to_index(z->addr_hash)].txframecount));
+ atomic_add(skb->len, &(ethmf_groups[hashaddr_to_index(z->addr_hash)].txbytecount));
+#endif
+ }
+
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int ztdethmf_flush(void)
+{
+ struct sk_buff *skb;
+
+ /* Handle all transmissions now */
+ while ((skb = skb_dequeue(&skbs))) {
+ dev_queue_xmit(skb);
+ }
+ return 0;
+}
+
+static struct packet_type ztdethmf_ptype =
+{
+ .type = __constant_htons(ETH_P_ZTDETH), /* Protocol */
+ .dev = NULL, /* Device (NULL = wildcard) */
+ .func = ztdethmf_rcv, /* Receiver */
+};
+
+static void ztdethmf_destroy(void *pvt)
+{
+ struct ztdeth *z = pvt;
+ unsigned long flags;
+
+ atomic_set(&shutdown, 1);
+ synchronize_rcu();
+
+ spin_lock_irqsave(&ethmf_lock, flags);
+ list_del_rcu(&z->list);
+ spin_unlock_irqrestore(&ethmf_lock, flags);
+ synchronize_rcu();
+ atomic_dec(&(ethmf_groups[hashaddr_to_index(z->addr_hash)].spans));
+
+ if (z) { /* Successfully removed */
+ printk(KERN_INFO "Removed interface for %s\n",
+ z->span->name);
+ kfree(z->msgbuf);
+ kfree(z);
+ module_put(THIS_MODULE);
+ } else {
+ if (z && z->span && z->span->name) {
+ printk(KERN_ERR "Cannot find interface for %s\n",
+ z->span->name);
+ }
+ }
+}
+
+static void *ztdethmf_create(struct dahdi_span *span, char *addr)
+{
+ struct ztdeth *z;
+ char src[256];
+ char *src_ptr;
+ int x, bufsize, num_matched;
+ unsigned long flags;
+
+ BUG_ON(!span);
+ BUG_ON(!addr);
+
+ z = kmalloc(sizeof(struct ztdeth), GFP_KERNEL);
+ if (!z)
+ return NULL;
+
+ /* Zero it out */
+ memset(z, 0, sizeof(struct ztdeth));
+
+ /* set a delay for xmit/recv to workaround Zaptel problems */
+ atomic_set(&z->delay, 4);
+
+ /* create a msg buffer. MAX OF 31 CHANNELS!!!! */
+ bufsize = 31 * DAHDI_CHUNKSIZE + 31 / 4 + 48;
+ z->msgbuf = kmalloc(bufsize, GFP_KERNEL);
+ z->rcvbuf = kmalloc(bufsize, GFP_KERNEL);
+
+ /* Address should be <dev>/<macaddr>/subaddr */
+ dahdi_copy_string(src, addr, sizeof(src));
+ /* replace all / with space; otherwise kernel sscanf does not work */
+ src_ptr = src;
+ while (*src_ptr) {
+ if (*src_ptr == '/')
+ *src_ptr = ' ';
+ ++src_ptr;
+ }
+ if (8 != (num_matched = sscanf(src,
+ "%16s %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hu",
+ z->ethdev, &z->addr[0], &z->addr[1],
+ &z->addr[2], &z->addr[3], &z->addr[4],
+ &z->addr[5], &z->subaddr))) {
+ printk(KERN_ERR "Only matched %d entries in '%s'\n", num_matched, src);
+ printk(KERN_ERR "Invalid TDMoE Multiframe address: %s\n", addr);
+ kfree(z);
+ return NULL;
+ }
+ z->dev = dev_get_by_name(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+ &init_net,
+#endif
+ z->ethdev);
+ if (!z->dev) {
+ printk(KERN_ERR "TDMoE Multiframe: Invalid device '%s'\n", z->ethdev);
+ kfree(z);
+ return NULL;
+ }
+ z->span = span;
+ z->subaddr = htons(z->subaddr);
+ z->addr_hash = crc32_le(0, z->addr, ETH_ALEN);
+ z->real_channels = span->channels;
+
+ src[0] ='\0';
+ for (x=0; x<5; x++)
+ sprintf(src + strlen(src), "%02x:", z->dev->dev_addr[x]);
+ sprintf(src + strlen(src), "%02x", z->dev->dev_addr[5]);
+
+ printk(KERN_INFO "TDMoEmf: Added new interface for %s at %s "
+ "(addr=%s, src=%s, subaddr=%d)\n", span->name, z->dev->name,
+ addr, src, ntohs(z->subaddr));
+
+ atomic_set(&z->ready, 0);
+ atomic_set(&z->refcnt, 0);
+
+ spin_lock_irqsave(&ethmf_lock, flags);
+ list_add_rcu(&z->list, &ethmf_list);
+ spin_unlock_irqrestore(&ethmf_lock, flags);
+ atomic_inc(&(ethmf_groups[hashaddr_to_index(z->addr_hash)].spans));
+
+ if (!try_module_get(THIS_MODULE))
+ printk(KERN_ERR "TDMoEmf: Unable to increment module use count\n");
+
+ /* enable the timer for enabling the spans */
+ mod_timer(&timer, jiffies + HZ);
+ atomic_set(&shutdown, 0);
+ return z;
+}
+
+static struct dahdi_dynamic_driver ztd_ethmf = {
+ "ethmf",
+ "Ethernet",
+ ztdethmf_create,
+ ztdethmf_destroy,
+ ztdethmf_transmit,
+ ztdethmf_flush
+};
+
+static struct notifier_block ztdethmf_nblock = {
+ .notifier_call = ztdethmf_notifier,
+};
+
+/**
+ * Decrements each delay counter in the ethmf_list and returns the number of
+ * delay counters that are not equal to zero.
+ */
+static int ethmf_delay_dec(void)
+{
+ struct ztdeth *z;
+ int count_nonzero = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(z, &ethmf_list, list) {
+ if (atomic_read(&z->delay)) {
+ atomic_dec(&z->delay);
+ ++count_nonzero;
+ } else
+ atomic_set(&z->delay, 0);
+ }
+ rcu_read_unlock();
+ return count_nonzero;
+}
+
+/**
+ * Timer callback function to allow all spans to be added, prior to any of
+ * them being used.
+ */
+static void timer_callback(unsigned long param)
+{
+ if (ethmf_delay_dec()) {
+ if (!atomic_read(&timer_deleted)) {
+ timer.expires = jiffies + HZ;
+ add_timer(&timer);
+ }
+ } else {
+ printk(KERN_INFO "All TDMoE multiframe span groups are active.\n");
+ del_timer(&timer);
+ }
+}
+
+#ifdef USE_PROC_FS
+static struct proc_dir_entry *proc_entry;
+static const char *ztdethmf_procname = "dahdi/dynamic-ethmf";
+static int ztdethmf_proc_read(char *page, char **start, off_t off, int count,
+ int *eof, void *data)
+{
+ struct ztdeth *z = NULL;
+ int len = 0, i = 0;
+ unsigned int group = 0, c = 0;
+
+ rcu_read_lock();
+
+ len += sprintf(page + len, "Errors: %d\n\n", atomic_read(&errcount));
+
+ for (group = 0; group < ETHMF_MAX_GROUPS; ++group) {
+ if (atomic_read(&(ethmf_groups[group].spans))) {
+ len += sprintf(page + len, "Group #%d (0x%x)\n", i++, ethmf_groups[group].hash_addr);
+ len += sprintf(page + len, " Spans: %d\n",
+ atomic_read(&(ethmf_groups[group].spans)));
+
+ c = 1;
+ list_for_each_entry_rcu(z, &ethmf_list, list) {
+ if (z->addr_hash == ethmf_groups[group].hash_addr) {
+ if (c == 1) {
+ len += sprintf(page + len,
+ " Device: %s (MAC: %02x:%02x:%02x:%02x:%02x:%02x)\n",
+ z->ethdev,
+ z->addr[0], z->addr[1], z->addr[2],
+ z->addr[3], z->addr[4], z->addr[5]);
+ }
+ len += sprintf(page + len, " Span %d: subaddr=%u ready=%d delay=%d real_channels=%d no_front_padding=%d\n",
+ c++, ntohs(z->subaddr),
+ atomic_read(&z->ready), atomic_read(&z->delay),
+ z->real_channels, atomic_read(&z->no_front_padding));
+ }
+ }
+ len += sprintf(page + len, " Device UPs: %u\n",
+ atomic_read(&(ethmf_groups[group].devupcount)));
+ len += sprintf(page + len, " Device DOWNs: %u\n",
+ atomic_read(&(ethmf_groups[group].devdowncount)));
+ len += sprintf(page + len, " Rx Frames: %u\n",
+ atomic_read(&(ethmf_groups[group].rxframecount)));
+ len += sprintf(page + len, " Tx Frames: %u\n",
+ atomic_read(&(ethmf_groups[group].txframecount)));
+ len += sprintf(page + len, " Rx Bytes: %u\n",
+ atomic_read(&(ethmf_groups[group].rxbytecount)));
+ len += sprintf(page + len, " Tx Bytes: %u\n",
+ atomic_read(&(ethmf_groups[group].txbytecount)));
+ if (len <= off) {
+ off -= len;
+ len = 0;
+ }
+ if (len > off+count)
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ if (len <= off) {
+ off -= len;
+ len = 0;
+ }
+ *start = page + off;
+ len -= off;
+ if (len > count)
+ len = count;
+ return (len);
+}
+#endif
+
+static int __init ztdethmf_init(void)
+{
+ init_timer(&timer);
+ timer.expires = jiffies + HZ;
+ timer.function = &timer_callback;
+ if (!timer_pending(&timer))
+ add_timer(&timer);
+
+ dev_add_pack(&ztdethmf_ptype);
+ register_netdevice_notifier(&ztdethmf_nblock);
+ dahdi_dynamic_register(&ztd_ethmf);
+
+ skb_queue_head_init(&skbs);
+
+#ifdef USE_PROC_FS
+ proc_entry = create_proc_read_entry(ztdethmf_procname, 0444, NULL,
+ ztdethmf_proc_read, NULL);
+ if (!proc_entry) {
+ printk(KERN_ALERT "create_proc_read_entry failed.\n");
+ }
+#endif
+
+ return 0;
+}
+
+static void __exit ztdethmf_exit(void)
+{
+ atomic_set(&timer_deleted, 1);
+ del_timer_sync(&timer);
+
+ dev_remove_pack(&ztdethmf_ptype);
+ unregister_netdevice_notifier(&ztdethmf_nblock);
+ dahdi_dynamic_unregister(&ztd_ethmf);
+
+#ifdef USE_PROC_FS
+ if (proc_entry)
+ remove_proc_entry(ztdethmf_procname, NULL);
+#endif
+}
+
+MODULE_DESCRIPTION("DAHDI Dynamic TDMoEmf Support");
+MODULE_AUTHOR("Joseph Benden <joe@thrallingpenguin.com>");
+#ifdef MODULE_LICENSE
+MODULE_LICENSE("GPL");
+#endif
+
+module_init(ztdethmf_init);
+module_exit(ztdethmf_exit);
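As a usage note on the new driver: ztdethmf_create() parses the dynamic span address as <dev>/<macaddr>/<subaddr>, so a system.conf entry handled by dahdi_cfg would look roughly like the line below. The interface name, MAC address, channel count, and timing priority are illustrative placeholders, and the exact dynamic= syntax is an assumption based on the existing TDMoE drivers, not something introduced by this patch.

    # dynamic=<driver>,<address>,<numchans>,<timing>
    dynamic=ethmf,eth0/00:11:22:33:44:55/0,31,1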
diff --git a/include/dahdi/kernel.h b/include/dahdi/kernel.h
index c905234..563af82 100644
--- a/include/dahdi/kernel.h
+++ b/include/dahdi/kernel.h
@@ -960,7 +960,7 @@ struct dahdi_dynamic_driver {
/*! Flush any pending messages */
int (*flush)(void);
- struct dahdi_dynamic_driver *next;
+ struct list_head list;
};
/*! \brief Receive a dynamic span message */