author russell <russell@5390a7c7-147a-4af0-8ec9-7488f05a26cb> 2005-06-15 15:11:47 +0000
committer russell <russell@5390a7c7-147a-4af0-8ec9-7488f05a26cb> 2005-06-15 15:11:47 +0000
commit 318dfc80dfede13577ef7f076b9ccdd3193744a8 (patch)
tree b1176cd6a43788bc859817c157ebbfefe7cc9f04
parent 4909488d54b5b4a97f4024068ff11d4fac706982 (diff)
update to support 2nd gen cards and features
git-svn-id: http://svn.digium.com/svn/zaptel/branches/v1-0@676 5390a7c7-147a-4af0-8ec9-7488f05a26cb
-rwxr-xr-x  wct4xxp.c  1664
1 file changed, 1369 insertions, 295 deletions
diff --git a/wct4xxp.c b/wct4xxp.c
index c1fe977..484e1d1 100755
--- a/wct4xxp.c
+++ b/wct4xxp.c
@@ -1,12 +1,12 @@
/*
* TE410P Quad-T1/E1 PCI Driver version 0.1, 12/16/02
*
- * Written by Mark Spencer <markster@linux-support.net>
+ * Written by Mark Spencer <markster@digium.com>
* Based on previous works, designs, and architectures conceived and
* written by Jim Dixon <jim@lambdatel.com>.
*
* Copyright (C) 2001 Jim Dixon / Zapata Telephony.
- * Copyright (C) 2001, Linux Support Services, Inc.
+ * Copyright (C) 2001-2005, Digium, Inc.
*
* All rights reserved.
*
@@ -53,17 +53,126 @@
/* #define ENABLE_TASKLETS */
+
+/* Work queues are a way to better distribute load on SMP systems */
+#ifdef LINUX26
+#define ENABLE_WORKQUEUES
+#endif
+
+/* Enabling prefetching may help performance */
+#define ENABLE_PREFETCH
+
+/* Support first generation cards? */
+#define SUPPORT_GEN1
+
/* Define to get more attention-grabbing, but slightly more I/O-intensive,
   alarm status */
#define FANCY_ALARM
-static int debug;
+/* Define to support Digium Voice Processing Module expansion card */
+#define VPM_SUPPORT
+
+#define DEBUG_MAIN (1 << 0)
+#define DEBUG_DTMF (1 << 1)
+#define DEBUG_REGS (1 << 2)
+#define DEBUG_TSI (1 << 3)
+#define DEBUG_ECHOCAN (1 << 4)
+#define DEBUG_RBS (1 << 5)
+#define DEBUG_FRAMER (1 << 6)
+
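debug is now a bitmask rather than a simple on/off flag, so individual subsystems can be traced selectively. A minimal sketch of how the flags combine, assuming debug is exported as a module parameter like the other options (those declarations sit outside the hunks shown here):

	/* Illustrative only: trace register access and RBS handling together */
	debug = DEBUG_REGS | DEBUG_RBS;	/* (1 << 2) | (1 << 5) == 0x24 */
	/* e.g.  modprobe wct4xxp debug=0x24 */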
+#ifdef ENABLE_WORKQUEUES
+#include <linux/cpumask.h>
+
+/* XXX UGLY!!!! XXX We have to access the workqueue's internal structures, which
+ are only defined within workqueue.c, because the kernel doesn't give us a routine
+ to nail a work item to a particular CPU's thread. Nailing work to threads gives us
+ substantially higher scalability in multi-CPU environments though! */
+
+/*
+ * The per-CPU workqueue (if single thread, we always use cpu 0's).
+ *
+ * The sequence counters are for flush_scheduled_work(). It wants to wait
+ * until until all currently-scheduled works are completed, but it doesn't
+ * want to be livelocked by new, incoming ones. So it waits until
+ * remove_sequence is >= the insert_sequence which pertained when
+ * flush_scheduled_work() was called.
+ */
+
+struct cpu_workqueue_struct {
+
+ spinlock_t lock;
+
+ long remove_sequence; /* Least-recently added (next to run) */
+ long insert_sequence; /* Next to add */
+
+ struct list_head worklist;
+ wait_queue_head_t more_work;
+ wait_queue_head_t work_done;
+
+ struct workqueue_struct *wq;
+ task_t *thread;
+
+ int run_depth; /* Detect run_workqueue() recursion depth */
+} ____cacheline_aligned;
+
+/*
+ * The externally visible workqueue abstraction is an array of
+ * per-CPU workqueues:
+ */
+struct workqueue_struct {
+ struct cpu_workqueue_struct cpu_wq[NR_CPUS];
+ const char *name;
+ struct list_head list; /* Empty if single thread */
+};
+
+/* Preempt must be disabled. */
+static void __t4_queue_work(struct cpu_workqueue_struct *cwq,
+ struct work_struct *work)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cwq->lock, flags);
+ work->wq_data = cwq;
+ list_add_tail(&work->entry, &cwq->worklist);
+ cwq->insert_sequence++;
+ wake_up(&cwq->more_work);
+ spin_unlock_irqrestore(&cwq->lock, flags);
+}
+
+/*
+ * Queue work on a workqueue. Return non-zero if it was successfully
+ * added.
+ *
+ * We queue the work to the CPU it was submitted on, but there is no
+ * guarantee that it will be processed by that CPU.
+ */
+static inline int t4_queue_work(struct workqueue_struct *wq, struct work_struct *work, int cpu)
+{
+ int ret = 0;
+
+ if (!test_and_set_bit(0, &work->pending)) {
+ BUG_ON(!list_empty(&work->entry));
+ __t4_queue_work(wq->cpu_wq + cpu, work);
+ ret = 1;
+ }
+ return ret;
+}
+
+#endif
+
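t4_queue_work() exists so that each span's deferred work can be nailed to a specific CPU's workqueue thread, which stock queue_work() does not allow. A minimal sketch of the intended usage, assuming the 2.6-era three-argument INIT_WORK() and the workq/swork fields declared later in this patch; the workqueue name string is arbitrary, workq_handlespan matches the handler defined further down, and the real interrupt path also skips spans that are not running:

	/* At card init: one workqueue per card, one work item per span */
	wc->workq = create_workqueue("t4xxp");
	for (x = 0; x < 4; x++)
		INIT_WORK(&wc->tspans[x]->swork, workq_handlespan, wc->tspans[x]);

	/* Per interrupt: spread the four spans across the online CPUs */
	atomic_set(&wc->worklist, 4);
	for (x = 0; x < 4; x++)
		t4_queue_work(wc->workq, &wc->tspans[x]->swork, x % num_online_cpus());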
+static int debug=DEBUG_ECHOCAN;
static int timingcable;
static int highestorder;
static int t1e1override = -1;
+static int j1mode = 0;
static int loopback = 0;
static int alarmdebounce = 0;
+#ifdef VPM_SUPPORT
+static int vpmsupport = 1;
+#endif
static int noburst = 0;
+static int debugslips = 0;
+static int polling = 0;
#ifdef FANCY_ALARM
static int altab[] = {
@@ -80,36 +189,86 @@ static int altab[] = {
#define TYPE_T1 1 /* is a T1 card */
#define TYPE_E1 2 /* is an E1 card */
+#define TYPE_J1 3 /* is running in J1 mode */
+
+#define FLAG_2NDGEN (1 << 3)
#define CANARY 0xc0de
+struct devtype {
+ char *desc;
+ unsigned int flags;
+};
+
+static struct devtype wct4xxp = { "Wildcard TE410P/TE405P (1st Gen)", 0 };
+static struct devtype wct410p2 = { "Wildcard TE410P (2nd Gen)", FLAG_2NDGEN };
+static struct devtype wct405p2 = { "Wildcard TE405P (2nd Gen)", FLAG_2NDGEN };
+
+
static int inirq = 0;
+struct t4;
+
+struct t4_span {
+ struct t4 *owner;
+ unsigned int *writechunk; /* Double-word aligned write memory */
+ unsigned int *readchunk; /* Double-word aligned read memory */
+ int spantype; /* card type, T1 or E1 or J1 */
+ int sync;
+ int psync;
+ int alarmtimer;
+ int redalarms;
+ int notclear;
+ int alarmcount;
+ int spanflags;
+ int syncpos;
+#ifdef SUPPORT_GEN1
+ int e1check; /* E1 check */
+#endif
+ struct zt_span span;
+ unsigned char txsigs[16]; /* Transmit sigs */
+ int loopupcnt;
+ int loopdowncnt;
+#ifdef SUPPORT_GEN1
+ unsigned char ec_chunk1[31][ZT_CHUNKSIZE]; /* first EC chunk buffer */
+ unsigned char ec_chunk2[31][ZT_CHUNKSIZE]; /* second EC chunk buffer */
+#endif
+ int irqmisses;
+#ifdef VPM_SUPPORT
+ unsigned int dtmfactive;
+ unsigned int dtmfmask;
+ unsigned int dtmfmutemask;
+ short dtmfenergy[31];
+ short dtmfdigit[31];
+#endif
+#ifdef ENABLE_WORKQUEUES
+ struct work_struct swork;
+#endif
+ struct zt_chan chans[0]; /* Individual channels */
+};
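The trailing chans[0] member makes t4_span a variable-length structure: the span header and its array of zt_chan structures come from a single allocation. A sketch of what that layout implies (the actual allocation happens in probe code outside these hunks), sized here for a full E1's 31 channels:

	struct t4_span *ts;

	ts = kmalloc(sizeof(*ts) + sizeof(struct zt_chan) * 31, GFP_KERNEL);
	if (!ts)
		return -ENOMEM;
	memset(ts, 0, sizeof(*ts) + sizeof(struct zt_chan) * 31);
	wc->tspans[x] = ts;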
+
struct t4 {
/* This structure exists one per card */
struct pci_dev *dev; /* Pointer to PCI device */
- int intcount;
+ unsigned int intcount;
int num; /* Which card we are */
int t1e1; /* T1/E1 select pins */
int globalconfig; /* Whether global setup has been done */
int syncsrc; /* active sync source */
- int syncs[4]; /* sync sources */
- int psyncs[4]; /* span-relative sync sources */
- int alarmtimer[4]; /* Alarm timer */
- int redalarms[4];
+ struct t4_span *tspans[4]; /* Individual spans */
+#ifdef VPM_SUPPORT
+ int vpm;
+#endif
+
int blinktimer;
- int alarmcount[4]; /* How much red alarm we've seen */
#ifdef FANCY_ALARM
int alarmpos;
#endif
int irq; /* IRQ used by device */
int order; /* Order */
int flags; /* Device flags */
- int spanflags[4]; /* Span flags */
- int syncpos[4]; /* span-relative sync sources */
int master; /* Are we master */
int ledreg; /* LED Register */
- int e1check[4]; /* E1 check */
unsigned int dmactrl;
int e1recover; /* E1 recovery timer */
dma_addr_t readdma;
@@ -117,43 +276,51 @@ struct t4 {
unsigned long memaddr; /* Base address of card */
unsigned long memlen;
volatile unsigned int *membase; /* Base address of card */
- struct zt_span spans[4]; /* Spans */
- struct zt_chan *chans[4]; /* Pointers to blocks of 24(30/31) contiguous zt_chans for each span */
- unsigned char txsigs[4][16]; /* Copy of tx sig registers */
- int loopupcnt[4]; /* loop up code counter */
- int loopdowncnt[4]; /* loop down code counter */
int spansstarted; /* number of spans started */
/* spinlock_t lock; */ /* lock context */
spinlock_t reglock; /* lock register access */
- unsigned char ec_chunk1[4][31][ZT_CHUNKSIZE]; /* first EC chunk buffer */
- unsigned char ec_chunk2[4][31][ZT_CHUNKSIZE]; /* second EC chunk buffer */
volatile unsigned int *writechunk; /* Double-word aligned write memory */
volatile unsigned int *readchunk; /* Double-word aligned read memory */
unsigned short canary;
+#ifdef ENABLE_WORKQUEUES
+ atomic_t worklist;
+ struct workqueue_struct *workq;
+#else
#ifdef ENABLE_TASKLETS
int taskletrun;
int taskletsched;
int taskletpending;
int taskletexec;
int txerrors;
- struct tasklet_struct t4xxp_tlet;
+ struct tasklet_struct t4_tlet;
+#endif
#endif
- int spantype[4]; /* card type, T1 or E1 */
unsigned int passno; /* number of interrupt passes */
char *variety;
int last0; /* for detecting double-missed IRQ */
int checktiming; /* Set >0 to cause the timing source to be checked */
};
+#define T4_VPM_PRESENT (1 << 28)
+
+#ifdef VPM_SUPPORT
+static void t4_vpm_init(struct t4 *wc);
+#endif
static void __set_clear(struct t4 *wc, int span);
static int t4_startup(struct zt_span *span);
static int t4_shutdown(struct zt_span *span);
static int t4_rbsbits(struct zt_chan *chan, int bits);
static int t4_maint(struct zt_span *span, int cmd);
+#ifdef SUPPORT_GEN1
static int t4_reset_dma(struct t4 *wc);
+#endif
static int t4_ioctl(struct zt_chan *chan, unsigned int cmd, unsigned long data);
+static void t4_tsi_assign(struct t4 *wc, int fromspan, int fromchan, int tospan, int tochan);
+static void t4_tsi_unassign(struct t4 *wc, int tospan, int tochan);
static void __t4_set_timing_source(struct t4 *wc, int unit);
+static void __t4_check_alarms(struct t4 *wc, int span);
+static void __t4_check_sigbits(struct t4 *wc, int span);
#define WC_RDADDR 0
#define WC_WRADDR 1
@@ -184,12 +351,16 @@ static void t4_tasklet(unsigned long data);
static struct t4 *cards[MAX_T4_CARDS];
+
+#define MAX_TDM_CHAN 32
+#define MAX_DTMF_DET 16
+
static inline void __t4_pci_out(struct t4 *wc, const unsigned int addr, const unsigned int value)
{
unsigned int tmp;
- wc->membase[addr] = value;
+ wc->membase[addr] = cpu_to_le32(value);
#if 1
- tmp = wc->membase[addr];
+ tmp = le32_to_cpu(wc->membase[addr]);
if ((value != tmp) && (addr != WC_LEDS) && (addr != WC_LDATA) &&
(addr != WC_GPIO) && (addr != WC_INTR))
printk("Tried to load %08x into %08x, but got %08x instead\n", value, addr, tmp);
@@ -198,7 +369,7 @@ static inline void __t4_pci_out(struct t4 *wc, const unsigned int addr, const un
static inline unsigned int __t4_pci_in(struct t4 *wc, const unsigned int addr)
{
- return wc->membase[addr];
+ return le32_to_cpu(wc->membase[addr]);
}
static inline void t4_pci_out(struct t4 *wc, const unsigned int addr, const unsigned int value)
@@ -260,7 +431,7 @@ static inline unsigned int t4_framer_in(struct t4 *wc, int unit, const unsigned
static inline void __t4_framer_out(struct t4 *wc, int unit, const unsigned int addr, const unsigned int value)
{
unit &= 0x3;
- if (debug)
+ if (debug & DEBUG_REGS)
printk("Writing %02x to address %02x of unit %d\n", value, addr, unit);
__t4_pci_out(wc, WC_LADDR, (unit << 8) | (addr & 0xff));
__t4_pci_out(wc, WC_LDATA, value);
@@ -269,7 +440,7 @@ static inline void __t4_framer_out(struct t4 *wc, int unit, const unsigned int a
__t4_pci_out(wc, WC_LADDR, (unit << 8) | (addr & 0xff) | (1 << 10));
__t4_pci_out(wc, WC_LADDR, (unit << 8) | (addr & 0xff));
__t4_pci_out(wc, WC_LADDR, 0);
- if (debug) printk("Write complete\n");
+ if (debug & DEBUG_REGS) printk("Write complete\n");
#if 0
{ unsigned int tmp;
tmp = t4_framer_in(wc, unit, addr);
@@ -287,23 +458,177 @@ static inline void t4_framer_out(struct t4 *wc, int unit, const unsigned int add
spin_unlock_irqrestore(&wc->reglock, flags);
}
+#ifdef VPM_SUPPORT
+
+static inline void wait_a_little(void)
+{
+ unsigned long newjiffies=jiffies+2;
+ while(jiffies < newjiffies);
+}
+
+static inline unsigned int __t4_vpm_in(struct t4 *wc, int unit, const unsigned int addr)
+{
+ unsigned int ret;
+ unit &= 0x7;
+ __t4_pci_out(wc, WC_LADDR, (addr & 0x1ff) | ( unit << 12));
+ __t4_pci_out(wc, WC_LADDR, (addr & 0x1ff) | ( unit << 12) | (1 << 11) | WC_LREAD);
+ ret = __t4_pci_in(wc, WC_LDATA);
+ __t4_pci_out(wc, WC_LADDR, 0);
+ return ret & 0xff;
+}
+
+static inline unsigned int t4_vpm_in(struct t4 *wc, int unit, const unsigned int addr)
+{
+ unsigned long flags;
+ unsigned int ret;
+ spin_lock_irqsave(&wc->reglock, flags);
+ ret = __t4_vpm_in(wc, unit, addr);
+ spin_unlock_irqrestore(&wc->reglock, flags);
+ return ret;
+}
+
+static inline void __t4_vpm_out(struct t4 *wc, int unit, const unsigned int addr, const unsigned int value)
+{
+ unit &= 0x7;
+ if (debug & DEBUG_REGS)
+ printk("Writing %02x to address %02x of ec unit %d\n", value, addr, unit);
+ __t4_pci_out(wc, WC_LADDR, (addr & 0xff));
+ __t4_pci_out(wc, WC_LDATA, value);
+ __t4_pci_out(wc, WC_LADDR, (unit << 12) | (addr & 0x1ff) | (1 << 11));
+ __t4_pci_out(wc, WC_LADDR, (unit << 12) | (addr & 0x1ff) | (1 << 11) | WC_LWRITE);
+ __t4_pci_out(wc, WC_LADDR, (unit << 12) | (addr & 0x1ff) | (1 << 11));
+ __t4_pci_out(wc, WC_LADDR, (unit << 12) | (addr & 0x1ff));
+ __t4_pci_out(wc, WC_LADDR, 0);
+ if (debug & DEBUG_REGS) printk("Write complete\n");
+
+
+#if 0
+ { unsigned int tmp;
+ tmp = t4_vpm_in(wc, unit, addr);
+ if (tmp != value) {
+ printk("Expected %d from unit %d echo register %d but got %d instead\n", value, unit, addr, tmp);
+ } }
+#endif
+}
+
+static inline void t4_vpm_out(struct t4 *wc, int unit, const unsigned int addr, const unsigned int value)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&wc->reglock, flags);
+ __t4_vpm_out(wc, unit, addr, value);
+ spin_unlock_irqrestore(&wc->reglock, flags);
+}
+
+static void __t4_check_vpm(struct t4 *wc, unsigned int newio)
+{
+ unsigned int digit, regval = 0;
+ int x, i;
+ short energy;
+ static unsigned int lastio = 0;
+ struct t4_span *ts;
+
+ if (debug && (newio != lastio))
+ printk("Last was %08x, new is %08x\n", lastio, newio);
+
+ lastio = newio;
+
+ for(x = 0; x < 8; x++) {
+ if (newio & (1 << (7 - x)))
+ continue;
+ ts = wc->tspans[x%4];
+ /* Start of DTMF detection process */
+ regval = __t4_vpm_in(wc, x, 0xb8);
+ __t4_vpm_out(wc, x, 0xb8, regval); /* Write 1 to clear */
+ regval = regval << 8;
+ regval |= __t4_vpm_in(wc, x, 0xb9);
+ __t4_vpm_out(wc, x, 0xb9, regval & 0xff);
+
+ for(i = 0; (i < MAX_DTMF_DET) && regval; i++) {
+ if(regval & 0x0001) {
+ int channel = (i << 1) + (x >> 2);
+ int base = channel - 1;
+ if (!wc->t1e1)
+ base -= 4;
+ digit = __t4_vpm_in(wc, x, 0xa8 + i);
+ if (digit < 10)
+ digit += '0';
+ else if (digit < 0xe)
+ digit += 'A' - 0xe;
+ else if (digit == 0xe)
+ digit = '*';
+ else if (digit == 0xf)
+ digit = '#';
+ energy = __t4_vpm_in(wc, x, 0x58 + channel);
+ energy = ZT_XLAW(energy, ts->chans);
+ ts->dtmfactive |= (1 << base);
+ ts->dtmfenergy[base] = energy;
+ ts->dtmfdigit[base] = digit;
+ if (ts->dtmfmask & (1 << base))
+ zt_qevent_lock(&ts->span.chans[base], (ZT_EVENT_DTMFDOWN | digit));
+ if (ts->dtmfmutemask & (1 << base)) {
+ /* Mute active receive buffer*/
+ unsigned long flags;
+ struct zt_chan *chan = &ts->span.chans[base];
+ int y;
+ spin_lock_irqsave(&chan->lock, flags);
+ for (y=0;y<chan->numbufs;y++) {
+ if (chan->readidx[y])
+ memset(chan->readbuf[chan->inreadbuf], ZT_XLAW(0, chan), chan->readidx[y]);
+ }
+ spin_unlock_irqrestore(&chan->lock, flags);
+ }
+ if (debug)
+ printk("Digit Seen: %d, Span: %d, channel: %d, energy: %02x, 'channel %d' chip %d\n", digit, x % 4, base + 1, energy, channel, x);
+
+ }
+ regval = regval >> 1;
+ }
+ }
+}
+#endif
+
static void __set_clear(struct t4 *wc, int span)
{
int i,j;
+ int oldnotclear;
unsigned short val=0;
- for (i=0;i<24;i++) {
- j = (i/8);
- if (wc->spans[span].chans[i].flags & ZT_FLAG_CLEAR)
- val |= 1 << (7 - (i % 8));
- if ((i % 8)==7) {
- if (debug)
- printk("Putting %d in register %02x on span %d\n",
- val, 0x2f + j, span + 1);
- __t4_framer_out(wc,span, 0x2f + j, val);
- val = 0;
+ struct t4_span *ts = wc->tspans[span];
+
+ oldnotclear = ts->notclear;
+ if (ts->spantype == TYPE_T1) {
+ for (i=0;i<24;i++) {
+ j = (i/8);
+ if (ts->span.chans[i].flags & ZT_FLAG_CLEAR) {
+ val |= 1 << (7 - (i % 8));
+ ts->notclear &= ~(1 << i);
+ } else
+ ts->notclear |= (1 << i);
+ if ((i % 8)==7) {
+ if (debug)
+ printk("Putting %d in register %02x on span %d\n",
+ val, 0x2f + j, span + 1);
+ __t4_framer_out(wc, span, 0x2f + j, val);
+ val = 0;
+ }
+ }
+ } else {
+ for (i=0;i<31;i++) {
+ if (ts->span.chans[i].flags & ZT_FLAG_CLEAR)
+ ts->notclear &= ~(1 << i);
+ else
+ ts->notclear |= (1 << i);
}
}
+ if (ts->notclear != oldnotclear) {
+ unsigned char reg;
+ reg = __t4_framer_in(wc, span, 0x14);
+ if (ts->notclear)
+ reg &= ~0x08;
+ else
+ reg |= 0x08;
+ __t4_framer_out(wc, span, 0x14, reg);
+ }
}
#if 0
@@ -316,19 +641,103 @@ static void set_clear(struct t4 *wc, int span)
}
#endif
+static int t4_dacs(struct zt_chan *dst, struct zt_chan *src)
+{
+ struct t4 *wc;
+ struct t4_span *ts;
+ wc = dst->pvt;
+ ts = wc->tspans[dst->span->offset];
+ if (src && (src->pvt != dst->pvt)) {
+ if (ts->spanflags & FLAG_2NDGEN)
+ t4_tsi_unassign(wc, dst->span->offset, dst->chanpos);
+ wc = src->pvt;
+ if (ts->spanflags & FLAG_2NDGEN)
+ t4_tsi_unassign(wc, src->span->offset, src->chanpos);
+ if (debug)
+ printk("Unassigning %d/%d by default and...\n", src->span->offset, src->chanpos);
+ if (debug)
+ printk("Unassigning %d/%d by default\n", dst->span->offset, dst->chanpos);
+ return -1;
+ }
+ if (src) {
+ t4_tsi_assign(wc, src->span->offset, src->chanpos, dst->span->offset, dst->chanpos);
+ if (debug)
+ printk("Assigning channel %d/%d -> %d/%d!\n", src->span->offset, src->chanpos, dst->span->offset, dst->chanpos);
+ } else {
+ t4_tsi_unassign(wc, dst->span->offset, dst->chanpos);
+ if (debug)
+ printk("Unassigning channel %d/%d!\n", dst->span->offset, dst->chanpos);
+ }
+ return 0;
+}
+
+#ifdef VPM_SUPPORT
+static int t4_echocan(struct zt_chan *chan, int eclen)
+{
+ struct t4 *wc = chan->pvt;
+ int channel;
+ int unit;
+ if (!wc->vpm)
+ return -ENODEV;
+ unit = chan->span->offset;
+ if (wc->t1e1)
+ channel = chan->chanpos;
+ else
+ channel = chan->chanpos + 4;
+ if ((channel & 1))
+ unit += 4;
+ if(debug & DEBUG_ECHOCAN)
+ printk("echocan: Card is %d, Channel is %d, Span is %d, unit is %d, unit offset is %d length %d\n",
+ wc->num, chan->chanpos, chan->span->offset, unit, channel, eclen);
+ if (eclen)
+ t4_vpm_out(wc,unit,channel,0x3e);
+ else
+ t4_vpm_out(wc,unit,channel,0x01);
+ return 0;
+}
+#endif
+
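A quick worked example of the unit/channel mapping above: on a T1 card (wc->t1e1 == 0), span offset 2, channel position 5 gives channel = 5 + 4 = 9; since 9 is odd, unit = 2 + 4 = 6, so enabling the echo canceller writes 0x3e to register 9 of VPM unit 6, and disabling it writes 0x01 to the same register.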
static int t4_ioctl(struct zt_chan *chan, unsigned int cmd, unsigned long data)
{
struct t4_regs regs;
int x;
+ struct t4 *wc = chan->pvt;
+#ifdef VPM_SUPPORT
+ int j;
+ struct t4_span *ts = wc->tspans[chan->span->offset];
+#endif
+
switch(cmd) {
case WCT4_GET_REGS:
+ wc = chan->pvt;
for (x=0;x<NUM_PCI;x++)
- regs.pci[x] = t4_pci_in(chan->pvt, x);
+ regs.pci[x] = t4_pci_in(wc, x);
for (x=0;x<NUM_REGS;x++)
- regs.regs[x] = t4_framer_in(chan->pvt, chan->span->offset, x);
+ regs.regs[x] = t4_framer_in(wc, chan->span->offset, x);
if (copy_to_user((struct t4_regs *)data, &regs, sizeof(regs)))
return -EFAULT;
+ {
+ static unsigned char filldata = 0;
+ memset(wc->tspans[0]->writechunk, filldata, ZT_CHUNKSIZE * 32);
+ }
break;
+#ifdef VPM_SUPPORT
+ case ZT_TONEDETECT:
+ if (get_user(j, (int *)data))
+ return -EFAULT;
+ wc = chan->pvt;
+ if (!wc->vpm)
+ return -ENOSYS;
+ if (j & ZT_TONEDETECT_ON)
+ ts->dtmfmask |= (1 << (chan->chanpos - 1));
+ else
+ ts->dtmfmask &= ~(1 << (chan->chanpos - 1));
+ if (j & ZT_TONEDETECT_MUTE)
+ ts->dtmfmutemask |= (1 << (chan->chanpos - 1));
+ else
+ ts->dtmfmutemask &= ~(1 << (chan->chanpos - 1));
+ return 0;
+#endif
default:
return -ENOTTY;
}
@@ -337,9 +746,10 @@ static int t4_ioctl(struct zt_chan *chan, unsigned int cmd, unsigned long data)
static int t4_maint(struct zt_span *span, int cmd)
{
- struct t4 *wc = span->pvt;
+ struct t4_span *ts = span->pvt;
+ struct t4 *wc = ts->owner;
- if (wc->spantype[span->offset] == TYPE_E1) {
+ if (ts->spantype == TYPE_E1) {
switch(cmd) {
case ZT_MAINT_NONE:
printk("XXX Turn off local and remote loops E1 XXX\n");
@@ -396,12 +806,13 @@ static int t4_rbsbits(struct zt_chan *chan, int bits)
u_char m,c;
int k,n,b;
struct t4 *wc = chan->pvt;
+ struct t4_span *ts = wc->tspans[chan->span->offset];
unsigned long flags;
- if(debug) printk("Setting bits to %d on channel %s\n", bits, chan->name);
+ if(debug & DEBUG_RBS) printk("Setting bits to %d on channel %s\n", bits, chan->name);
spin_lock_irqsave(&wc->reglock, flags);
k = chan->span->offset;
- if (wc->spantype[k] == TYPE_E1) { /* do it E1 way */
+ if (ts->spantype == TYPE_E1) { /* do it E1 way */
if (chan->chanpos == 16) {
spin_unlock_irqrestore(&wc->reglock, flags);
return 0;
@@ -409,37 +820,37 @@ static int t4_rbsbits(struct zt_chan *chan, int bits)
n = chan->chanpos - 1;
if (chan->chanpos > 15) n--;
b = (n % 15);
- c = wc->txsigs[k][b];
+ c = ts->txsigs[b];
m = (n / 15) << 2; /* nibble selector */
c &= (0xf << m); /* keep the other nibble */
c |= (bits & 0xf) << (4 - m); /* put our new nibble here */
- wc->txsigs[k][b] = c;
+ ts->txsigs[b] = c;
/* output them to the chip */
__t4_framer_out(wc,k,0x71 + b,c);
- } else if (wc->spans[k].lineconfig & ZT_CONFIG_D4) {
+ } else if (ts->span.lineconfig & ZT_CONFIG_D4) {
n = chan->chanpos - 1;
b = (n/4);
- c = wc->txsigs[k][b];
+ c = ts->txsigs[b];
m = ((3 - (n % 4)) << 1); /* nibble selector */
c &= ~(0x3 << m); /* keep the other nibble */
c |= ((bits >> 2) & 0x3) << m; /* put our new nibble here */
- wc->txsigs[k][b] = c;
+ ts->txsigs[b] = c;
/* output them to the chip */
__t4_framer_out(wc,k,0x70 + b,c);
__t4_framer_out(wc,k,0x70 + b + 6,c);
- } else if (wc->spans[k].lineconfig & ZT_CONFIG_ESF) {
+ } else if (ts->span.lineconfig & ZT_CONFIG_ESF) {
n = chan->chanpos - 1;
b = (n/2);
- c = wc->txsigs[k][b];
+ c = ts->txsigs[b];
m = ((n % 2) << 2); /* nibble selector */
c &= (0xf << m); /* keep the other nibble */
c |= (bits & 0xf) << (4 - m); /* put our new nibble here */
- wc->txsigs[k][b] = c;
+ ts->txsigs[b] = c;
/* output them to the chip */
__t4_framer_out(wc,k,0x70 + b,c);
}
spin_unlock_irqrestore(&wc->reglock, flags);
- if (debug)
+ if (debug & DEBUG_RBS)
printk("Finished setting RBS bits\n");
return 0;
}
@@ -449,7 +860,8 @@ static int t4_shutdown(struct zt_span *span)
int tspan;
int wasrunning;
unsigned long flags;
- struct t4 *wc = span->pvt;
+ struct t4_span *ts = span->pvt;
+ struct t4 *wc = ts->owner;
tspan = span->offset + 1;
if (tspan < 0) {
@@ -464,10 +876,10 @@ static int t4_shutdown(struct zt_span *span)
if (wasrunning)
wc->spansstarted--;
__t4_set_led(wc, span->offset, WC_OFF);
- if ((!(wc->spans[0].flags & ZT_FLAG_RUNNING)) &&
- (!(wc->spans[1].flags & ZT_FLAG_RUNNING)) &&
- (!(wc->spans[2].flags & ZT_FLAG_RUNNING)) &&
- (!(wc->spans[3].flags & ZT_FLAG_RUNNING))) {
+ if ((!(wc->tspans[0]->span.flags & ZT_FLAG_RUNNING)) &&
+ (!(wc->tspans[1]->span.flags & ZT_FLAG_RUNNING)) &&
+ (!(wc->tspans[2]->span.flags & ZT_FLAG_RUNNING)) &&
+ (!(wc->tspans[3]->span.flags & ZT_FLAG_RUNNING))) {
/* No longer in use, disable interrupts */
printk("TE410P: Disabling interrupts since there are no active spans\n");
wc->dmactrl = 0x0;
@@ -477,7 +889,7 @@ static int t4_shutdown(struct zt_span *span)
__t4_set_timing_source(wc,4);
} else wc->checktiming = 1;
spin_unlock_irqrestore(&wc->reglock, flags);
- if (debug)
+ if (debug & DEBUG_MAIN)
printk("Span %d (%s) shutdown\n", span->spanno, span->name);
return 0;
}
@@ -485,9 +897,11 @@ static int t4_shutdown(struct zt_span *span)
static int t4_spanconfig(struct zt_span *span, struct zt_lineconfig *lc)
{
int i;
- struct t4 *wc = span->pvt;
+ struct t4_span *ts = span->pvt;
+ struct t4 *wc = ts->owner;
- if (debug)
+ printk("About to enter spanconfig!\n");
+ if (debug & DEBUG_MAIN)
printk("TE410P: Configuring span %d\n", span->spanno);
/* XXX We assume lineconfig is okay and shouldn't XXX */
span->lineconfig = lc->lineconfig;
@@ -500,21 +914,22 @@ static int t4_spanconfig(struct zt_span *span, struct zt_lineconfig *lc)
/* remove this span number from the current sync sources, if there */
for(i = 0; i < 4; i++) {
- if (wc->syncs[i] == span->spanno) {
- wc->syncs[i] = 0;
- wc->psyncs[i] = 0;
+ if (wc->tspans[i]->sync == span->spanno) {
+ wc->tspans[i]->sync = 0;
+ wc->tspans[i]->psync = 0;
}
}
- wc->syncpos[span->offset] = lc->sync;
+ wc->tspans[span->offset]->syncpos = lc->sync;
/* if a sync src, put it in proper place */
if (lc->sync) {
- wc->syncs[lc->sync - 1] = span->spanno;
- wc->psyncs[lc->sync - 1] = span->offset + 1;
+ wc->tspans[lc->sync - 1]->sync = span->spanno;
+ wc->tspans[lc->sync - 1]->psync = span->offset + 1;
}
wc->checktiming = 1;
/* If we're already running, then go ahead and apply the changes */
if (span->flags & ZT_FLAG_RUNNING)
return t4_startup(span);
+ printk("Done with spanconfig!\n");
return 0;
}
@@ -524,15 +939,13 @@ static int t4_chanconfig(struct zt_chan *chan, int sigtype)
unsigned long flags;
struct t4 *wc = chan->pvt;
- alreadyrunning = wc->spans[chan->span->offset].flags & ZT_FLAG_RUNNING;
- if (debug) {
+ alreadyrunning = wc->tspans[chan->span->offset]->span.flags & ZT_FLAG_RUNNING;
+ if (debug & DEBUG_MAIN) {
if (alreadyrunning)
printk("TE410P: Reconfigured channel %d (%s) sigtype %d\n", chan->channo, chan->name, sigtype);
else
printk("TE410P: Configured channel %d (%s) sigtype %d\n", chan->channo, chan->name, sigtype);
}
- /* nothing more to do if an E1 */
- if (wc->spantype[chan->span->offset] == TYPE_E1) return 0;
spin_lock_irqsave(&wc->reglock, flags);
if (alreadyrunning)
__set_clear(wc, chan->span->offset);
@@ -564,40 +977,61 @@ static int t4_close(struct zt_chan *chan)
static void init_spans(struct t4 *wc)
{
int x,y,c;
+ int gen2;
+ int offset = 1;
+ struct t4_span *ts;
+
+ gen2 = (wc->tspans[0]->spanflags & FLAG_2NDGEN);
+ if (!wc->t1e1)
+ offset += 4;
for (x=0;x<4;x++) {
- sprintf(wc->spans[x].name, "TE4/%d/%d",
+ ts = wc->tspans[x];
+ sprintf(ts->span.name, "TE4/%d/%d",
wc->num, x + 1);
- sprintf(wc->spans[x].desc, "TE410P (PCI) Card %d Span %d", wc->num, x+1);
- wc->spans[x].spanconfig = t4_spanconfig;
- wc->spans[x].chanconfig = t4_chanconfig;
- wc->spans[x].startup = t4_startup;
- wc->spans[x].shutdown = t4_shutdown;
- wc->spans[x].rbsbits = t4_rbsbits;
- wc->spans[x].maint = t4_maint;
- wc->spans[x].open = t4_open;
- wc->spans[x].close = t4_close;
- if (wc->spantype[x] == TYPE_T1) {
- wc->spans[x].channels = 24;
- wc->spans[x].deflaw = ZT_LAW_MULAW;
+ sprintf(ts->span.desc, "TE410P (PCI) Card %d Span %d", wc->num, x+1);
+ ts->span.spanconfig = t4_spanconfig;
+ ts->span.chanconfig = t4_chanconfig;
+ ts->span.startup = t4_startup;
+ ts->span.shutdown = t4_shutdown;
+ ts->span.rbsbits = t4_rbsbits;
+ ts->span.maint = t4_maint;
+ ts->span.open = t4_open;
+ ts->span.close = t4_close;
+ if (ts->spantype == TYPE_T1 || ts->spantype == TYPE_J1) {
+ ts->span.channels = 24;
+ ts->span.deflaw = ZT_LAW_MULAW;
} else {
- wc->spans[x].channels = 31;
- wc->spans[x].deflaw = ZT_LAW_ALAW;
+ ts->span.channels = 31;
+ ts->span.deflaw = ZT_LAW_ALAW;
+ }
+ ts->span.chans = ts->chans;
+ ts->span.flags = ZT_FLAG_RBS;
+ ts->span.linecompat = ZT_CONFIG_AMI | ZT_CONFIG_B8ZS | ZT_CONFIG_D4 | ZT_CONFIG_ESF;
+ ts->span.ioctl = t4_ioctl;
+ if (gen2) {
+#ifdef VPM_SUPPORT
+ ts->span.echocan = t4_echocan;
+#endif
+ ts->span.dacs = t4_dacs;
}
- wc->spans[x].chans = wc->chans[x];
- wc->spans[x].flags = ZT_FLAG_RBS;
- wc->spans[x].linecompat = ZT_CONFIG_AMI | ZT_CONFIG_B8ZS | ZT_CONFIG_D4 | ZT_CONFIG_ESF;
- wc->spans[x].ioctl = t4_ioctl;
- wc->spans[x].pvt = wc;
- wc->spans[x].offset = x;
- init_waitqueue_head(&wc->spans[x].maintq);
- for (y=0;y<wc->spans[x].channels;y++) {
- struct zt_chan *mychans = wc->chans[x] + y;
+ ts->span.pvt = ts;
+ ts->owner = wc;
+ ts->span.offset = x;
+ ts->writechunk = (void *)(wc->writechunk + x * 32 * 2);
+ ts->readchunk = (void *)(wc->readchunk + x * 32 * 2);
+ init_waitqueue_head(&ts->span.maintq);
+ for (y=0;y<wc->tspans[x]->span.channels;y++) {
+ struct zt_chan *mychans = ts->chans + y;
sprintf(mychans->name, "TE4/%d/%d/%d", wc->num, x + 1, y + 1);
mychans->sigcap = ZT_SIG_EM | ZT_SIG_CLEAR | ZT_SIG_FXSLS | ZT_SIG_FXSGS | ZT_SIG_FXSKS |
ZT_SIG_FXOLS | ZT_SIG_FXOGS | ZT_SIG_FXOKS | ZT_SIG_CAS | ZT_SIG_EM_E1 | ZT_SIG_DACS_RBS;
- c = (x * wc->spans[x].channels) + y;
+ c = (x * ts->span.channels) + y;
mychans->pvt = wc;
mychans->chanpos = y + 1;
+ if (gen2) {
+ mychans->writechunk = (void *)(wc->writechunk + (x * 32 + y + offset) * 2);
+ mychans->readchunk = (void *)(wc->readchunk + (x * 32 + y + offset) * 2);
+ }
}
}
}
@@ -622,19 +1056,22 @@ static void t4_serial_setup(struct t4 *wc, int unit)
}
/* Configure interrupts */
- t4_framer_out(wc, unit, 0x46, 0x40); /* GCR: Interrupt on Activation/Deactivation of AIX, LOS */
+ t4_framer_out(wc, unit, 0x46, 0x00); /* GCR: Interrupt on Activation/Deactivation of each */
/* Configure system interface */
t4_framer_out(wc, unit, 0x3e, 0xc2); /* SIC1: 8.192 Mhz clock/bus, double buffer receive / transmit, byte interleaved */
- t4_framer_out(wc, unit, 0x3f, unit << 1); /* SIC2: No FFS, no center receive eliastic buffer, phase */
+ t4_framer_out(wc, unit, 0x3f, 0x20 | (unit << 1)); /* SIC2: No FFS, no center receive elastic buffer, phase */
t4_framer_out(wc, unit, 0x40, 0x04); /* SIC3: Edges for capture */
t4_framer_out(wc, unit, 0x45, 0x00); /* CMR2: We provide sync and clock for tx and rx. */
- if (!wc->t1e1) {
+ if (!wc->t1e1) { /* T1 mode */
t4_framer_out(wc, unit, 0x22, 0x03); /* XC0: Normal operation of Sa-bits */
t4_framer_out(wc, unit, 0x23, 0x84); /* XC1: 0 offset */
- t4_framer_out(wc, unit, 0x24, 0x03); /* RC0: Just shy of 1023 */
+ if (wc->tspans[unit]->spantype == TYPE_J1)
+ t4_framer_out(wc, unit, 0x24, 0x83); /* RC0: Just shy of 1023 */
+ else
+ t4_framer_out(wc, unit, 0x24, 0x03); /* RC0: Just shy of 1023 */
t4_framer_out(wc, unit, 0x25, 0x84); /* RC1: The rest of RC0 */
- } else {
+ } else { /* E1 mode */
t4_framer_out(wc, unit, 0x22, 0x00); /* XC0: Normal operation of Sa-bits */
t4_framer_out(wc, unit, 0x23, 0x04); /* XC1: 0 offset */
t4_framer_out(wc, unit, 0x24, 0x04); /* RC0: Just shy of 1023 */
@@ -647,7 +1084,7 @@ static void t4_serial_setup(struct t4 *wc, int unit)
t4_framer_out(wc, unit, 0x82, 0x65); /* PC3: Some unused stuff */
t4_framer_out(wc, unit, 0x83, 0x35); /* PC4: Some more unused stuff */
t4_framer_out(wc, unit, 0x84, 0x01); /* PC5: XMFS active low, SCLKR is input, RCLK is output */
- if (debug)
+ if (debug & DEBUG_MAIN)
printk("Successfully initialized serial bus for unit %d\n", unit);
}
@@ -661,10 +1098,12 @@ static void __t4_set_timing_source(struct t4 *wc, int unit)
timing |= (unit << 6);
for (x=0;x<4;x++) /* set all 4 receive reference clocks to unit */
__t4_framer_out(wc, x, 0x44, timing);
- __t4_pci_out(wc, WC_DMACTRL, wc->dmactrl | (1 << 29));
+ wc->dmactrl |= (1 << 29);
+ __t4_pci_out(wc, WC_DMACTRL, wc->dmactrl);
} else {
for (x=0;x<4;x++) /* set each receive reference clock to itself */
__t4_framer_out(wc, x, 0x44, timing | (x << 6));
+ wc->dmactrl &= ~(1 << 29);
__t4_pci_out(wc, WC_DMACTRL, wc->dmactrl);
}
wc->syncsrc = unit;
@@ -673,9 +1112,9 @@ static void __t4_set_timing_source(struct t4 *wc, int unit)
else
unit++;
for (x=0;x<4;x++)
- wc->spans[x].syncsrc = unit;
+ wc->tspans[x]->span.syncsrc = unit;
} else {
- if (debug)
+ if (debug & DEBUG_MAIN)
printk("TE410P: Timing source already set to %d\n", unit);
}
#if 0
@@ -688,11 +1127,11 @@ static void __t4_set_timing_source_auto(struct t4 *wc)
int x;
wc->checktiming = 0;
for (x=0;x<4;x++) {
- if (wc->syncs[x]) {
- if ((wc->spans[wc->syncs[x] - 1].flags & ZT_FLAG_RUNNING) &&
- !(wc->spans[wc->syncs[x] - 1].alarms & (ZT_ALARM_RED | ZT_ALARM_BLUE) )) {
- /* Valid timing sourc e*/
- __t4_set_timing_source(wc, wc->syncs[x] - 1);
+ if (wc->tspans[x]->sync) {
+ if ((wc->tspans[wc->tspans[x]->psync - 1]->span.flags & ZT_FLAG_RUNNING) &&
+ !(wc->tspans[wc->tspans[x]->psync - 1]->span.alarms & (ZT_ALARM_RED | ZT_ALARM_BLUE) )) {
+ /* Valid timing source */
+ __t4_set_timing_source(wc, wc->tspans[x]->psync - 1);
return;
}
}
@@ -747,7 +1186,7 @@ static void __t4_configure_t1(struct t4 *wc, int unit, int lineconfig, int txlev
__t4_framer_out(wc, unit, 0x3a, lim2); /* LIM2: 50% peak amplitude is a "1" */
__t4_framer_out(wc, unit, 0x38, 0x0a); /* PCD: LOS after 176 consecutive "zeros" */
__t4_framer_out(wc, unit, 0x39, 0x15); /* PCR: 22 "ones" clear LOS */
-
+
/* Generate pulse mask for T1 */
switch(mytxlevel) {
case 3:
@@ -772,6 +1211,23 @@ static void __t4_configure_t1(struct t4 *wc, int unit, int lineconfig, int txlev
__t4_framer_out(wc, unit, 0x28, 0x01); /* XPM2 */
break;
}
+
+ __t4_framer_out(wc, unit, 0x14, 0xff); /* IMR0: We care about CAS changes, etc */
+ __t4_framer_out(wc, unit, 0x15, 0xff); /* IMR1: We care about nothing */
+ __t4_framer_out(wc, unit, 0x16, 0x00); /* IMR2: We care about all the alarm stuff! */
+ if (debugslips) {
+ __t4_framer_out(wc, unit, 0x17, 0xf4); /* IMR3: We care about AIS and friends */
+ __t4_framer_out(wc, unit, 0x18, 0x3f); /* IMR4: We care about slips on transmit */
+ } else {
+ __t4_framer_out(wc, unit, 0x17, 0xf7); /* IMR3: We care about AIS and friends */
+ __t4_framer_out(wc, unit, 0x18, 0xff); /* IMR4: We don't care about slips on transmit */
+ }
+
+ if (!polling) {
+ __t4_check_alarms(wc, unit);
+ __t4_check_sigbits(wc, unit);
+ }
+
printk("TE410P: Span %d configured for %s/%s\n", unit + 1, framing, line);
}
@@ -779,6 +1235,7 @@ static void __t4_configure_e1(struct t4 *wc, int unit, int lineconfig)
{
unsigned int fmr2, fmr1, fmr0;
unsigned int cas = 0;
+ unsigned int imr3extra=0;
char *crc4 = "";
char *framing, *line;
fmr1 = 0x44; /* FMR1: E1 mode, Automatic force resync, PCM30 mode, 8.192 Mhz backplane, no XAIS */
@@ -803,6 +1260,7 @@ static void __t4_configure_e1(struct t4 *wc, int unit, int lineconfig)
}
if (lineconfig & ZT_CONFIG_CCS) {
framing = "CCS";
+ imr3extra = 0x28;
} else {
framing = "CAS";
cas = 0x40;
@@ -841,17 +1299,36 @@ static void __t4_configure_e1(struct t4 *wc, int unit, int lineconfig)
__t4_framer_out(wc, unit, 0x26, 0x54); /* XPM0 */
__t4_framer_out(wc, unit, 0x27, 0x02); /* XPM1 */
__t4_framer_out(wc, unit, 0x28, 0x00); /* XPM2 */
+
+ __t4_framer_out(wc, unit, 0x14, 0xff); /* IMR0: We care about CRC errors, CAS changes, etc */
+ __t4_framer_out(wc, unit, 0x15, 0x3f); /* IMR1: We care about loopup / loopdown */
+ __t4_framer_out(wc, unit, 0x16, 0x00); /* IMR2: We care about all the alarm stuff! */
+ if (debugslips) {
+ __t4_framer_out(wc, unit, 0x17, 0xc4 | imr3extra); /* IMR3: We care about AIS and friends */
+ __t4_framer_out(wc, unit, 0x18, 0x3f); /* IMR4: We care about slips on transmit */
+ } else {
+ __t4_framer_out(wc, unit, 0x17, 0xc7 | imr3extra); /* IMR3: We care about AIS and friends */
+ __t4_framer_out(wc, unit, 0x18, 0xff); /* IMR4: We don't care about slips on transmit */
+ }
+ if (!polling) {
+ __t4_check_alarms(wc, unit);
+ __t4_check_sigbits(wc, unit);
+ }
printk("TE410P: Span %d configured for %s/%s%s\n", unit + 1, framing, line, crc4);
}
static int t4_startup(struct zt_span *span)
{
+#ifdef SUPPORT_GEN1
int i;
+#endif
int tspan;
unsigned long flags;
int alreadyrunning;
- struct t4 *wc = span->pvt;
+ struct t4_span *ts = span->pvt;
+ struct t4 *wc = ts->owner;
+ printk("About to enter startup!\n");
tspan = span->offset + 1;
if (tspan < 0) {
printk("TE410P: Span '%d' isn't us?\n", span->spanno);
@@ -862,59 +1339,86 @@ static int t4_startup(struct zt_span *span)
alreadyrunning = span->flags & ZT_FLAG_RUNNING;
+#ifdef SUPPORT_GEN1
/* initialize the start value for the entire chunk of last ec buffer */
for(i = 0; i < span->channels; i++)
{
- memset(wc->ec_chunk1[span->offset][i],
+ memset(ts->ec_chunk1[i],
ZT_LIN2X(0,&span->chans[i]),ZT_CHUNKSIZE);
- memset(wc->ec_chunk2[span->offset][i],
+ memset(ts->ec_chunk2[i],
ZT_LIN2X(0,&span->chans[i]),ZT_CHUNKSIZE);
}
+#endif
/* Force re-evaluation of timing source */
if (timingcable)
wc->syncsrc = -1;
- if (wc->spantype[span->offset] == TYPE_E1) { /* if this is an E1 card */
+ if (ts->spantype == TYPE_E1) { /* if this is an E1 card */
__t4_configure_e1(wc, span->offset, span->lineconfig);
} else { /* is a T1 card */
__t4_configure_t1(wc, span->offset, span->lineconfig, span->txlevel);
- __set_clear(wc, span->offset);
}
+ /* Note clear channel status */
+ __set_clear(wc, span->offset);
if (!alreadyrunning) {
span->flags |= ZT_FLAG_RUNNING;
wc->spansstarted++;
/* enable interrupts */
/* Start DMA, enabling DMA interrupts on read only */
- wc->dmactrl = 0xc0000003 | (1 << 29);
+ if (ts->spanflags & FLAG_2NDGEN)
+#ifdef VPM_SUPPORT
+ wc->dmactrl = 0xc0000000 | (1 << 29) | wc->vpm;
+#else
+ wc->dmactrl = 0xc0000000 | (1 << 29);
+#endif
+ else
+#ifdef VPM_SUPPORT
+ wc->dmactrl = 0xc0000003 | (1 << 29) | wc->vpm;
+#else
+ wc->dmactrl = 0xc0000003 | (1 << 29);
+#endif
if (noburst)
wc->dmactrl |= (1 << 26);
__t4_pci_out(wc, WC_DMACTRL, wc->dmactrl);
+ if (!polling) {
+ __t4_check_alarms(wc, span->offset);
+ __t4_check_sigbits(wc, span->offset);
+ }
}
spin_unlock_irqrestore(&wc->reglock, flags);
- if (wc->syncs[0] == span->spanno) printk("SPAN %d: Primary Sync Source\n",span->spanno);
- if (wc->syncs[1] == span->spanno) printk("SPAN %d: Secondary Sync Source\n",span->spanno);
- if (wc->syncs[2] == span->spanno) printk("SPAN %d: Tertiary Sync Source\n",span->spanno);
- if (wc->syncs[3] == span->spanno) printk("SPAN %d: Quaternary Sync Source\n",span->spanno);
+ if (wc->tspans[0]->sync == span->spanno) printk("SPAN %d: Primary Sync Source\n",span->spanno);
+ if (wc->tspans[1]->sync == span->spanno) printk("SPAN %d: Secondary Sync Source\n",span->spanno);
+ if (wc->tspans[2]->sync == span->spanno) printk("SPAN %d: Tertiary Sync Source\n",span->spanno);
+ if (wc->tspans[3]->sync == span->spanno) printk("SPAN %d: Quaternary Sync Source\n",span->spanno);
+#ifdef VPM_SUPPORT
+ if (!alreadyrunning && !wc->vpm) {
+ wait_a_little();
+ t4_vpm_init(wc);
+ }
+#endif
+ printk("Completed startup!\n");
return 0;
}
+#ifdef SUPPORT_GEN1
static inline void e1_check(struct t4 *wc, int span, int val)
{
- if ((wc->spans[span].channels > 24) &&
- (wc->spans[span].flags & ZT_FLAG_RUNNING) &&
- !(wc->spans[span].alarms) &&
+ struct t4_span *ts = wc->tspans[span];
+ if ((ts->span.channels > 24) &&
+ (ts->span.flags & ZT_FLAG_RUNNING) &&
+ !(ts->span.alarms) &&
(!wc->e1recover)) {
if (val != 0x1b) {
- wc->e1check[span]++;
+ ts->e1check++;
} else
- wc->e1check[span] = 0;
- if (wc->e1check[span] > 100) {
+ ts->e1check = 0;
+ if (ts->e1check > 100) {
/* Wait 1000 ms */
wc->e1recover = 1000 * 8;
- memset(wc->e1check, 0, sizeof(wc->e1check));
- if (debug)
+ wc->tspans[0]->e1check = wc->tspans[1]->e1check = wc->tspans[2]->e1check = wc->tspans[3]->e1check = 0;
+ if (debug & DEBUG_MAIN)
printk("Detected loss of E1 alignment on span %d!\n", span);
t4_reset_dma(wc);
}
@@ -944,18 +1448,18 @@ static void t4_receiveprep(struct t4 *wc, int irq)
}
if (dbl) {
for (x=0;x<4;x++)
- wc->spans[x].irqmisses++;
- if (debug)
+ wc->tspans[x]->irqmisses++;
+ if (debug & DEBUG_MAIN)
printk("TE410P: Double/missed interrupt detected\n");
}
for (x=0;x<ZT_CHUNKSIZE;x++) {
for (z=0;z<24;z++) {
/* All T1/E1 channels */
tmp = readchunk[z+1+offset];
- wc->spans[3].chans[z].readchunk[x] = tmp & 0xff;
- wc->spans[2].chans[z].readchunk[x] = (tmp & 0xff00) >> 8;
- wc->spans[1].chans[z].readchunk[x] = (tmp & 0xff0000) >> 16;
- wc->spans[0].chans[z].readchunk[x] = tmp >> 24;
+ wc->tspans[3]->span.chans[z].readchunk[x] = tmp & 0xff;
+ wc->tspans[2]->span.chans[z].readchunk[x] = (tmp & 0xff00) >> 8;
+ wc->tspans[1]->span.chans[z].readchunk[x] = (tmp & 0xff0000) >> 16;
+ wc->tspans[0]->span.chans[z].readchunk[x] = tmp >> 24;
}
if (wc->t1e1) {
if (wc->e1recover > 0)
@@ -968,37 +1472,110 @@ static void t4_receiveprep(struct t4 *wc, int irq)
for (z=24;z<31;z++) {
/* Only E1 channels now */
tmp = readchunk[z+1];
- if (wc->spans[3].channels > 24)
- wc->spans[3].chans[z].readchunk[x] = tmp & 0xff;
- if (wc->spans[2].channels > 24)
- wc->spans[2].chans[z].readchunk[x] = (tmp & 0xff00) >> 8;
- if (wc->spans[1].channels > 24)
- wc->spans[1].chans[z].readchunk[x] = (tmp & 0xff0000) >> 16;
- if (wc->spans[0].channels > 24)
- wc->spans[0].chans[z].readchunk[x] = tmp >> 24;
+ if (wc->tspans[3]->span.channels > 24)
+ wc->tspans[3]->span.chans[z].readchunk[x] = tmp & 0xff;
+ if (wc->tspans[2]->span.channels > 24)
+ wc->tspans[2]->span.chans[z].readchunk[x] = (tmp & 0xff00) >> 8;
+ if (wc->tspans[1]->span.channels > 24)
+ wc->tspans[1]->span.chans[z].readchunk[x] = (tmp & 0xff0000) >> 16;
+ if (wc->tspans[0]->span.channels > 24)
+ wc->tspans[0]->span.chans[z].readchunk[x] = tmp >> 24;
}
}
/* Advance pointer by 4 TDM frame lengths */
readchunk += 32;
}
for (x=0;x<4;x++) {
- if (wc->spans[x].flags & ZT_FLAG_RUNNING) {
- for (y=0;y<wc->spans[x].channels;y++) {
+ if (wc->tspans[x]->span.flags & ZT_FLAG_RUNNING) {
+ for (y=0;y<wc->tspans[x]->span.channels;y++) {
/* Echo cancel double buffered data */
- zt_ec_chunk(&wc->spans[x].chans[y],
- wc->spans[x].chans[y].readchunk,
- wc->ec_chunk2[x][y]);
- memcpy(wc->ec_chunk2[x][y],wc->ec_chunk1[x][y],
+ zt_ec_chunk(&wc->tspans[x]->span.chans[y],
+ wc->tspans[x]->span.chans[y].readchunk,
+ wc->tspans[x]->ec_chunk2[y]);
+ memcpy(wc->tspans[x]->ec_chunk2[y],wc->tspans[x]->ec_chunk1[y],
ZT_CHUNKSIZE);
- memcpy(wc->ec_chunk1[x][y],
- wc->spans[x].chans[y].writechunk,
+ memcpy(wc->tspans[x]->ec_chunk1[y],
+ wc->tspans[x]->span.chans[y].writechunk,
ZT_CHUNKSIZE);
}
- zt_receive(&wc->spans[x]);
+ zt_receive(&wc->tspans[x]->span);
+ }
+ }
+}
+#endif
+
+#if (ZT_CHUNKSIZE != 8)
+#error Sorry, nextgen does not support chunksize != 8
+#endif
+
+static inline void __receive_span(struct t4_span *ts)
+{
+#ifdef VPM_SUPPORT
+ int y;
+ unsigned int merged;
+ if ((merged = ts->dtmfactive & ts->dtmfmutemask)) {
+ for (y=0;y<ts->span.channels;y++) {
+ /* Mute any DTMFs which are supposed to be muted */
+ if (merged & (1 << y))
+ memset(ts->span.chans[y].readchunk, ZT_XLAW(0, (ts->span.chans + y)), ZT_CHUNKSIZE);
}
}
+#endif
+
+#ifdef ENABLE_PREFETCH
+ prefetch((void *)(ts->readchunk));
+ prefetch((void *)(ts->writechunk));
+ prefetch((void *)(ts->readchunk + 8));
+ prefetch((void *)(ts->writechunk + 8));
+ prefetch((void *)(ts->readchunk + 16));
+ prefetch((void *)(ts->writechunk + 16));
+ prefetch((void *)(ts->readchunk + 24));
+ prefetch((void *)(ts->writechunk + 24));
+ prefetch((void *)(ts->readchunk + 32));
+ prefetch((void *)(ts->writechunk + 32));
+ prefetch((void *)(ts->readchunk + 40));
+ prefetch((void *)(ts->writechunk + 40));
+ prefetch((void *)(ts->readchunk + 48));
+ prefetch((void *)(ts->writechunk + 48));
+ prefetch((void *)(ts->readchunk + 56));
+ prefetch((void *)(ts->writechunk + 56));
+#endif
+
+ zt_ec_span(&ts->span);
+ zt_receive(&ts->span);
+}
+
+static inline void __transmit_span(struct t4_span *ts)
+{
+ zt_transmit(&ts->span);
}
+#ifdef ENABLE_WORKQUEUES
+static void workq_handlespan(void *data)
+{
+ struct t4_span *ts = data;
+ struct t4 *wc = ts->owner;
+
+ __receive_span(ts);
+ __transmit_span(ts);
+ atomic_dec(&wc->worklist);
+ if (!atomic_read(&wc->worklist))
+ t4_pci_out(wc, WC_INTR, 0);
+}
+#else
+static void t4_prep_gen2(struct t4 *wc)
+{
+ int x;
+ for (x=0;x<4;x++) {
+ if (wc->tspans[x]->span.flags & ZT_FLAG_RUNNING) {
+ __receive_span(wc->tspans[x]);
+ __transmit_span(wc->tspans[x]);
+ }
+ }
+}
+
+#endif
+#ifdef SUPPORT_GEN1
static void t4_transmitprep(struct t4 *wc, int irq)
{
volatile unsigned int *writechunk;
@@ -1011,35 +1588,35 @@ static void t4_transmitprep(struct t4 *wc, int irq)
/* First part */
writechunk = wc->writechunk + 1;
} else {
- writechunk = wc->writechunk + ZT_CHUNKSIZE * 32 + 1;
+ writechunk = wc->writechunk + ZT_CHUNKSIZE * 32 + 1;
}
for (y=0;y<4;y++) {
- if (wc->spans[y].flags & ZT_FLAG_RUNNING)
- zt_transmit(&wc->spans[y]);
+ if (wc->tspans[y]->span.flags & ZT_FLAG_RUNNING)
+ zt_transmit(&wc->tspans[y]->span);
}
for (x=0;x<ZT_CHUNKSIZE;x++) {
/* Once per chunk */
for (z=0;z<24;z++) {
/* All T1/E1 channels */
- tmp = (wc->spans[3].chans[z].writechunk[x]) |
- (wc->spans[2].chans[z].writechunk[x] << 8) |
- (wc->spans[1].chans[z].writechunk[x] << 16) |
- (wc->spans[0].chans[z].writechunk[x] << 24);
+ tmp = (wc->tspans[3]->span.chans[z].writechunk[x]) |
+ (wc->tspans[2]->span.chans[z].writechunk[x] << 8) |
+ (wc->tspans[1]->span.chans[z].writechunk[x] << 16) |
+ (wc->tspans[0]->span.chans[z].writechunk[x] << 24);
writechunk[z+offset] = tmp;
}
if (wc->t1e1) {
for (z=24;z<31;z++) {
/* Only E1 channels now */
tmp = 0;
- if (wc->spans[3].channels > 24)
- tmp |= wc->spans[3].chans[z].writechunk[x];
- if (wc->spans[2].channels > 24)
- tmp |= (wc->spans[2].chans[z].writechunk[x] << 8);
- if (wc->spans[1].channels > 24)
- tmp |= (wc->spans[1].chans[z].writechunk[x] << 16);
- if (wc->spans[0].channels > 24)
- tmp |= (wc->spans[0].chans[z].writechunk[x] << 24);
+ if (wc->tspans[3]->span.channels > 24)
+ tmp |= wc->tspans[3]->span.chans[z].writechunk[x];
+ if (wc->tspans[2]->span.channels > 24)
+ tmp |= (wc->tspans[2]->span.chans[z].writechunk[x] << 8);
+ if (wc->tspans[1]->span.channels > 24)
+ tmp |= (wc->tspans[1]->span.chans[z].writechunk[x] << 16);
+ if (wc->tspans[0]->span.channels > 24)
+ tmp |= (wc->tspans[0]->span.chans[z].writechunk[x] << 24);
writechunk[z] = tmp;
}
}
@@ -1048,50 +1625,56 @@ static void t4_transmitprep(struct t4 *wc, int irq)
}
}
+#endif
static void __t4_check_sigbits(struct t4 *wc, int span)
{
int a,i,rxs;
- if (!(wc->spans[span].flags & ZT_FLAG_RUNNING))
+ struct t4_span *ts = wc->tspans[span];
+
+ if (debug & DEBUG_RBS)
+ printk("Checking sigbits on span %d\n", span + 1);
+
+ if (!(ts->span.flags & ZT_FLAG_RUNNING))
return;
- if (wc->spantype[span] == TYPE_E1) {
+ if (ts->spantype == TYPE_E1) {
for (i = 0; i < 15; i++) {
a = __t4_framer_in(wc, span, 0x71 + i);
/* Get high channel in low bits */
rxs = (a & 0xf);
- if (!(wc->spans[span].chans[i+16].sig & ZT_SIG_CLEAR)) {
- if (wc->spans[span].chans[i+16].rxsig != rxs)
- zt_rbsbits(&wc->spans[span].chans[i+16], rxs);
+ if (!(ts->span.chans[i+16].sig & ZT_SIG_CLEAR)) {
+ if (ts->span.chans[i+16].rxsig != rxs)
+ zt_rbsbits(&ts->span.chans[i+16], rxs);
}
rxs = (a >> 4) & 0xf;
- if (!(wc->spans[span].chans[i].sig & ZT_SIG_CLEAR)) {
- if (wc->spans[span].chans[i].rxsig != rxs)
- zt_rbsbits(&wc->spans[span].chans[i], rxs);
+ if (!(ts->span.chans[i].sig & ZT_SIG_CLEAR)) {
+ if (ts->span.chans[i].rxsig != rxs)
+ zt_rbsbits(&ts->span.chans[i], rxs);
}
}
- } else if (wc->spans[span].lineconfig & ZT_CONFIG_D4) {
+ } else if (ts->span.lineconfig & ZT_CONFIG_D4) {
for (i = 0; i < 24; i+=4) {
a = __t4_framer_in(wc, span, 0x70 + (i>>2));
/* Get high channel in low bits */
rxs = (a & 0x3) << 2;
- if (!(wc->spans[span].chans[i+3].sig & ZT_SIG_CLEAR)) {
- if (wc->spans[span].chans[i+3].rxsig != rxs)
- zt_rbsbits(&wc->spans[span].chans[i+3], rxs);
+ if (!(ts->span.chans[i+3].sig & ZT_SIG_CLEAR)) {
+ if (ts->span.chans[i+3].rxsig != rxs)
+ zt_rbsbits(&ts->span.chans[i+3], rxs);
}
rxs = (a & 0xc);
- if (!(wc->spans[span].chans[i+2].sig & ZT_SIG_CLEAR)) {
- if (wc->spans[span].chans[i+2].rxsig != rxs)
- zt_rbsbits(&wc->spans[span].chans[i+2], rxs);
+ if (!(ts->span.chans[i+2].sig & ZT_SIG_CLEAR)) {
+ if (ts->span.chans[i+2].rxsig != rxs)
+ zt_rbsbits(&ts->span.chans[i+2], rxs);
}
rxs = (a >> 2) & 0xc;
- if (!(wc->spans[span].chans[i+1].sig & ZT_SIG_CLEAR)) {
- if (wc->spans[span].chans[i+1].rxsig != rxs)
- zt_rbsbits(&wc->spans[span].chans[i+1], rxs);
+ if (!(ts->span.chans[i+1].sig & ZT_SIG_CLEAR)) {
+ if (ts->span.chans[i+1].rxsig != rxs)
+ zt_rbsbits(&ts->span.chans[i+1], rxs);
}
rxs = (a >> 4) & 0xc;
- if (!(wc->spans[span].chans[i].sig & ZT_SIG_CLEAR)) {
- if (wc->spans[span].chans[i].rxsig != rxs)
- zt_rbsbits(&wc->spans[span].chans[i], rxs);
+ if (!(ts->span.chans[i].sig & ZT_SIG_CLEAR)) {
+ if (ts->span.chans[i].rxsig != rxs)
+ zt_rbsbits(&ts->span.chans[i], rxs);
}
}
} else {
@@ -1099,27 +1682,18 @@ static void __t4_check_sigbits(struct t4 *wc, int span)
a = __t4_framer_in(wc, span, 0x70 + (i>>1));
/* Get high channel in low bits */
rxs = (a & 0xf);
- if (!(wc->spans[span].chans[i+1].sig & ZT_SIG_CLEAR)) {
- if (wc->spans[span].chans[i+1].rxsig != rxs)
- zt_rbsbits(&wc->spans[span].chans[i+1], rxs);
+ if (!(ts->span.chans[i+1].sig & ZT_SIG_CLEAR)) {
+ /* XXX Not really reset on every trans! XXX */
+ if (ts->span.chans[i+1].rxsig != rxs) {
+ zt_rbsbits(&ts->span.chans[i+1], rxs);
+ }
}
rxs = (a >> 4) & 0xf;
- if (!(wc->spans[span].chans[i].sig & ZT_SIG_CLEAR)) {
- if (wc->spans[span].chans[i].rxsig != rxs)
- zt_rbsbits(&wc->spans[span].chans[i], rxs);
- }
- }
- }
-}
-
-static void __t4_do_counters(struct t4 *wc)
-{
- int span;
- for (span=0;span<4;span++) {
- if (wc->alarmtimer[span]) {
- if (!--wc->alarmtimer[span]) {
- wc->spans[span].alarms &= ~(ZT_ALARM_RECOVER);
- zt_alarm_notify(&wc->spans[span]);
+ if (!(ts->span.chans[i].sig & ZT_SIG_CLEAR)) {
+ /* XXX Not really reset on every trans! XXX */
+ if (ts->span.chans[i].rxsig != rxs) {
+ zt_rbsbits(&ts->span.chans[i], rxs);
+ }
}
}
}
@@ -1130,8 +1704,9 @@ static void __t4_check_alarms(struct t4 *wc, int span)
unsigned char c,d;
int alarms;
int x,j;
+ struct t4_span *ts = wc->tspans[span];
- if (!(wc->spans[span].flags & ZT_FLAG_RUNNING))
+ if (!(ts->span.flags & ZT_FLAG_RUNNING))
return;
c = __t4_framer_in(wc, span, 0x4c);
@@ -1141,82 +1716,82 @@ static void __t4_check_alarms(struct t4 *wc, int span)
alarms = 0;
/* And consider only carrier alarms */
- wc->spans[span].alarms &= (ZT_ALARM_RED | ZT_ALARM_BLUE | ZT_ALARM_NOTOPEN);
+ ts->span.alarms &= (ZT_ALARM_RED | ZT_ALARM_BLUE | ZT_ALARM_NOTOPEN);
- if (wc->spantype[span] == TYPE_E1) {
+ if (ts->spantype == TYPE_E1) {
if (c & 0x04) {
/* No multiframe found, force RAI high after 400ms only if
we haven't found a multiframe since last loss
of frame */
- if (!(wc->spanflags[span] & FLAG_NMF)) {
+ if (!(ts->spanflags & FLAG_NMF)) {
__t4_framer_out(wc, span, 0x20, 0x9f | 0x20); /* LIM0: Force RAI High */
- wc->spanflags[span] |= FLAG_NMF;
+ ts->spanflags |= FLAG_NMF;
printk("NMF workaround on!\n");
}
__t4_framer_out(wc, span, 0x1e, 0xc3); /* Reset to CRC4 mode */
__t4_framer_out(wc, span, 0x1c, 0xf2); /* Force Resync */
__t4_framer_out(wc, span, 0x1c, 0xf0); /* Force Resync */
} else if (!(c & 0x02)) {
- if ((wc->spanflags[span] & FLAG_NMF)) {
+ if ((ts->spanflags & FLAG_NMF)) {
__t4_framer_out(wc, span, 0x20, 0x9f); /* LIM0: Clear forced RAI */
- wc->spanflags[span] &= ~FLAG_NMF;
+ ts->spanflags &= ~FLAG_NMF;
printk("NMF workaround off!\n");
}
}
} else {
/* Detect loopup code if we're not sending one */
- if ((!wc->spans[span].mainttimer) && (d & 0x08)) {
+ if ((!ts->span.mainttimer) && (d & 0x08)) {
/* Loop-up code detected */
- if ((wc->loopupcnt[span]++ > 80) && (wc->spans[span].maintstat != ZT_MAINT_REMOTELOOP)) {
+ if ((ts->loopupcnt++ > 80) && (ts->span.maintstat != ZT_MAINT_REMOTELOOP)) {
__t4_framer_out(wc, span, 0x36, 0x08); /* LIM0: Disable any local loop */
__t4_framer_out(wc, span, 0x37, 0xf6 ); /* LIM1: Enable remote loop */
- wc->spans[span].maintstat = ZT_MAINT_REMOTELOOP;
+ ts->span.maintstat = ZT_MAINT_REMOTELOOP;
}
} else
- wc->loopupcnt[span] = 0;
+ ts->loopupcnt = 0;
/* Same for loopdown code */
- if ((!wc->spans[span].mainttimer) && (d & 0x10)) {
+ if ((!ts->span.mainttimer) && (d & 0x10)) {
/* Loop-down code detected */
- if ((wc->loopdowncnt[span]++ > 80) && (wc->spans[span].maintstat == ZT_MAINT_REMOTELOOP)) {
+ if ((ts->loopdowncnt++ > 80) && (ts->span.maintstat == ZT_MAINT_REMOTELOOP)) {
__t4_framer_out(wc, span, 0x36, 0x08); /* LIM0: Disable any local loop */
__t4_framer_out(wc, span, 0x37, 0xf0 ); /* LIM1: Disable remote loop */
- wc->spans[span].maintstat = ZT_MAINT_NONE;
+ ts->span.maintstat = ZT_MAINT_NONE;
}
} else
- wc->loopdowncnt[span] = 0;
+ ts->loopdowncnt = 0;
}
- if (wc->spans[span].lineconfig & ZT_CONFIG_NOTOPEN) {
- for (x=0,j=0;x < wc->spans[span].channels;x++)
- if ((wc->spans[span].chans[x].flags & ZT_FLAG_OPEN) ||
- (wc->spans[span].chans[x].flags & ZT_FLAG_NETDEV))
+ if (ts->span.lineconfig & ZT_CONFIG_NOTOPEN) {
+ for (x=0,j=0;x < ts->span.channels;x++)
+ if ((ts->span.chans[x].flags & ZT_FLAG_OPEN) ||
+ (ts->span.chans[x].flags & ZT_FLAG_NETDEV))
j++;
if (!j)
alarms |= ZT_ALARM_NOTOPEN;
}
if (c & 0xa0) {
- if (wc->alarmcount[span] >= alarmdebounce)
+ if (ts->alarmcount >= alarmdebounce)
alarms |= ZT_ALARM_RED;
else
- wc->alarmcount[span]++;
+ ts->alarmcount++;
} else
- wc->alarmcount[span] = 0;
+ ts->alarmcount = 0;
if (c & 0x4)
alarms |= ZT_ALARM_BLUE;
- if (((!wc->spans[span].alarms) && alarms) ||
- (wc->spans[span].alarms && (!alarms)))
+ if (((!ts->span.alarms) && alarms) ||
+ (ts->span.alarms && (!alarms)))
wc->checktiming = 1;
/* Keep track of recovering */
- if ((!alarms) && wc->spans[span].alarms)
- wc->alarmtimer[span] = ZT_ALARMSETTLE_TIME;
- if (wc->alarmtimer[span])
+ if ((!alarms) && ts->span.alarms)
+ ts->alarmtimer = ZT_ALARMSETTLE_TIME;
+ if (ts->alarmtimer)
alarms |= ZT_ALARM_RECOVER;
/* If receiving alarms, go into Yellow alarm state */
- if (alarms && !(wc->spanflags[span] & FLAG_SENDINGYELLOW)) {
+ if (alarms && !(ts->spanflags & FLAG_SENDINGYELLOW)) {
unsigned char fmr4;
#if 1
printk("wct4xxp: Setting yellow alarm on span %d\n", span + 1);
@@ -1224,8 +1799,8 @@ static void __t4_check_alarms(struct t4 *wc, int span)
/* We manually do yellow alarm to handle RECOVER and NOTOPEN, otherwise it's auto anyway */
fmr4 = __t4_framer_in(wc, span, 0x20);
__t4_framer_out(wc, span, 0x20, fmr4 | 0x20);
- wc->spanflags[span] |= FLAG_SENDINGYELLOW;
- } else if ((!alarms) && (wc->spanflags[span] & FLAG_SENDINGYELLOW)) {
+ ts->spanflags |= FLAG_SENDINGYELLOW;
+ } else if ((!alarms) && (ts->spanflags & FLAG_SENDINGYELLOW)) {
unsigned char fmr4;
#if 1
printk("wct4xxp: Clearing yellow alarm on span %d\n", span + 1);
@@ -1233,17 +1808,33 @@ static void __t4_check_alarms(struct t4 *wc, int span)
/* We manually do yellow alarm to handle RECOVER */
fmr4 = __t4_framer_in(wc, span, 0x20);
__t4_framer_out(wc, span, 0x20, fmr4 & ~0x20);
- wc->spanflags[span] &= ~FLAG_SENDINGYELLOW;
+ ts->spanflags &= ~FLAG_SENDINGYELLOW;
}
/* Re-check the timing source when we enter/leave alarm, notwithstanding
yellow alarm */
if (c & 0x10)
alarms |= ZT_ALARM_YELLOW;
- if (wc->spans[span].mainttimer || wc->spans[span].maintstat)
+ if (ts->span.mainttimer || ts->span.maintstat)
alarms |= ZT_ALARM_LOOPBACK;
- wc->spans[span].alarms = alarms;
- zt_alarm_notify(&wc->spans[span]);
+ ts->span.alarms = alarms;
+ zt_alarm_notify(&ts->span);
+}
+
+static void __t4_do_counters(struct t4 *wc)
+{
+ int span;
+ for (span=0;span<4;span++) {
+ struct t4_span *ts = wc->tspans[span];
+ if (ts->alarmtimer) {
+ if (!--ts->alarmtimer) {
+ ts->span.alarms &= ~(ZT_ALARM_RECOVER);
+ if (!polling)
+ __t4_check_alarms(wc, span);
+ zt_alarm_notify(&ts->span);
+ }
+ }
+ }
}
static inline void __handle_leds(struct t4 *wc)
@@ -1252,8 +1843,9 @@ static inline void __handle_leds(struct t4 *wc)
wc->blinktimer++;
for (x=0;x<4;x++) {
- if (wc->spans[x].flags & ZT_FLAG_RUNNING) {
- if (wc->spans[x].alarms & (ZT_ALARM_RED | ZT_ALARM_BLUE)) {
+ struct t4_span *ts = wc->tspans[x];
+ if (ts->span.flags & ZT_FLAG_RUNNING) {
+ if (ts->span.alarms & (ZT_ALARM_RED | ZT_ALARM_BLUE)) {
#ifdef FANCY_ALARM
if (wc->blinktimer == (altab[wc->alarmpos] >> 1)) {
__t4_set_led(wc, x, WC_RED);
@@ -1268,10 +1860,10 @@ static inline void __handle_leds(struct t4 *wc)
__t4_set_led(wc, x, WC_OFF);
}
#endif
- } else if (wc->spans[x].alarms & ZT_ALARM_YELLOW) {
+ } else if (ts->span.alarms & ZT_ALARM_YELLOW) {
/* Yellow Alarm */
__t4_set_led(wc, x, WC_YELLOW);
- } else if (wc->spans[x].mainttimer || wc->spans[x].maintstat) {
+ } else if (ts->span.mainttimer || ts->span.maintstat) {
#ifdef FANCY_ALARM
if (wc->blinktimer == (altab[wc->alarmpos] >> 1)) {
__t4_set_led(wc, x, WC_GREEN);
@@ -1307,6 +1899,7 @@ static inline void __handle_leds(struct t4 *wc)
#endif
}
+#ifdef SUPPORT_GEN1
#ifdef LINUX26
static irqreturn_t t4_interrupt(int irq, void *dev_id, struct pt_regs *regs)
#else
@@ -1399,7 +1992,242 @@ static void t4_interrupt(int irq, void *dev_id, struct pt_regs *regs)
return IRQ_RETVAL(1);
#endif
}
+#endif
+
+static inline void __t4_framer_interrupt(struct t4 *wc, int span)
+{
+ /* Check interrupts for a given span */
+ unsigned char gis, isr0=0, isr1=0, isr2=0, isr3=0, isr4;
+ struct t4_span *ts;
+
+ if (debug & DEBUG_FRAMER)
+ printk("framer interrupt span %d:%d!\n", wc->num, span + 1);
+ ts = wc->tspans[span];
+
+ gis = __t4_framer_in(wc, span, 0x6e);
+
+ if (ts->spantype == TYPE_E1) {
+ /* E1 checks */
+ if (gis & 0x1)
+ isr0 = __t4_framer_in(wc, span, 0x68);
+ if (gis & 0x2)
+ isr1 = __t4_framer_in(wc, span, 0x69);
+ if (gis & 0x4)
+ isr2 = __t4_framer_in(wc, span, 0x6a);
+ if (gis & 0x8)
+ isr3 = __t4_framer_in(wc, span, 0x6b);
+
+
+ if (isr0)
+ __t4_check_sigbits(wc, span);
+
+ if ((isr3 & 0x38) || isr2 || isr1)
+ __t4_check_alarms(wc, span);
+ if (debug & DEBUG_FRAMER)
+ printk("gis: %02x, isr0: %02x, isr1: %02x, isr2: %02x, isr3: %02x\n", gis, isr0, isr1, isr2, isr3);
+ } else {
+ /* T1 checks */
+ if (gis & 0x1)
+ isr0 = __t4_framer_in(wc, span, 0x68);
+ if (gis & 0x4)
+ isr2 = __t4_framer_in(wc, span, 0x6a);
+ if (gis & 0x8)
+ isr3 = __t4_framer_in(wc, span, 0x6b);
+
+ if (isr0)
+ __t4_check_sigbits(wc, span);
+ if (isr2 || (isr3 & 0x08))
+ __t4_check_alarms(wc, span);
+ if (debug & DEBUG_FRAMER)
+			printk("gis: %02x, isr0: %02x, isr1: %02x, isr2: %02x, isr3: %02x\n", gis, isr0, isr1, isr2, isr3);
+ }
+ if (debugslips && !ts->span.alarms) {
+ if (isr3 & 0x02)
+ printk("TE410P: RECEIVE slip NEGATIVE on span %d\n", span + 1);
+ if (isr3 & 0x01)
+ printk("TE410P: RECEIVE slip POSITIVE on span %d\n", span + 1);
+ if (gis & 0x10)
+ isr4 = __t4_framer_in(wc, span, 0x6c);
+ else
+ isr4 = 0;
+ if (isr4 & 0x80)
+ printk("TE410P: TRANSMIT slip POSITIVE on span %d\n", span + 1);
+ if (isr4 & 0x40)
+ printk("TE410P: TRANSMIT slip NEGATIVE on span %d\n", span + 1);
+ }
+}
+
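+/* Second-generation interrupt handler: acknowledge the PCI interrupt,
+   dispatch per-span audio processing (to per-CPU work queues when
+   ENABLE_WORKQUEUES is set, otherwise inline via t4_prep_gen2()), run the
+   alarm-timer counters, service framer and VPM/DTMF events, drive the
+   LEDs, and re-evaluate the timing source when requested. */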
+#ifdef LINUX26
+static irqreturn_t t4_interrupt_gen2(int irq, void *dev_id, struct pt_regs *regs)
+#else
+static void t4_interrupt_gen2(int irq, void *dev_id, struct pt_regs *regs)
+#endif
+{
+ struct t4 *wc = dev_id;
+ unsigned long flags;
+ unsigned char cis;
+ int x;
+
+ unsigned int status;
+#if 0
+ unsigned int status2;
+#endif
+
+#if 0
+ if (wc->intcount < 20)
+ printk("2G: Pre-interrupt\n");
+#endif
+
+ inirq = 1;
+ /* Make sure it's really for us */
+ status = t4_pci_in(wc, WC_INTR);
+#if 1
+ t4_pci_out(wc, WC_INTR, status & 0x00000008);
+#endif
+
+ /* Ignore if it's not for us */
+ if (!(status & 0x7))
+#ifdef LINUX26
+ return IRQ_NONE;
+#else
+ return;
+#endif
+
+ if (!wc->spansstarted) {
+ printk("Not prepped yet!\n");
+#ifdef LINUX26
+ return IRQ_NONE;
+#else
+ return;
+#endif
+ }
+
+ wc->intcount++;
+#if 1
+ if (wc->intcount < 20)
+ printk("2G: Got interrupt, status = %08x, GIS = %04x\n", status, __t4_framer_in(wc, 0, 0x6f));
+#endif
+
+ if (status & 0x2) {
+#ifdef ENABLE_WORKQUEUES
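+		/* Fan the four spans out across the online CPUs; a span that is
+		   not running simply decrements the outstanding work counter. */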
+ int cpus = num_online_cpus();
+ atomic_set(&wc->worklist, 4);
+ if (wc->tspans[0]->span.flags & ZT_FLAG_RUNNING)
+ t4_queue_work(wc->workq, &wc->tspans[0]->swork, 0);
+ else
+ atomic_dec(&wc->worklist);
+ if (wc->tspans[1]->span.flags & ZT_FLAG_RUNNING)
+ t4_queue_work(wc->workq, &wc->tspans[1]->swork, 1 % cpus);
+ else
+ atomic_dec(&wc->worklist);
+ if (wc->tspans[2]->span.flags & ZT_FLAG_RUNNING)
+ t4_queue_work(wc->workq, &wc->tspans[2]->swork, 2 % cpus);
+ else
+ atomic_dec(&wc->worklist);
+ if (wc->tspans[3]->span.flags & ZT_FLAG_RUNNING)
+ t4_queue_work(wc->workq, &wc->tspans[3]->swork, 3 % cpus);
+ else
+ atomic_dec(&wc->worklist);
+#else
+ t4_prep_gen2(wc);
+#endif
+ }
+
+ spin_lock_irqsave(&wc->reglock, flags);
+
+ if (status & 0x2)
+ __t4_do_counters(wc);
+
+ if (polling && (status & 0x2)) {
+ x = wc->intcount & 15 /* 63 */;
+ switch(x) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ __t4_check_sigbits(wc, x);
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ __t4_check_alarms(wc, x - 4);
+ break;
+ }
+ } else if (status & 0x1) {
+ cis = __t4_framer_in(wc, 0, 0x6f);
+ if (cis & 0x1)
+ __t4_framer_interrupt(wc, 0);
+ if (cis & 0x2)
+ __t4_framer_interrupt(wc, 1);
+ if (cis & 0x4)
+ __t4_framer_interrupt(wc, 2);
+ if (cis & 0x8)
+ __t4_framer_interrupt(wc, 3);
+ }
+#ifdef VPM_SUPPORT
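+	/* Every 16th interrupt, poll one span's active DTMF detectors: read
+	   each active channel's energy register and treat a drop in energy as
+	   the end of the digit, queueing a DTMF-up event when detection is
+	   enabled for that channel; otherwise hand any detector events in the
+	   upper status bits to __t4_check_vpm(). */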
+ if (wc->vpm) {
+ if (!(wc->intcount % 16)) {
+ /* Check DTMF events */
+ if (wc->vpm) {
+ int span = (wc->intcount >> 4) & 0x3;
+ int y;
+ short energy;
+ int offset = 1;
+ int chip;
+ int channel;
+ struct t4_span *ts = wc->tspans[span];
+ if (!wc->t1e1)
+ offset = 5;
+ if (ts->dtmfactive) {
+ for (y = 0; y < ts->span.channels; y++) {
+ if (ts->dtmfactive & (1 << y)) {
+ channel = y + offset;
+ chip = span + ((channel & 0x1) << 2);
+ /* Have an active channel, check its energy! */
+ energy = __t4_vpm_in(wc, chip, 0x58 + channel);
+ energy = ZT_XLAW(energy, ts->span.chans);
+ if (energy < (ts->dtmfenergy[y])) {
+ if (debug & DEBUG_DTMF)
+ printk("Finished digit on span %d, channel %d (energy = %02x < %02x) 'channel' %d, chip %d!\n", span, y + 1, energy, ts->dtmfenergy[y], channel, chip);
+ if (debug & DEBUG_DTMF)
+ printk("Finished digit '%c' on channel %d of span %d\n", ts->dtmfdigit[y], y + 1, span);
+ if (ts->dtmfmask & (1 << y))
+ zt_qevent_lock(&ts->span.chans[y], (ZT_EVENT_DTMFUP | ts->dtmfdigit[y]));
+ ts->dtmfenergy[y] = 0;
+ ts->dtmfdigit[y] = 0;
+ ts->dtmfactive &= ~(1 << y);
+ } else if (energy > (ts->dtmfenergy[y])) {
+ if (debug & DEBUG_DTMF)
+ printk("Increasing digit energy on span %d, channel %d (energy = %02x > %02x)!\n", span, y + 1, energy, ts->dtmfenergy[y]);
+ ts->dtmfenergy[y] = energy;
+ }
+ }
+ }
+ }
+ }
+ } else if ((status & 0xff00) != 0xff00)
+ __t4_check_vpm(wc, (status & 0xff00) >> 8);
+ }
+#endif
+
+#if 1
+ __handle_leds(wc);
+#endif
+
+ if (wc->checktiming > 0)
+ __t4_set_timing_source_auto(wc);
+ spin_unlock_irqrestore(&wc->reglock, flags);
+
+#ifndef ENABLE_WORKQUEUES
+ t4_pci_out(wc, WC_INTR, 0);
+#endif
+#ifdef LINUX26
+ return IRQ_RETVAL(1);
+#endif
+}
+#ifdef SUPPORT_GEN1
static int t4_reset_dma(struct t4 *wc)
{
/* Turn off DMA and such */
@@ -1414,21 +2242,187 @@ static int t4_reset_dma(struct t4 *wc)
t4_pci_out(wc, WC_WRADDR, wc->writedma);
t4_pci_out(wc, WC_COUNT, ((ZT_MAX_CHUNKSIZE * 2 * 32 - 1) << 18) | ((ZT_MAX_CHUNKSIZE * 2 * 32 - 1) << 2));
t4_pci_out(wc, WC_INTR, 0);
- wc->dmactrl = 0xc0000003 | (1 << 29);
+#ifdef VPM_SUPPORT
+ wc->dmactrl = 0xc0000000 | (1 << 29) | wc->vpm;
+#else
+ wc->dmactrl = 0xc0000000 | (1 << 29);
+#endif
if (noburst)
wc->dmactrl |= (1 << 26);
t4_pci_out(wc, WC_DMACTRL, wc->dmactrl);
return 0;
}
+#endif
-static int t4_hardware_init(struct t4 *wc)
+#ifdef VPM_SUPPORT
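+/* Bring up the VPM echo-cancellation/DTMF module: verify each of the eight
+   DSPs by its revision register (expected 0x26), program GPIO, TDM timing
+   and timeslot masks (even timeslots for the first four devices, odd for
+   the rest), set a 128-tap echo tail, select A-law or u-law companding per
+   span, leave every channel in bypass, and arm the DTMF detectors. */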
+static void t4_vpm_init(struct t4 *wc)
+{
+ unsigned char reg;
+ unsigned int mask;
+ unsigned int ver;
+ int i,x,y;
+ if (!vpmsupport) {
+ printk("VPM: Support Disabled\n");
+ return;
+ }
+
+ for (x=0;x<8;x++) {
+ struct t4_span *ts = wc->tspans[x & 0x3];
+ ver = t4_vpm_in(wc, x, 0x1a0); /* revision */
+ if (ver != 0x26) {
+ if (x)
+				printk("VPM: Inoperable\n");
+ else
+ printk("VPM: Not Present\n");
+ return;
+ }
+
+ /* Setup GPIO's */
+ for (y=0;y<4;y++) {
+ t4_vpm_out(wc, x, 0x1a8 + y, 0x00); /* GPIO out */
+ t4_vpm_out(wc, x, 0x1ac + y, 0x00); /* GPIO dir */
+ t4_vpm_out(wc, x, 0x1b0 + y, 0x00); /* GPIO sel */
+ }
+
+ /* Setup TDM path - sets fsync and tdm_clk as inputs */
+ reg = t4_vpm_in(wc, x, 0x1a3); /* misc_con */
+ t4_vpm_out(wc, x, 0x1a3, reg & ~2);
+
+ /* Setup timeslots */
+ t4_vpm_out(wc, x, 0x02f, 0x20 | ((x%4) << 3));
+ if (x < 4)
+ mask = 0x55555555;
+ else
+ mask = 0xaaaaaaaa;
+
+ /* Setup Echo length (128 taps) */
+ t4_vpm_out(wc, x, 0x022, 0x00);
+ t4_vpm_out(wc, x, 0x023, 0x7f);
+
+		/* Setup the TDM channel masks for all LV's */
+ for (i=0;i<4;i++)
+ t4_vpm_out(wc, x, 0x30+i, (mask >> (i << 3)) & 0xff);
+
+ /* Setup convergence rate */
+ reg = t4_vpm_in(wc,x,0x20);
+ reg &= 0xE0;
+ if (ts->spantype == TYPE_E1) {
+ if (x < 4)
+ printk("VPM: Span %d A-law mode\n", x & 0x3);
+ reg |= 0x01;
+ } else {
+ if (x < 4)
+ printk("VPM: Span %d U-law mode\n", x & 0x3);
+ reg &= ~0x01;
+ }
+ t4_vpm_out(wc,x,0x20,(reg | 0x20));
+
+ /* Initialize echo cans */
+ for (i = 0 ; i < MAX_TDM_CHAN ; i++) {
+ if (mask & (0x00000001 << i))
+ t4_vpm_out(wc,x,i,0x00);
+ }
+
+ wait_a_little();
+
+ /* Put in bypass mode */
+ for (i = 0 ; i < MAX_TDM_CHAN ; i++) {
+ if (mask & (0x00000001 << i)) {
+ t4_vpm_out(wc,x,i,0x01);
+ }
+ }
+
+ /* Enable bypass */
+ for (i = 0 ; i < MAX_TDM_CHAN ; i++) {
+ if (mask & (0x00000001 << i))
+ t4_vpm_out(wc,x,0x78 + i,0x01);
+ }
+
+ /* Enable DTMF detectors */
+ for (i=0;i<MAX_DTMF_DET;i++) {
+ if(x < 4)
+ t4_vpm_out(wc, x, 0x98+i,(i*2)|0x40);
+ else
+ t4_vpm_out(wc, x, 0x98+i, ((i*2)+1)|0x40);
+ }
+ for (i=0xb8;i<0xbe;i++)
+ t4_vpm_out(wc, x, i, 0xff);
+ if(x < 4) {
+ for(i=0;i<4;i++)
+ t4_vpm_out(wc, x, 0xc0+i, 0x55);
+ } else {
+ for(i = 0; i < 4; i++)
+ t4_vpm_out(wc, x, 0xc0+i, 0xaa);
+ }
+ }
+ printk("VPM: Present and operational\n");
+ wc->vpm = T4_VPM_PRESENT;
+}
+
+#endif
+
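+/* The TSI (time-slot interchange) crosspoint is programmed through the low
+   15 bits of DMACTRL: bit 14 strobes a write, bits 13:7 carry the
+   destination timeslot and bits 6:0 the source, each encoded as
+   (span << 5) | channel.  On cards with no E1 spans the timeslot index is
+   offset by 4.  Reset walks all 128 destinations with a zero source. */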
+static void t4_tsi_reset(struct t4 *wc)
{
int x;
+ for (x=0;x<128;x++) {
+ wc->dmactrl &= ~0x00007fff;
+ wc->dmactrl |= (0x00004000 | (x << 7));
+ t4_pci_out(wc, WC_DMACTRL, wc->dmactrl);
+ }
+ wc->dmactrl &= ~0x00007fff;
+ t4_pci_out(wc, WC_DMACTRL, wc->dmactrl);
+}
+
+/* Note that channels here start from 1 */
+static void t4_tsi_assign(struct t4 *wc, int fromspan, int fromchan, int tospan, int tochan)
+{
+ unsigned long flags;
+ int fromts, tots;
+
+ fromts = (fromspan << 5) |(fromchan);
+ tots = (tospan << 5) | (tochan);
+
+ if (!wc->t1e1) {
+ fromts += 4;
+ tots += 4;
+ }
+ spin_lock_irqsave(&wc->reglock, flags);
+ wc->dmactrl &= ~0x00007fff;
+ wc->dmactrl |= (0x00004000 | (tots << 7) | (fromts));
+ __t4_pci_out(wc, WC_DMACTRL, wc->dmactrl);
+ wc->dmactrl &= ~0x00007fff;
+ __t4_pci_out(wc, WC_DMACTRL, wc->dmactrl);
+ spin_unlock_irqrestore(&wc->reglock, flags);
+}
+
+static void t4_tsi_unassign(struct t4 *wc, int tospan, int tochan)
+{
+ unsigned long flags;
+ int tots;
+
+ tots = (tospan << 5) | (tochan);
+
+ if (!wc->t1e1)
+ tots += 4;
+ spin_lock_irqsave(&wc->reglock, flags);
+ wc->dmactrl &= ~0x00007fff;
+ wc->dmactrl |= (0x00004000 | (tots << 7));
+ __t4_pci_out(wc, WC_DMACTRL, wc->dmactrl);
+ if (debug & DEBUG_TSI)
+		printk("Sending %08x\n", wc->dmactrl);
+ wc->dmactrl &= ~0x00007fff;
+ __t4_pci_out(wc, WC_DMACTRL, wc->dmactrl);
+ spin_unlock_irqrestore(&wc->reglock, flags);
+}
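+
+/* First-stage hardware setup: report the board version, stop the DMA
+   engine, program the read/write DMA base addresses, reset the TSI
+   crosspoint (2nd gen) or the chunk counters (1st gen), clear pending
+   interrupts, and latch the per-span T1/E1 configuration and card order
+   from the LED register. */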
+static int t4_hardware_init_1(struct t4 *wc, int gen2)
+{
unsigned int version;
- unsigned int falcver;
version = t4_pci_in(wc, WC_VERSION);
- printk("TE410P version %08x\n", version);
+ printk("TE410P version %08x, burst %s, slip debug: %s\n", version, noburst ? "OFF" : "ON", debugslips ? "ON" : "OFF");
+#ifdef ENABLE_WORKQUEUES
+ printk("TE410P running with work queues.\n");
+#endif
/* Make sure DMA engine is not running and interrupts are acknowledged */
wc->dmactrl = 0x0;
@@ -1440,8 +2434,12 @@ static int t4_hardware_init(struct t4 *wc)
t4_pci_out(wc, WC_RDADDR, wc->readdma);
t4_pci_out(wc, WC_WRADDR, wc->writedma);
- /* Setup counters, interrupt flags */
- t4_pci_out(wc, WC_COUNT, ((ZT_MAX_CHUNKSIZE * 2 * 32 - 1) << 18) | ((ZT_MAX_CHUNKSIZE * 2 * 32 - 1) << 2));
+ /* Setup counters, interrupt flags (ignored in Gen2) */
+ if (gen2) {
+ t4_tsi_reset(wc);
+ } else {
+ t4_pci_out(wc, WC_COUNT, ((ZT_MAX_CHUNKSIZE * 2 * 32 - 1) << 18) | ((ZT_MAX_CHUNKSIZE * 2 * 32 - 1) << 2));
+ }
/* Reset pending interrupts */
t4_pci_out(wc, WC_INTR, 0x00000000);
@@ -1452,6 +2450,13 @@ static int t4_hardware_init(struct t4 *wc)
else
wc->t1e1 = ((t4_pci_in(wc, WC_LEDS)) & 0x0f00) >> 8;
wc->order = ((t4_pci_in(wc, WC_LEDS)) & 0xf0000000) >> 28;
+ return 0;
+}
+
+static int t4_hardware_init_2(struct t4 *wc)
+{
+ int x;
+ unsigned int falcver;
/* Setup LEDS, take out of reset */
t4_pci_out(wc, WC_LEDS, 0x000000ff);
@@ -1470,7 +2475,7 @@ static int __devinit t4_launch(struct t4 *wc)
{
int x;
unsigned long flags;
- if (wc->spans[0].flags & ZT_FLAG_REGISTERED)
+ if (wc->tspans[0]->span.flags & ZT_FLAG_REGISTERED)
return 0;
printk("TE410P: Launching card: %d\n", wc->order);
@@ -1478,26 +2483,26 @@ static int __devinit t4_launch(struct t4 *wc)
for (x=0;x<4;x++)
t4_serial_setup(wc, x);
- if (zt_register(&wc->spans[0], 0)) {
- printk(KERN_ERR "Unable to register span %s\n", wc->spans[0].name);
+ if (zt_register(&wc->tspans[0]->span, 0)) {
+ printk(KERN_ERR "Unable to register span %s\n", wc->tspans[0]->span.name);
return -1;
}
- if (zt_register(&wc->spans[1], 0)) {
- printk(KERN_ERR "Unable to register span %s\n", wc->spans[1].name);
- zt_unregister(&wc->spans[0]);
+ if (zt_register(&wc->tspans[1]->span, 0)) {
+ printk(KERN_ERR "Unable to register span %s\n", wc->tspans[1]->span.name);
+ zt_unregister(&wc->tspans[0]->span);
return -1;
}
- if (zt_register(&wc->spans[2], 0)) {
- printk(KERN_ERR "Unable to register span %s\n", wc->spans[2].name);
- zt_unregister(&wc->spans[0]);
- zt_unregister(&wc->spans[1]);
+ if (zt_register(&wc->tspans[2]->span, 0)) {
+ printk(KERN_ERR "Unable to register span %s\n", wc->tspans[2]->span.name);
+ zt_unregister(&wc->tspans[0]->span);
+ zt_unregister(&wc->tspans[1]->span);
return -1;
}
- if (zt_register(&wc->spans[3], 0)) {
- printk(KERN_ERR "Unable to register span %s\n", wc->spans[3].name);
- zt_unregister(&wc->spans[0]);
- zt_unregister(&wc->spans[1]);
- zt_unregister(&wc->spans[2]);
+ if (zt_register(&wc->tspans[3]->span, 0)) {
+ printk(KERN_ERR "Unable to register span %s\n", wc->tspans[3]->span.name);
+ zt_unregister(&wc->tspans[0]->span);
+ zt_unregister(&wc->tspans[1]->span);
+ zt_unregister(&wc->tspans[2]->span);
return -1;
}
wc->checktiming = 1;
@@ -1514,7 +2519,9 @@ static int __devinit t4_init_one(struct pci_dev *pdev, const struct pci_device_i
{
int res;
struct t4 *wc;
+ struct devtype *dt;
int x,f;
+ int basesize;
#if 0
int y;
unsigned int *canary;
@@ -1528,7 +2535,12 @@ static int __devinit t4_init_one(struct pci_dev *pdev, const struct pci_device_i
if (wc) {
memset(wc, 0x0, sizeof(struct t4));
spin_lock_init(&wc->reglock);
- wc->variety = (char *)(ent->driver_data);
+ dt = (struct devtype *)(ent->driver_data);
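+		/* 2nd-gen cards need only half the per-direction DMA buffer of
+		   the double-buffered 1st-gen layout. */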
+ if (dt->flags & FLAG_2NDGEN)
+ basesize = ZT_MAX_CHUNKSIZE * 32 * 4;
+ else
+ basesize = ZT_MAX_CHUNKSIZE * 32 * 2 * 4;
+ wc->variety = dt->desc;
wc->memaddr = pci_resource_start(pdev, 0);
wc->memlen = pci_resource_len(pdev, 0);
@@ -1548,21 +2560,21 @@ static int __devinit t4_init_one(struct pci_dev *pdev, const struct pci_device_i
wc->writechunk =
/* 32 channels, Double-buffer, Read/Write, 4 spans */
- (unsigned int *)pci_alloc_consistent(pdev, ZT_MAX_CHUNKSIZE * 32 * 2 * 2 * 4, &wc->writedma);
+ (unsigned int *)pci_alloc_consistent(pdev, basesize * 2, &wc->writedma);
if (!wc->writechunk) {
- printk("wct1xxp: Unable to allocate DMA-able memory\n");
+ printk("wct4xxp: Unable to allocate DMA-able memory\n");
return -ENOMEM;
}
/* Read is after the whole write piece (in words) */
- wc->readchunk = wc->writechunk + ZT_CHUNKSIZE * 32 * 2;
+ wc->readchunk = wc->writechunk + basesize / 4;
/* Same thing but in bytes... */
- wc->readdma = wc->writedma + ZT_CHUNKSIZE * 32 * 2 * 4;
+ wc->readdma = wc->writedma + basesize;
/* Initialize Write/Buffers to all blank data */
- memset((void *)wc->writechunk,0xff,ZT_MAX_CHUNKSIZE * 2 * 32 * 4);
- memset((void *)wc->readchunk,0xff,ZT_MAX_CHUNKSIZE * 2 * 32 * 4);
+ memset((void *)wc->writechunk,0x00, basesize);
+ memset((void *)wc->readchunk,0xff, basesize);
#if 0
memset((void *)wc->readchunk,0xff,ZT_MAX_CHUNKSIZE * 2 * 32 * 4);
/* Initialize canary */
@@ -1577,36 +2589,76 @@ static int __devinit t4_init_one(struct pci_dev *pdev, const struct pci_device_i
pci_set_drvdata(pdev, wc);
/* Initialize hardware */
- t4_hardware_init(wc);
+ t4_hardware_init_1(wc, dt->flags & FLAG_2NDGEN);
+
for(x = 0; x < MAX_T4_CARDS; x++) {
if (!cards[x]) break;
}
+
if (x >= MAX_T4_CARDS) {
printk("No cards[] slot available!!\n");
return -ENOMEM;
}
+
wc->num = x;
cards[x] = wc;
- if (request_irq(pdev->irq, t4_interrupt, SA_INTERRUPT | SA_SHIRQ, "t4xxp", wc)) {
- printk("t4xxp: Unable to request IRQ %d\n", pdev->irq);
- kfree(wc);
- return -EIO;
+#ifdef ENABLE_WORKQUEUES
+ if (dt->flags & FLAG_2NDGEN) {
+ char tmp[20];
+ sprintf(tmp, "te4xxp[%d]", wc->num);
+ wc->workq = create_workqueue(tmp);
}
+#endif
+
+ /* Allocate pieces we need here */
for (x=0;x<4;x++) {
if (wc->t1e1 & (1 << x)) {
- wc->spantype[x] = TYPE_E1;
- wc->chans[x] = kmalloc(sizeof(struct zt_chan) * 31,GFP_KERNEL);
- memset(wc->chans[x],0,sizeof(struct zt_chan) * 31);
+ wc->tspans[x] = kmalloc(sizeof(struct t4_span) + sizeof(struct zt_chan) * 31, GFP_KERNEL);
+ if (wc->tspans[x]) {
+ memset(wc->tspans[x], 0, sizeof(struct t4_span) + sizeof(struct zt_chan) * 31);
+ wc->tspans[x]->spantype = TYPE_E1;
+ }
} else {
- wc->spantype[x] = TYPE_T1;
- wc->chans[x] = kmalloc(sizeof(struct zt_chan) * 24,GFP_KERNEL);
- memset(wc->chans[x],0,sizeof(struct zt_chan) * 24);
+ wc->tspans[x] = kmalloc(sizeof(struct t4_span) + sizeof(struct zt_chan) * 24, GFP_KERNEL);
+ if (wc->tspans[x]) {
+ memset(wc->tspans[x], 0, sizeof(struct t4_span) + sizeof(struct zt_chan) * 24);
+ if (j1mode)
+ wc->tspans[x]->spantype = TYPE_J1;
+ else
+ wc->tspans[x]->spantype = TYPE_T1;
+ }
}
- if (!wc->chans[x])
+ if (!wc->tspans[x])
return -ENOMEM;
+#ifdef ENABLE_WORKQUEUES
+ INIT_WORK(&wc->tspans[x]->swork, workq_handlespan, wc->tspans[x]);
+#endif
+ wc->tspans[x]->spanflags |= dt->flags;
}
+
+
+	/* Continue hardware initialization */
+ t4_hardware_init_2(wc);
+
+
+#ifdef SUPPORT_GEN1
+ if (request_irq(pdev->irq, (dt->flags & FLAG_2NDGEN) ? t4_interrupt_gen2 :t4_interrupt, SA_INTERRUPT | SA_SHIRQ, "t4xxp", wc))
+#else
+ if (!(wc->tspans[0]->spanflags & FLAG_2NDGEN)) {
+ printk("This driver does not support 1st gen modules\n");
+ kfree(wc);
+ return -ENODEV;
+ }
+ if (request_irq(pdev->irq, t4_interrupt_gen2, SA_INTERRUPT | SA_SHIRQ, "t4xxp", wc))
+#endif
+ {
+ printk("t4xxp: Unable to request IRQ %d\n", pdev->irq);
+ kfree(wc);
+ return -EIO;
+ }
+
init_spans(wc);
/* Launch cards as appropriate */
@@ -1649,7 +2701,11 @@ static int t4_hardware_stop(struct t4 *wc)
schedule_timeout((25 * HZ) / 1000);
/* Turn off counter, address, etc */
- t4_pci_out(wc, WC_COUNT, 0x000000);
+ if (wc->tspans[0]->spanflags & FLAG_2NDGEN) {
+ t4_tsi_reset(wc);
+ } else {
+ t4_pci_out(wc, WC_COUNT, 0x000000);
+ }
t4_pci_out(wc, WC_RDADDR, 0x0000000);
t4_pci_out(wc, WC_WRADDR, 0x0000000);
t4_pci_out(wc, WC_GPIO, 0x0000000);
@@ -1668,14 +2724,20 @@ static void __devexit t4_remove_one(struct pci_dev *pdev)
t4_hardware_stop(wc);
/* Unregister spans */
- if (wc->spans[0].flags & ZT_FLAG_REGISTERED)
- zt_unregister(&wc->spans[0]);
- if (wc->spans[1].flags & ZT_FLAG_REGISTERED)
- zt_unregister(&wc->spans[1]);
- if (wc->spans[2].flags & ZT_FLAG_REGISTERED)
- zt_unregister(&wc->spans[2]);
- if (wc->spans[3].flags & ZT_FLAG_REGISTERED)
- zt_unregister(&wc->spans[3]);
+ if (wc->tspans[0]->span.flags & ZT_FLAG_REGISTERED)
+ zt_unregister(&wc->tspans[0]->span);
+ if (wc->tspans[1]->span.flags & ZT_FLAG_REGISTERED)
+ zt_unregister(&wc->tspans[1]->span);
+ if (wc->tspans[2]->span.flags & ZT_FLAG_REGISTERED)
+ zt_unregister(&wc->tspans[2]->span);
+ if (wc->tspans[3]->span.flags & ZT_FLAG_REGISTERED)
+ zt_unregister(&wc->tspans[3]->span);
+#ifdef ENABLE_WORKQUEUES
+ if (wc->workq) {
+ flush_workqueue(wc->workq);
+ destroy_workqueue(wc->workq);
+ }
+#endif
#if 0
/* Stop any DMA */
__t1xxp_stop_dma(wc);
@@ -1703,16 +2765,19 @@ static void __devexit t4_remove_one(struct pci_dev *pdev)
cards[wc->num] = NULL;
pci_set_drvdata(pdev, NULL);
for (x=0;x<4;x++) {
- if (wc->chans[x])
- kfree(wc->chans[x]);
+ if (wc->tspans[x])
+ kfree(wc->tspans[x]);
}
kfree(wc);
}
}
+
static struct pci_device_id t4_pci_tbl[] __devinitdata =
{
- { 0x10ee, 0x0314, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long)"Wildcard TE410P-Xilinx" },
+ { 0x10ee, 0x0314, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long)&wct4xxp },
+ { 0xd161, 0x0410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long)&wct410p2 },
+ { 0xd161, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long)&wct405p2 },
{ 0, }
};
@@ -1756,13 +2821,22 @@ module_param(noburst, int, 0600);
module_param(timingcable, int, 0600);
module_param(t1e1override, int, 0600);
module_param(alarmdebounce, int, 0600);
+module_param(j1mode, int, 0600);
+module_param(debugslips, int, 0600);
+module_param(polling, int, 0600);
#else
MODULE_PARM(debug, "i");
MODULE_PARM(loopback, "i");
MODULE_PARM(noburst, "i");
+MODULE_PARM(debugslips, "i");
+MODULE_PARM(polling, "i");
MODULE_PARM(timingcable, "i");
+#ifdef VPM_SUPPORT
+MODULE_PARM(vpmsupport,"i");
+#endif
MODULE_PARM(t1e1override, "i");
MODULE_PARM(alarmdebounce, "i");
+MODULE_PARM(j1mode, "i");
#endif
MODULE_DEVICE_TABLE(pci, t4_pci_tbl);