author	sruffell <sruffell@5390a7c7-147a-4af0-8ec9-7488f05a26cb>	2009-07-21 18:29:22 +0000
committer	sruffell <sruffell@5390a7c7-147a-4af0-8ec9-7488f05a26cb>	2009-07-21 18:29:22 +0000
commit	400534fd362499ba7953dab31ac638c5d7c38f99 (patch)
tree	3a24ddd4c07fa3984be6e8778e96a4c78eb9f03c
parent	7e7de3b94ca6799eb3637c3922463250fae471cc (diff)
dahdi-base: Add support for core timing.
This essentially moves the function of dahdi_dummy into the core of Zaptel. It ensures that if Zaptel is loaded, it will always be able to provide timing, regardless of whether any board drivers are loaded or whether those drivers are properly calling dahdi_receive. If a master span is loaded and calling ztreceive, the behavior is the same as it normally is.

This functionality is off by default; uncomment CONFIG_ZAPTEL_CORE_TIMER in include/dahdi/config_dahdi.h to enable it.

git-svn-id: http://svn.digium.com/svn/zaptel/branches/1.4@4666 5390a7c7-147a-4af0-8ec9-7488f05a26cb
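The heart of the new code below is a catch-up rule: a kernel timer fires roughly every 4 ms, compares the milliseconds elapsed since the start of the interval with the number of times process_masterspan() has already run, and keeps calling it until the two match, which preserves the required 1000 calls per second even when no hardware driver is ticking. A minimal userspace sketch of that rule, with illustrative names (elapsed_ms, calls_so_far and tick() are stand-ins, not symbols from the driver):

#include <stdio.h>

/* Stand-in for process_masterspan(); the real function does the
 * conferencing and timer work shown in the diff below. */
static unsigned long calls_so_far;

static void tick(void)
{
	calls_so_far++;
}

/* Catch-up rule used by the core timer: the handler must have run once
 * per elapsed millisecond, so keep calling it until the counts match. */
static void catch_up(unsigned long elapsed_ms)
{
	while (elapsed_ms > calls_so_far)
		tick();
}

int main(void)
{
	/* Pretend the timer fired 11 ms into the interval with no hardware
	 * interrupts having arrived: tick() still ends up called once per ms. */
	catch_up(11);
	printf("tick() ran %lu times for 11 elapsed ms\n", calls_so_far);
	return 0;
}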
-rw-r--r--	kernel/zaptel-base.c	313
-rw-r--r--	kernel/zconfig.h	6
2 files changed, 228 insertions, 91 deletions
diff --git a/kernel/zaptel-base.c b/kernel/zaptel-base.c
index 23e6a6c..89346b4 100644
--- a/kernel/zaptel-base.c
+++ b/kernel/zaptel-base.c
@@ -278,6 +278,17 @@ static struct
int dst; /* dst conf number */
} conf_links[ZT_MAX_CONF + 1];
+#ifdef CONFIG_ZAPTEL_CORE_TIMER
+
+static struct core_timer {
+ struct timer_list timer;
+ struct timespec start_interval;
+ atomic_t count;
+ atomic_t shutdown;
+ atomic_t last_count;
+} core_timer;
+
+#endif /* CONFIG_ZAPTEL_CORE_TIMER */
/* There are three sets of conference sum accumulators. One for the current
sample chunk (conf_sums), one for the next sample chunk (conf_sums_next), and
@@ -2544,12 +2555,21 @@ static int zt_specchan_release(struct inode *node, struct file *file, int unit)
return res;
}
+static int can_open_timer(void)
+{
+#ifdef CONFIG_ZAPTEL_CORE_TIMER
+ return 1;
+#else
+ return maxspans > 0;
+#endif
+}
+
static struct zt_chan *zt_alloc_pseudo(void)
{
struct zt_chan *pseudo;
unsigned long flags;
- /* Don't allow /dev/zap/pseudo to open if there are no spans */
- if (maxspans < 1)
+	/* Don't allow /dev/zap/pseudo to open if there is no timing */
+ if (!can_open_timer())
return NULL;
pseudo = kmalloc(sizeof(struct zt_chan), GFP_KERNEL);
if (!pseudo)
@@ -2609,7 +2629,7 @@ static int zt_open(struct inode *inode, struct file *file)
return -ENXIO;
}
if (unit == 253) {
- if (maxspans) {
+ if (can_open_timer()) {
return zt_timing_open(inode, file);
} else {
return -ENXIO;
@@ -2618,16 +2638,13 @@ static int zt_open(struct inode *inode, struct file *file)
if (unit == 254)
return zt_chan_open(inode, file);
if (unit == 255) {
- if (maxspans) {
- chan = zt_alloc_pseudo();
- if (chan) {
- file->private_data = chan;
- return zt_specchan_open(inode, file, chan->channo, 1);
- } else {
- return -ENXIO;
- }
- } else
+ chan = zt_alloc_pseudo();
+ if (chan) {
+ file->private_data = chan;
+ return zt_specchan_open(inode, file, chan->channo, 1);
+ } else {
return -ENXIO;
+ }
}
return zt_specchan_open(inode, file, unit, 1);
}
@@ -7449,10 +7466,197 @@ int zt_transmit(struct zt_span *span)
return 0;
}
+static void process_masterspan(void)
+{
+	unsigned long flags, flagso;
+ int x, y, z;
+
+#ifdef CONFIG_ZAPTEL_CORE_TIMER
+ /* We increment the calls since start here, so that if we switch over
+ * to the core timer, we know how many times we need to call
+ * process_masterspan in order to catch up since this function needs
+ * to be called 1000 times per second. */
+ atomic_inc(&core_timer.count);
+#endif
+ /* Hold the big zap lock for the duration of major
+ activities which touch all sorts of channels */
+ spin_lock_irqsave(&bigzaplock, flagso);
+ /* Process any timers */
+ process_timers();
+ /* If we have dynamic stuff, call the ioctl with 0,0 parameters to
+ make it run */
+ if (zt_dynamic_ioctl)
+ zt_dynamic_ioctl(0,0);
+ for (x=1;x<maxchans;x++) {
+ if (chans[x] && chans[x]->confmode && !(chans[x]->flags & ZT_FLAG_PSEUDO)) {
+ u_char *data;
+ spin_lock_irqsave(&chans[x]->lock, flags);
+ data = __buf_peek(&chans[x]->confin);
+ __zt_receive_chunk(chans[x], data);
+ if (data)
+ __buf_pull(&chans[x]->confin, NULL,chans[x], "confreceive");
+ spin_unlock_irqrestore(&chans[x]->lock, flags);
+ }
+ }
+ /* This is the master channel, so make things switch over */
+ rotate_sums();
+ /* do all the pseudo and/or conferenced channel receives (getbuf's) */
+ for (x=1;x<maxchans;x++) {
+ if (chans[x] && (chans[x]->flags & ZT_FLAG_PSEUDO)) {
+ spin_lock_irqsave(&chans[x]->lock, flags);
+ __zt_transmit_chunk(chans[x], NULL);
+ spin_unlock_irqrestore(&chans[x]->lock, flags);
+ }
+ }
+ if (maxlinks) {
+#ifdef CONFIG_ZAPTEL_MMX
+ zt_kernel_fpu_begin();
+#endif
+ /* process all the conf links */
+ for(x = 1; x <= maxlinks; x++) {
+ /* if we have a destination conf */
+ if (((z = confalias[conf_links[x].dst]) > 0) &&
+ ((y = confalias[conf_links[x].src]) > 0)) {
+ ACSS(conf_sums[z], conf_sums[y]);
+ }
+ }
+#ifdef CONFIG_ZAPTEL_MMX
+ kernel_fpu_end();
+#endif
+ }
+ /* do all the pseudo/conferenced channel transmits (putbuf's) */
+ for (x=1;x<maxchans;x++) {
+ if (chans[x] && (chans[x]->flags & ZT_FLAG_PSEUDO)) {
+ unsigned char tmp[ZT_CHUNKSIZE];
+ spin_lock_irqsave(&chans[x]->lock, flags);
+ __zt_getempty(chans[x], tmp);
+ __zt_receive_chunk(chans[x], tmp);
+ spin_unlock_irqrestore(&chans[x]->lock, flags);
+ }
+ }
+ for (x=1;x<maxchans;x++) {
+ if (chans[x] && chans[x]->confmode && !(chans[x]->flags & ZT_FLAG_PSEUDO)) {
+ u_char *data;
+ spin_lock_irqsave(&chans[x]->lock, flags);
+ data = __buf_pushpeek(&chans[x]->confout);
+ __zt_transmit_chunk(chans[x], data);
+ if (data)
+ __buf_push(&chans[x]->confout, NULL, "conftransmit");
+ spin_unlock_irqrestore(&chans[x]->lock, flags);
+ }
+ }
+#ifdef ZAPTEL_SYNC_TICK
+ for (x=0;x<maxspans;x++) {
+ struct zt_span *s = spans[x];
+
+ if (s && s->sync_tick)
+ s->sync_tick(s, s == master);
+ }
+#endif
+ spin_unlock_irqrestore(&bigzaplock, flagso);
+}
+
+#ifndef CONFIG_ZAPTEL_CORE_TIMER
+
+static void coretimer_init(void)
+{
+ return;
+}
+
+static void coretimer_cleanup(void)
+{
+ return;
+}
+
+#else
+
+static unsigned long core_diff_ms(struct timespec *t0, struct timespec *t1)
+{
+ long nanosec, sec;
+ unsigned long ms;
+ sec = (t1->tv_sec - t0->tv_sec);
+ nanosec = (t1->tv_nsec - t0->tv_nsec);
+ while (nanosec >= NSEC_PER_SEC) {
+ nanosec -= NSEC_PER_SEC;
+ ++sec;
+ }
+ while (nanosec < 0) {
+ nanosec += NSEC_PER_SEC;
+ --sec;
+ }
+ ms = (sec * 1000) + (nanosec / 1000000L);
+ return ms;
+}
+
+static void coretimer_func(unsigned long param)
+{
+ unsigned long ms_since_start;
+ struct timespec now;
+ const unsigned long MAX_INTERVAL = 100000L;
+ const unsigned long FOURMS_INTERVAL = HZ/250;
+ const unsigned long ONESEC_INTERVAL = HZ;
+
+ now = current_kernel_time();
+
+ if (atomic_read(&core_timer.count) ==
+ atomic_read(&core_timer.last_count)) {
+
+ /* This is the code path if a board driver is not calling
+ * dahdi_receive, and therefore the core of dahdi needs to
+ * perform the master span processing itself. */
+
+ if (!atomic_read(&core_timer.shutdown))
+ mod_timer(&core_timer.timer, jiffies + FOURMS_INTERVAL);
+
+ ms_since_start = core_diff_ms(&core_timer.start_interval, &now);
+ while (ms_since_start > atomic_read(&core_timer.count))
+ process_masterspan();
+
+ if (ms_since_start > MAX_INTERVAL) {
+ atomic_set(&core_timer.count, 0);
+ atomic_set(&core_timer.last_count, 0);
+ core_timer.start_interval = now;
+ } else {
+ atomic_set(&core_timer.last_count,
+ atomic_read(&core_timer.count));
+ }
+
+ } else {
+
+ /* It looks like a board driver is calling dahdi_receive. We
+ * will just check again in a second. */
+ atomic_set(&core_timer.count, 0);
+ atomic_set(&core_timer.last_count, 0);
+ core_timer.start_interval = now;
+ if (!atomic_read(&core_timer.shutdown))
+ mod_timer(&core_timer.timer, jiffies + ONESEC_INTERVAL);
+ }
+}
+
+static void coretimer_init(void)
+{
+ init_timer(&core_timer.timer);
+ core_timer.timer.function = coretimer_func;
+ core_timer.start_interval = current_kernel_time();
+ core_timer.timer.expires = jiffies + HZ;
+ atomic_set(&core_timer.count, 0);
+ atomic_set(&core_timer.shutdown, 0);
+ add_timer(&core_timer.timer);
+}
+
+static void coretimer_cleanup(void)
+{
+ atomic_set(&core_timer.shutdown, 1);
+ del_timer_sync(&core_timer.timer);
+}
+
+#endif /* CONFIG_ZAPTEL_CORE_TIMER */
+
+
int zt_receive(struct zt_span *span)
{
int x,y,z;
- unsigned long flags, flagso;
+ unsigned long flags;
#if 1
#ifdef CONFIG_ZAPTEL_WATCHDOG
@@ -7531,84 +7735,8 @@ int zt_receive(struct zt_span *span)
}
}
- if (span == master) {
- /* Hold the big zap lock for the duration of major
- activities which touch all sorts of channels */
- spin_lock_irqsave(&bigzaplock, flagso);
- /* Process any timers */
- process_timers();
- /* If we have dynamic stuff, call the ioctl with 0,0 parameters to
- make it run */
- if (zt_dynamic_ioctl)
- zt_dynamic_ioctl(0,0);
- for (x=1;x<maxchans;x++) {
- if (chans[x] && chans[x]->confmode && !(chans[x]->flags & ZT_FLAG_PSEUDO)) {
- u_char *data;
- spin_lock_irqsave(&chans[x]->lock, flags);
- data = __buf_peek(&chans[x]->confin);
- __zt_receive_chunk(chans[x], data);
- if (data)
- __buf_pull(&chans[x]->confin, NULL,chans[x], "confreceive");
- spin_unlock_irqrestore(&chans[x]->lock, flags);
- }
- }
- /* This is the master channel, so make things switch over */
- rotate_sums();
- /* do all the pseudo and/or conferenced channel receives (getbuf's) */
- for (x=1;x<maxchans;x++) {
- if (chans[x] && (chans[x]->flags & ZT_FLAG_PSEUDO)) {
- spin_lock_irqsave(&chans[x]->lock, flags);
- __zt_transmit_chunk(chans[x], NULL);
- spin_unlock_irqrestore(&chans[x]->lock, flags);
- }
- }
- if (maxlinks) {
-#ifdef CONFIG_ZAPTEL_MMX
- zt_kernel_fpu_begin();
-#endif
- /* process all the conf links */
- for(x = 1; x <= maxlinks; x++) {
- /* if we have a destination conf */
- if (((z = confalias[conf_links[x].dst]) > 0) &&
- ((y = confalias[conf_links[x].src]) > 0)) {
- ACSS(conf_sums[z], conf_sums[y]);
- }
- }
-#ifdef CONFIG_ZAPTEL_MMX
- kernel_fpu_end();
-#endif
- }
- /* do all the pseudo/conferenced channel transmits (putbuf's) */
- for (x=1;x<maxchans;x++) {
- if (chans[x] && (chans[x]->flags & ZT_FLAG_PSEUDO)) {
- unsigned char tmp[ZT_CHUNKSIZE];
- spin_lock_irqsave(&chans[x]->lock, flags);
- __zt_getempty(chans[x], tmp);
- __zt_receive_chunk(chans[x], tmp);
- spin_unlock_irqrestore(&chans[x]->lock, flags);
- }
- }
- for (x=1;x<maxchans;x++) {
- if (chans[x] && chans[x]->confmode && !(chans[x]->flags & ZT_FLAG_PSEUDO)) {
- u_char *data;
- spin_lock_irqsave(&chans[x]->lock, flags);
- data = __buf_pushpeek(&chans[x]->confout);
- __zt_transmit_chunk(chans[x], data);
- if (data)
- __buf_push(&chans[x]->confout, NULL, "conftransmit");
- spin_unlock_irqrestore(&chans[x]->lock, flags);
- }
- }
-#ifdef ZAPTEL_SYNC_TICK
- for (x=0;x<maxspans;x++) {
- struct zt_span *s = spans[x];
-
- if (s && s->sync_tick)
- s->sync_tick(s, s == master);
- }
-#endif
- spin_unlock_irqrestore(&bigzaplock, flagso);
- }
+ if (span == master)
+ process_masterspan();
#endif
return 0;
}
@@ -7783,12 +7911,15 @@ static int __init zt_init(void) {
#ifdef CONFIG_ZAPTEL_WATCHDOG
watchdog_init();
#endif
+ coretimer_init();
return res;
}
static void __exit zt_cleanup(void) {
int x;
+ coretimer_cleanup();
+
#ifdef CONFIG_PROC_FS
remove_proc_entry("zaptel", NULL);
#endif
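
One detail from the hunk above that is easy to misread: core_diff_ms() normalizes the nanosecond difference into the [0, NSEC_PER_SEC) range before converting, so crossing a second boundary still yields the right millisecond count. The same arithmetic can be checked in userspace; the snippet below is only a sketch using <time.h>'s struct timespec and is not part of the commit:

#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000L

/* Same arithmetic as core_diff_ms() in the hunk above, in userspace. */
static unsigned long diff_ms(const struct timespec *t0, const struct timespec *t1)
{
	long sec = t1->tv_sec - t0->tv_sec;
	long nsec = t1->tv_nsec - t0->tv_nsec;

	/* Normalize so 0 <= nsec < NSEC_PER_SEC before converting. */
	while (nsec >= NSEC_PER_SEC) {
		nsec -= NSEC_PER_SEC;
		++sec;
	}
	while (nsec < 0) {
		nsec += NSEC_PER_SEC;
		--sec;
	}
	return (sec * 1000) + (nsec / 1000000L);
}

int main(void)
{
	/* 2.900 s -> 3.100 s crosses a second boundary; the answer is 200 ms. */
	struct timespec t0 = { .tv_sec = 2, .tv_nsec = 900000000L };
	struct timespec t1 = { .tv_sec = 3, .tv_nsec = 100000000L };

	printf("%lu ms\n", diff_ms(&t0, &t1));
	return 0;
}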
diff --git a/kernel/zconfig.h b/kernel/zconfig.h
index 26d1755..d2eca7d 100644
--- a/kernel/zconfig.h
+++ b/kernel/zconfig.h
@@ -150,6 +150,12 @@
/* #define CONFIG_ZAPTEL_WATCHDOG */
/*
+ * Define CONFIG_ZAPTEL_CORE_TIMER if you would like dahdi to always provide a
+ * timing source regardless of which spans / drivers are configured.
+ */
+/* #define CONFIG_ZAPTEL_CORE_TIMER */
+
+/*
* Uncomment for Non-standard FXS groundstart start state (A=Low, B=Low)
* particularly for CAC channel bank groundstart FXO ports.
*/