summaryrefslogtreecommitdiff
path: root/main/event.c
diff options
context:
space:
mode:
authorRussell Bryant <russell@russellbryant.com>2012-07-31 20:33:57 +0000
committerRussell Bryant <russell@russellbryant.com>2012-07-31 20:33:57 +0000
commit733b46022bae0385e9fb33e29c16eee67a5a5110 (patch)
tree4d09421d8c5a36300a18a0f88afbf201413ded33 /main/event.c
parent9b16c8b0f6c3b6310e303411421bfcb16b26c3c4 (diff)
Move event cache updates into event processing thread.
Prior to this patch, updating the device state cache was done by the thread that originated the event. It would update the cache and then queue the event up for another thread to dispatch. This patch moves the cache updating part to be in the same thread as event dispatching. I was working with someone on a heavily loaded Asterisk system and while reviewing backtraces of the system while it was having problems, I noticed that there were a lot of threads contending for the lock on the event cache. By simply moving this into a single thread, this helped performance *a lot* and alleviated some deadlock-like symptoms. Review: https://reviewboard.asterisk.org/r/2066/ git-svn-id: https://origsvn.digium.com/svn/asterisk/trunk@370664 65c4cc65-6c06-0410-ace0-fbb531ad65f3
Diffstat (limited to 'main/event.c')
-rw-r--r--main/event.c41
1 files changed, 23 insertions, 18 deletions
diff --git a/main/event.c b/main/event.c
index 23e20f333..12d3abb7f 100644
--- a/main/event.c
+++ b/main/event.c
@@ -103,6 +103,7 @@ struct ast_event {
*/
struct ast_event_ref {
struct ast_event *event;
+ unsigned int cache;
};
struct ast_event_ie_val {
@@ -1470,23 +1471,6 @@ static void event_update_cache(struct ao2_container *cache, struct ast_event *ev
ao2_unlock(cache);
}
-int ast_event_queue_and_cache(struct ast_event *event)
-{
- struct ao2_container *container;
-
- container = ast_event_cache[ast_event_get_type(event)].container;
- if (!container) {
- ast_log(LOG_WARNING, "cache requested for non-cached event type\n");
- } else {
- event_update_cache(container, event);
- }
-
- if (ast_event_queue(event)) {
- ast_event_destroy(event);
- }
- return 0;
-}
-
static int handle_event(void *data)
{
struct ast_event_ref *event_ref = data;
@@ -1497,6 +1481,16 @@ static int handle_event(void *data)
};
int i;
+ if (event_ref->cache) {
+ struct ao2_container *container;
+ container = ast_event_cache[ast_event_get_type(event_ref->event)].container;
+ if (!container) {
+ ast_log(LOG_WARNING, "cache requested for non-cached event type\n");
+ } else {
+ event_update_cache(container, event_ref->event);
+ }
+ }
+
for (i = 0; i < ARRAY_LEN(event_types); i++) {
AST_RWDLLIST_RDLOCK(&ast_event_subs[event_types[i]]);
AST_RWDLLIST_TRAVERSE(&ast_event_subs[event_types[i]], sub, entry) {
@@ -1522,7 +1516,7 @@ static int handle_event(void *data)
return 0;
}
-int ast_event_queue(struct ast_event *event)
+static int _ast_event_queue(struct ast_event *event, unsigned int cache)
{
struct ast_event_ref *event_ref;
uint16_t host_event_type;
@@ -1549,6 +1543,7 @@ int ast_event_queue(struct ast_event *event)
}
event_ref->event = event;
+ event_ref->cache = cache;
res = ast_taskprocessor_push(event_dispatcher, handle_event, event_ref);
if (res) {
@@ -1558,6 +1553,16 @@ int ast_event_queue(struct ast_event *event)
return res;
}
+int ast_event_queue(struct ast_event *event)
+{
+ return _ast_event_queue(event, 0);
+}
+
+int ast_event_queue_and_cache(struct ast_event *event)
+{
+ return _ast_event_queue(event, 1);
+}
+
static int ast_event_hash_mwi(const void *obj, const int flags)
{
const struct ast_event *event = obj;