From 22da209226aac2c65f45900e55cf5f162cd311d4 Mon Sep 17 00:00:00 2001 From: Benny Prijono Date: Tue, 5 Mar 2013 11:59:54 +0000 Subject: Implementation of Re #1628: Modify SIP transaction to use group lock to avoid deadlock etc. git-svn-id: http://svn.pjsip.org/repos/pjproject/trunk@4420 74dad513-b988-da41-8d7b-12977e46ad98 --- pjsip/include/pjsip/sip_transaction.h | 50 +++++- pjsip/src/pjsip-ua/sip_inv.c | 2 +- pjsip/src/pjsip/sip_transaction.c | 282 ++++++++++++++-------------------- pjsip/src/pjsip/sip_ua_layer.c | 2 +- pjsip/src/test/regc_test.c | 5 +- pjsip/src/test/test.c | 10 +- pjsip/src/test/test.h | 3 + pjsip/src/test/tsx_basic_test.c | 188 ++++++++++++++++++++++- pjsip/src/test/tsx_bench.c | 15 ++ pjsip/src/test/tsx_uac_test.c | 13 +- pjsip/src/test/tsx_uas_test.c | 8 +- 11 files changed, 394 insertions(+), 184 deletions(-) (limited to 'pjsip') diff --git a/pjsip/include/pjsip/sip_transaction.h b/pjsip/include/pjsip/sip_transaction.h index 5be37ac4..e60f214d 100644 --- a/pjsip/include/pjsip/sip_transaction.h +++ b/pjsip/include/pjsip/sip_transaction.h @@ -85,7 +85,8 @@ struct pjsip_transaction pj_pool_t *pool; /**< Pool owned by the tsx. */ pjsip_module *tsx_user; /**< Transaction user. */ pjsip_endpoint *endpt; /**< Endpoint instance. */ - pj_mutex_t *mutex; /**< Mutex for this tsx. */ + pj_bool_t terminating; /**< terminate() was called */ + pj_grp_lock_t *grp_lock; /**< Transaction grp lock. */ pj_mutex_t *mutex_b; /**< Second mutex to avoid deadlock. It is used to protect timer. */ @@ -212,6 +213,30 @@ PJ_DECL(pj_status_t) pjsip_tsx_create_uac( pjsip_module *tsx_user, pjsip_tx_data *tdata, pjsip_transaction **p_tsx); +/** + * Variant of pjsip_tsx_create_uac() with additional parameter to specify + * the group lock to use. Group lock can be used to synchronize locking + * among several objects to prevent deadlock, and to synchronize the + * lifetime of objects sharing the same group lock. + * + * See pjsip_tsx_create_uac() for general info about this function. + * + * @param tsx_user Module to be registered as transaction user of the new + * transaction, which will receive notification from the + * transaction via on_tsx_state() callback. + * @param tdata The outgoing request message. + * @param grp_lock Optional group lock to use by this transaction. If + * the value is NULL, the transaction will create its + * own group lock. + * @param p_tsx On return will contain the new transaction instance. + * + * @return PJ_SUCCESS if successfull. + */ +PJ_DECL(pj_status_t) pjsip_tsx_create_uac2(pjsip_module *tsx_user, + pjsip_tx_data *tdata, + pj_grp_lock_t *grp_lock, + pjsip_transaction **p_tsx); + /** * Create, initialize, and register a new transaction as UAS from the * specified incoming request in \c rdata. After calling this function, @@ -230,6 +255,29 @@ PJ_DECL(pj_status_t) pjsip_tsx_create_uas( pjsip_module *tsx_user, pjsip_rx_data *rdata, pjsip_transaction **p_tsx ); +/** + * Variant of pjsip_tsx_create_uas() with additional parameter to specify + * the group lock to use. Group lock can be used to synchronize locking + * among several objects to prevent deadlock, and to synchronize the + * lifetime of objects sharing the same group lock. + * + * See pjsip_tsx_create_uas() for general info about this function. + * + * @param tsx_user Module to be registered as transaction user of the new + * transaction, which will receive notification from the + * transaction via on_tsx_state() callback. + * @param rdata The received incoming request. 
+ * @param grp_lock Optional group lock to use by this transaction. If + * the value is NULL, the transaction will create its + * own group lock. + * @param p_tsx On return will contain the new transaction instance. + * + * @return PJ_SUCCESS if successfull. + */ +PJ_DECL(pj_status_t) pjsip_tsx_create_uas2(pjsip_module *tsx_user, + pjsip_rx_data *rdata, + pj_grp_lock_t *grp_lock, + pjsip_transaction **p_tsx ); /** * Lock/bind transaction to a specific transport/listener. This is optional, diff --git a/pjsip/src/pjsip-ua/sip_inv.c b/pjsip/src/pjsip-ua/sip_inv.c index 62764977..7d45894e 100644 --- a/pjsip/src/pjsip-ua/sip_inv.c +++ b/pjsip/src/pjsip-ua/sip_inv.c @@ -2910,7 +2910,7 @@ static void inv_respond_incoming_cancel(pjsip_inv_session *inv, } if (invite_tsx) - pj_mutex_unlock(invite_tsx->mutex); + pj_grp_lock_release(invite_tsx->grp_lock); } diff --git a/pjsip/src/pjsip/sip_transaction.c b/pjsip/src/pjsip/sip_transaction.c index b0cad3ce..eff48562 100644 --- a/pjsip/src/pjsip/sip_transaction.c +++ b/pjsip/src/pjsip/sip_transaction.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -90,9 +91,6 @@ static struct mod_tsx_layer } }; -/* Thread Local Storage ID for transaction lock */ -static long pjsip_tsx_lock_tls_id; - /* Transaction state names */ static const char *state_str[] = { @@ -123,14 +121,6 @@ enum TSX_HAS_RESOLVED_SERVER = 16, }; -/* Transaction lock. */ -typedef struct tsx_lock_data { - struct tsx_lock_data *prev; - pjsip_transaction *tsx; - int is_alive; -} tsx_lock_data; - - /* Timer timeout value constants */ static pj_time_val t1_timer_val = { PJSIP_T1_TIMEOUT/1000, PJSIP_T1_TIMEOUT%1000 }; @@ -148,9 +138,6 @@ static pj_time_val timeout_timer_val = { (64*PJSIP_T1_TIMEOUT)/1000, /* Prototypes. */ -static void lock_tsx(pjsip_transaction *tsx, struct tsx_lock_data *lck); -static pj_status_t unlock_tsx( pjsip_transaction *tsx, - struct tsx_lock_data *lck); static pj_status_t tsx_on_state_null( pjsip_transaction *tsx, pjsip_event *event); static pj_status_t tsx_on_state_calling( pjsip_transaction *tsx, @@ -178,8 +165,10 @@ static void tsx_tp_state_callback( pjsip_transport_state state, const pjsip_transport_state_info *info); static pj_status_t tsx_create( pjsip_module *tsx_user, + pj_grp_lock_t *grp_lock, pjsip_transaction **p_tsx); -static pj_status_t tsx_destroy( pjsip_transaction *tsx ); +static void tsx_on_destroy(void *arg); +static pj_status_t tsx_shutdown( pjsip_transaction *tsx ); static void tsx_resched_retransmission( pjsip_transaction *tsx ); static pj_status_t tsx_retransmit( pjsip_transaction *tsx, int resched); static int tsx_send_msg( pjsip_transaction *tsx, @@ -270,10 +259,9 @@ static pj_status_t create_tsx_key_2543( pj_pool_t *pool, const pjsip_rx_data *rdata ) { #define SEPARATOR '$' - char *key, *p, *end; + char *key, *p; int len; pj_size_t len_required; - pjsip_uri *req_uri; pj_str_t *host; PJ_ASSERT_RETURN(pool && str && method && rdata, PJ_EINVAL); @@ -283,7 +271,6 @@ static pj_status_t create_tsx_key_2543( pj_pool_t *pool, PJ_ASSERT_RETURN(rdata->msg_info.from, PJSIP_EMISSINGHDR); host = &rdata->msg_info.via->sent_by.host; - req_uri = (pjsip_uri*)rdata->msg_info.msg->line.req.uri; /* Calculate length required. */ len_required = 9 + /* CSeq number */ @@ -293,7 +280,6 @@ static pj_status_t create_tsx_key_2543( pj_pool_t *pool, 9 + /* Via port. */ 16; /* Separator+Allowance. */ key = p = (char*) pj_pool_alloc(pool, len_required); - end = p + len_required; /* Add role. */ *p++ = (char)(role==PJSIP_ROLE_UAC ? 
'c' : 's'); @@ -451,13 +437,6 @@ PJ_DEF(pj_status_t) pjsip_tsx_layer_init_module(pjsip_endpoint *endpt) //timeout_timer_val.msec = (64 * pjsip_cfg()->tsx.t1) % 1000; timeout_timer_val = td_timer_val; - /* Initialize TLS ID for transaction lock. */ - status = pj_thread_local_alloc(&pjsip_tsx_lock_tls_id); - if (status != PJ_SUCCESS) - return status; - - pj_thread_local_set(pjsip_tsx_lock_tls_id, NULL); - /* * Initialize transaction layer structure. */ @@ -482,7 +461,7 @@ PJ_DEF(pj_status_t) pjsip_tsx_layer_init_module(pjsip_endpoint *endpt) return PJ_ENOMEM; } - /* Create mutex. */ + /* Create group lock. */ status = pj_mutex_create_recursive(pool, "tsxlayer", &mod_tsx_layer.mutex); if (status != PJ_SUCCESS) { pjsip_endpt_release_pool(endpt, pool); @@ -665,8 +644,10 @@ PJ_DEF(pjsip_transaction*) pjsip_tsx_layer_find_tsx( const pj_str_t *key, * Transaction may gets deleted before we have chance to lock it. */ PJ_TODO(FIX_RACE_CONDITION_HERE); + PJ_RACE_ME(5); + if (tsx && lock) - pj_mutex_lock(tsx->mutex); + pj_grp_lock_acquire(tsx->grp_lock); return tsx; } @@ -711,7 +692,7 @@ static pj_status_t mod_tsx_layer_stop(void) if (tsx) { pjsip_tsx_terminate(tsx, PJSIP_SC_SERVICE_UNAVAILABLE); mod_tsx_layer_unregister_tsx(tsx); - tsx_destroy(tsx); + tsx_shutdown(tsx); } it = next; } @@ -735,9 +716,6 @@ static void tsx_layer_destroy(pjsip_endpoint *endpt) /* Release pool. */ pjsip_endpt_release_pool(mod_tsx_layer.endpt, mod_tsx_layer.pool); - /* Free TLS */ - pj_thread_local_free(pjsip_tsx_lock_tls_id); - /* Mark as unregistered. */ mod_tsx_layer.endpt = NULL; @@ -813,6 +791,7 @@ static pj_bool_t mod_tsx_layer_on_rx_request(pjsip_rx_data *rdata) * in pjsip_tsx_recv_msg(). */ PJ_TODO(FIX_RACE_CONDITION_HERE); + PJ_RACE_ME(5); /* Pass the message to the transaction. */ pjsip_tsx_recv_msg(tsx, rdata ); @@ -862,6 +841,7 @@ static pj_bool_t mod_tsx_layer_on_rx_response(pjsip_rx_data *rdata) * in pjsip_tsx_recv_msg(). */ PJ_TODO(FIX_RACE_CONDITION_HERE); + PJ_RACE_ME(5); /* Pass the message to the transaction. */ pjsip_tsx_recv_msg(tsx, rdata ); @@ -928,43 +908,6 @@ PJ_DEF(void) pjsip_tsx_layer_dump(pj_bool_t detail) ** ***************************************************************************** **/ -/* - * Lock transaction and set the value of Thread Local Storage. - */ -static void lock_tsx(pjsip_transaction *tsx, struct tsx_lock_data *lck) -{ - struct tsx_lock_data *prev_data; - - pj_mutex_lock(tsx->mutex); - prev_data = (struct tsx_lock_data *) - pj_thread_local_get(pjsip_tsx_lock_tls_id); - lck->prev = prev_data; - lck->tsx = tsx; - lck->is_alive = 1; - pj_thread_local_set(pjsip_tsx_lock_tls_id, lck); -} - - -/* - * Unlock transaction. - * This will selectively unlock the mutex ONLY IF the transaction has not been - * destroyed. The function knows whether the transaction has been destroyed - * because when transaction is destroyed the is_alive flag for the transaction - * will be set to zero. - */ -static pj_status_t unlock_tsx( pjsip_transaction *tsx, - struct tsx_lock_data *lck) -{ - pj_assert( (void*)pj_thread_local_get(pjsip_tsx_lock_tls_id) == lck); - pj_assert( lck->tsx == tsx ); - pj_thread_local_set(pjsip_tsx_lock_tls_id, lck->prev); - if (lck->is_alive) - pj_mutex_unlock(tsx->mutex); - - return lck->is_alive ? PJ_SUCCESS : PJSIP_ETSXDESTROYED; -} - - /* Lock transaction for accessing the timeout timer only. */ static void lock_timer(pjsip_transaction *tsx) { @@ -981,6 +924,7 @@ static void unlock_timer(pjsip_transaction *tsx) * This function is called by both UAC and UAS creation. 
*/ static pj_status_t tsx_create( pjsip_module *tsx_user, + pj_grp_lock_t *grp_lock, pjsip_transaction **p_tsx) { pj_pool_t *pool; @@ -1009,16 +953,22 @@ static pj_status_t tsx_create( pjsip_module *tsx_user, tsx->timeout_timer.user_data = tsx; tsx->timeout_timer.cb = &tsx_timer_callback; - status = pj_mutex_create_recursive(pool, tsx->obj_name, &tsx->mutex); - if (status != PJ_SUCCESS) { - pjsip_endpt_release_pool(mod_tsx_layer.endpt, pool); - return status; + if (grp_lock) { + tsx->grp_lock = grp_lock; + } else { + status = pj_grp_lock_create(pool, NULL, &tsx->grp_lock); + if (status != PJ_SUCCESS) { + pjsip_endpt_release_pool(mod_tsx_layer.endpt, pool); + return status; + } } + pj_grp_lock_add_ref(tsx->grp_lock); + pj_grp_lock_add_handler(tsx->grp_lock, tsx->pool, tsx, &tsx_on_destroy); + status = pj_mutex_create_simple(pool, tsx->obj_name, &tsx->mutex_b); if (status != PJ_SUCCESS) { - pj_mutex_destroy(tsx->mutex); - pjsip_endpt_release_pool(mod_tsx_layer.endpt, pool); + tsx_shutdown(tsx); return status; } @@ -1026,17 +976,28 @@ static pj_status_t tsx_create( pjsip_module *tsx_user, return PJ_SUCCESS; } - -/* Destroy transaction. */ -static pj_status_t tsx_destroy( pjsip_transaction *tsx ) +/* Really destroy transaction, when grp_lock reference is zero */ +static void tsx_on_destroy( void *arg ) { - struct tsx_lock_data *lck; + pjsip_transaction *tsx = (pjsip_transaction*)arg; + + PJ_LOG(5,(tsx->obj_name, "Transaction destroyed!")); + + pj_mutex_destroy(tsx->mutex_b); + pjsip_endpt_release_pool(tsx->endpt, tsx->pool); +} +/* Shutdown transaction. */ +static pj_status_t tsx_shutdown( pjsip_transaction *tsx ) +{ /* Release the transport */ tsx_update_transport(tsx, NULL); - /* Decrement reference counter in transport selector */ - pjsip_tpselector_dec_ref(&tsx->tp_sel); + /* Decrement reference counter in transport selector, only if + * we haven't been called before */ + if (!tsx->terminating) { + pjsip_tpselector_dec_ref(&tsx->tp_sel); + } /* Free last transmitted message. */ if (tsx->last_tx) { @@ -1057,30 +1018,21 @@ static pj_status_t tsx_destroy( pjsip_transaction *tsx ) /* Clear some pending flags. */ tsx->transport_flag &= ~(TSX_HAS_PENDING_RESCHED | TSX_HAS_PENDING_SEND); + /* Refuse to destroy transaction if it has pending resolving. */ if (tsx->transport_flag & TSX_HAS_PENDING_TRANSPORT) { tsx->transport_flag |= TSX_HAS_PENDING_DESTROY; tsx->tsx_user = NULL; PJ_LOG(4,(tsx->obj_name, "Will destroy later because transport is " "in progress")); - return PJ_EBUSY; } - /* Clear TLS, so that mutex will not be unlocked */ - lck = (struct tsx_lock_data*) pj_thread_local_get(pjsip_tsx_lock_tls_id); - while (lck) { - if (lck->tsx == tsx) { - lck->is_alive = 0; - } - lck = lck->prev; + if (!tsx->terminating) { + tsx->terminating = PJ_TRUE; + pj_grp_lock_dec_ref(tsx->grp_lock); } - pj_mutex_destroy(tsx->mutex_b); - pj_mutex_destroy(tsx->mutex); - - PJ_LOG(5,(tsx->obj_name, "Transaction destroyed!")); - - pjsip_endpt_release_pool(tsx->endpt, tsx->pool); + /* No acccess to tsx after this, it may have been destroyed */ return PJ_SUCCESS; } @@ -1093,7 +1045,6 @@ static void tsx_timer_callback( pj_timer_heap_t *theap, pj_timer_entry *entry) { pjsip_event event; pjsip_transaction *tsx = (pjsip_transaction*) entry->user_data; - struct tsx_lock_data lck; PJ_UNUSED_ARG(theap); @@ -1107,9 +1058,9 @@ static void tsx_timer_callback( pj_timer_heap_t *theap, pj_timer_entry *entry) PJSIP_EVENT_INIT_TIMER(event, entry); /* Dispatch event to transaction. 
*/ - lock_tsx(tsx, &lck); + pj_grp_lock_acquire(tsx->grp_lock); (*tsx->state_handler)(tsx, &event); - unlock_tsx(tsx, &lck); + pj_grp_lock_release(tsx->grp_lock); pj_log_pop_indent(); } @@ -1208,7 +1159,7 @@ static void tsx_set_state( pjsip_transaction *tsx, mod_tsx_layer_unregister_tsx(tsx); /* Destroy transaction. */ - tsx_destroy(tsx); + tsx_shutdown(tsx); } pj_log_pop_indent(); @@ -1221,13 +1172,20 @@ static void tsx_set_state( pjsip_transaction *tsx, PJ_DEF(pj_status_t) pjsip_tsx_create_uac( pjsip_module *tsx_user, pjsip_tx_data *tdata, pjsip_transaction **p_tsx) +{ + return pjsip_tsx_create_uac2(tsx_user, tdata, NULL, p_tsx); +} + +PJ_DEF(pj_status_t) pjsip_tsx_create_uac2(pjsip_module *tsx_user, + pjsip_tx_data *tdata, + pj_grp_lock_t *grp_lock, + pjsip_transaction **p_tsx) { pjsip_transaction *tsx; pjsip_msg *msg; pjsip_cseq_hdr *cseq; pjsip_via_hdr *via; pjsip_host_info dst_info; - struct tsx_lock_data lck; pj_status_t status; /* Validate arguments. */ @@ -1251,13 +1209,13 @@ PJ_DEF(pj_status_t) pjsip_tsx_create_uac( pjsip_module *tsx_user, /* Create transaction instance. */ - status = tsx_create( tsx_user, &tsx); + status = tsx_create( tsx_user, grp_lock, &tsx); if (status != PJ_SUCCESS) return status; /* Lock transaction. */ - lock_tsx(tsx, &lck); + pj_grp_lock_acquire(tsx->grp_lock); /* Role is UAC. */ tsx->role = PJSIP_ROLE_UAC; @@ -1323,8 +1281,8 @@ PJ_DEF(pj_status_t) pjsip_tsx_create_uac( pjsip_module *tsx_user, */ status = pjsip_get_request_dest(tdata, &dst_info); if (status != PJ_SUCCESS) { - unlock_tsx(tsx, &lck); - tsx_destroy(tsx); + pj_grp_lock_release(tsx->grp_lock); + tsx_shutdown(tsx); return status; } tsx->is_reliable = (dst_info.flag & PJSIP_TRANSPORT_RELIABLE); @@ -1335,14 +1293,14 @@ PJ_DEF(pj_status_t) pjsip_tsx_create_uac( pjsip_module *tsx_user, /* The assertion is removed by #1090: pj_assert(!"Bug in branch_param generator (i.e. not unique)"); */ - unlock_tsx(tsx, &lck); - tsx_destroy(tsx); + pj_grp_lock_release(tsx->grp_lock); + tsx_shutdown(tsx); return status; } /* Unlock transaction and return. */ - unlock_tsx(tsx, &lck); + pj_grp_lock_release(tsx->grp_lock); pj_log_push_indent(); PJ_LOG(5,(tsx->obj_name, "Transaction created for %s", @@ -1360,13 +1318,20 @@ PJ_DEF(pj_status_t) pjsip_tsx_create_uac( pjsip_module *tsx_user, PJ_DEF(pj_status_t) pjsip_tsx_create_uas( pjsip_module *tsx_user, pjsip_rx_data *rdata, pjsip_transaction **p_tsx) +{ + return pjsip_tsx_create_uas2(tsx_user, rdata, NULL, p_tsx); +} + +PJ_DEF(pj_status_t) pjsip_tsx_create_uas2(pjsip_module *tsx_user, + pjsip_rx_data *rdata, + pj_grp_lock_t *grp_lock, + pjsip_transaction **p_tsx) { pjsip_transaction *tsx; pjsip_msg *msg; pj_str_t *branch; pjsip_cseq_hdr *cseq; pj_status_t status; - struct tsx_lock_data lck; /* Validate arguments. */ PJ_ASSERT_RETURN(rdata && rdata->msg_info.msg && p_tsx, PJ_EINVAL); @@ -1404,13 +1369,13 @@ PJ_DEF(pj_status_t) pjsip_tsx_create_uas( pjsip_module *tsx_user, /* * Create transaction instance. */ - status = tsx_create( tsx_user, &tsx); + status = tsx_create( tsx_user, grp_lock, &tsx); if (status != PJ_SUCCESS) return status; /* Lock transaction. 
*/ - lock_tsx(tsx, &lck); + pj_grp_lock_acquire(tsx->grp_lock); /* Role is UAS */ tsx->role = PJSIP_ROLE_UAS; @@ -1427,8 +1392,8 @@ PJ_DEF(pj_status_t) pjsip_tsx_create_uas( pjsip_module *tsx_user, status = pjsip_tsx_create_key(tsx->pool, &tsx->transaction_key, PJSIP_ROLE_UAS, &tsx->method, rdata); if (status != PJ_SUCCESS) { - unlock_tsx(tsx, &lck); - tsx_destroy(tsx); + pj_grp_lock_release(tsx->grp_lock); + tsx_shutdown(tsx); return status; } @@ -1454,8 +1419,8 @@ PJ_DEF(pj_status_t) pjsip_tsx_create_uas( pjsip_module *tsx_user, /* Get response address. */ status = pjsip_get_response_addr( tsx->pool, rdata, &tsx->res_addr ); if (status != PJ_SUCCESS) { - unlock_tsx(tsx, &lck); - tsx_destroy(tsx); + pj_grp_lock_release(tsx->grp_lock); + tsx_shutdown(tsx); return status; } @@ -1476,8 +1441,8 @@ PJ_DEF(pj_status_t) pjsip_tsx_create_uas( pjsip_module *tsx_user, /* Register the transaction. */ status = mod_tsx_layer_register_tsx(tsx); if (status != PJ_SUCCESS) { - unlock_tsx(tsx, &lck); - tsx_destroy(tsx); + pj_grp_lock_release(tsx->grp_lock); + tsx_shutdown(tsx); return status; } @@ -1485,7 +1450,7 @@ PJ_DEF(pj_status_t) pjsip_tsx_create_uas( pjsip_module *tsx_user, rdata->endpt_info.mod_data[mod_tsx_layer.mod.id] = tsx; /* Unlock transaction and return. */ - unlock_tsx(tsx, &lck); + pj_grp_lock_release(tsx->grp_lock); pj_log_push_indent(); PJ_LOG(5,(tsx->obj_name, "Transaction created for %s", @@ -1504,13 +1469,11 @@ PJ_DEF(pj_status_t) pjsip_tsx_create_uas( pjsip_module *tsx_user, PJ_DEF(pj_status_t) pjsip_tsx_set_transport(pjsip_transaction *tsx, const pjsip_tpselector *sel) { - struct tsx_lock_data lck; - /* Must be UAC transaction */ PJ_ASSERT_RETURN(tsx && sel, PJ_EINVAL); /* Start locking the transaction. */ - lock_tsx(tsx, &lck); + pj_grp_lock_acquire(tsx->grp_lock); /* Decrement reference counter of previous transport selector */ pjsip_tpselector_dec_ref(&tsx->tp_sel); @@ -1522,7 +1485,7 @@ PJ_DEF(pj_status_t) pjsip_tsx_set_transport(pjsip_transaction *tsx, pjsip_tpselector_add_ref(&tsx->tp_sel); /* Unlock transaction. */ - unlock_tsx(tsx, &lck); + pj_grp_lock_release(tsx->grp_lock); return PJ_SUCCESS; } @@ -1547,8 +1510,6 @@ static void tsx_set_status_code(pjsip_transaction *tsx, */ PJ_DEF(pj_status_t) pjsip_tsx_terminate( pjsip_transaction *tsx, int code ) { - struct tsx_lock_data lck; - PJ_ASSERT_RETURN(tsx != NULL, PJ_EINVAL); PJ_LOG(5,(tsx->obj_name, "Request to terminate transaction")); @@ -1560,10 +1521,10 @@ PJ_DEF(pj_status_t) pjsip_tsx_terminate( pjsip_transaction *tsx, int code ) pj_log_push_indent(); - lock_tsx(tsx, &lck); + pj_grp_lock_acquire(tsx->grp_lock); tsx_set_status_code(tsx, code, NULL); tsx_set_state( tsx, PJSIP_TSX_STATE_TERMINATED, PJSIP_EVENT_USER, NULL); - unlock_tsx(tsx, &lck); + pj_grp_lock_release(tsx->grp_lock); pj_log_pop_indent(); @@ -1578,8 +1539,6 @@ PJ_DEF(pj_status_t) pjsip_tsx_terminate( pjsip_transaction *tsx, int code ) */ PJ_DEF(pj_status_t) pjsip_tsx_stop_retransmit(pjsip_transaction *tsx) { - struct tsx_lock_data lck; - PJ_ASSERT_RETURN(tsx != NULL, PJ_EINVAL); PJ_ASSERT_RETURN(tsx->role == PJSIP_ROLE_UAC && tsx->method.id == PJSIP_INVITE_METHOD, @@ -1589,13 +1548,13 @@ PJ_DEF(pj_status_t) pjsip_tsx_stop_retransmit(pjsip_transaction *tsx) pj_log_push_indent(); - lock_tsx(tsx, &lck); + pj_grp_lock_acquire(tsx->grp_lock); /* Cancel retransmission timer. 
*/ if (tsx->retransmit_timer.id != 0) { pjsip_endpt_cancel_timer(tsx->endpt, &tsx->retransmit_timer); tsx->retransmit_timer.id = 0; } - unlock_tsx(tsx, &lck); + pj_grp_lock_release(tsx->grp_lock); pj_log_pop_indent(); @@ -1617,8 +1576,8 @@ PJ_DEF(pj_status_t) pjsip_tsx_set_timeout( pjsip_transaction *tsx, tsx->method.id == PJSIP_INVITE_METHOD, PJ_EINVALIDOP); - /* Note: must not call lock_tsx() as that would introduce deadlock. - * See #1121. + /* Note: must not call pj_grp_lock_acquire(tsx->grp_lock) as + * that would introduce deadlock. See #1121. */ lock_timer(tsx); @@ -1659,7 +1618,6 @@ PJ_DEF(pj_status_t) pjsip_tsx_send_msg( pjsip_transaction *tsx, pjsip_tx_data *tdata ) { pjsip_event event; - struct tsx_lock_data lck; pj_status_t status; if (tdata == NULL) @@ -1675,7 +1633,7 @@ PJ_DEF(pj_status_t) pjsip_tsx_send_msg( pjsip_transaction *tsx, PJSIP_EVENT_INIT_TX_MSG(event, tdata); /* Dispatch to transaction. */ - lock_tsx(tsx, &lck); + pj_grp_lock_acquire(tsx->grp_lock); /* Set transport selector to tdata */ pjsip_tx_data_set_transport(tdata, &tsx->tp_sel); @@ -1683,7 +1641,7 @@ PJ_DEF(pj_status_t) pjsip_tsx_send_msg( pjsip_transaction *tsx, /* Dispatch to state handler */ status = (*tsx->state_handler)(tsx, &event); - unlock_tsx(tsx, &lck); + pj_grp_lock_release(tsx->grp_lock); /* Only decrement reference counter when it returns success. * (This is the specification from the .PDF design document). @@ -1706,8 +1664,6 @@ PJ_DEF(void) pjsip_tsx_recv_msg( pjsip_transaction *tsx, pjsip_rx_data *rdata) { pjsip_event event; - struct tsx_lock_data lck; - pj_status_t status; PJ_LOG(5,(tsx->obj_name, "Incoming %s in state %s", pjsip_rx_data_get_info(rdata), state_str[tsx->state])); @@ -1720,9 +1676,9 @@ PJ_DEF(void) pjsip_tsx_recv_msg( pjsip_transaction *tsx, PJSIP_EVENT_INIT_RX_MSG(event, rdata); /* Dispatch to transaction. */ - lock_tsx(tsx, &lck); - status = (*tsx->state_handler)(tsx, &event); - unlock_tsx(tsx, &lck); + pj_grp_lock_acquire(tsx->grp_lock); + (*tsx->state_handler)(tsx, &event); + pj_grp_lock_release(tsx->grp_lock); pj_log_pop_indent(); } @@ -1734,7 +1690,6 @@ static void send_msg_callback( pjsip_send_state *send_state, { pjsip_transaction *tsx = (pjsip_transaction*) send_state->token; pjsip_tx_data *tdata = send_state->tdata; - struct tsx_lock_data lck; /* Check if transaction has cancelled itself from this transmit * notification (https://trac.pjsip.org/repos/ticket/1033). @@ -1748,12 +1703,12 @@ static void send_msg_callback( pjsip_send_state *send_state, return; } + pj_grp_lock_acquire(tsx->grp_lock); + /* Reset */ tdata->mod_data[mod_tsx_layer.mod.id] = NULL; tsx->pending_tx = NULL; - lock_tsx(tsx, &lck); - if (sent > 0) { /* Successfully sent! */ pj_assert(send_state->cur_transport != NULL); @@ -1782,7 +1737,7 @@ static void send_msg_callback( pjsip_send_state *send_state, if (tsx->transport_flag & TSX_HAS_PENDING_DESTROY) { tsx_set_state( tsx, PJSIP_TSX_STATE_DESTROYED, PJSIP_EVENT_UNKNOWN, NULL ); - unlock_tsx(tsx, &lck); + pj_grp_lock_release(tsx->grp_lock); return; } @@ -1824,7 +1779,7 @@ static void send_msg_callback( pjsip_send_state *send_state, err =pj_strerror(-sent, errmsg, sizeof(errmsg)); - PJ_LOG(2,(tsx->obj_name, + PJ_LOG(2,(tsx->obj_name, "Failed to send %s! err=%d (%s)", pjsip_tx_data_get_info(send_state->tdata), -sent, errmsg)); @@ -1862,13 +1817,10 @@ static void send_msg_callback( pjsip_send_state *send_state, } } else { - char errmsg[PJ_ERR_MSG_SIZE]; - - PJ_LOG(2,(tsx->obj_name, - "Temporary failure in sending %s, " - "will try next server. 
Err=%d (%s)", - pjsip_tx_data_get_info(send_state->tdata), -sent, - pj_strerror(-sent, errmsg, sizeof(errmsg)).ptr)); + PJ_PERROR(2,(tsx->obj_name, -sent, + "Temporary failure in sending %s, " + "will try next server", + pjsip_tx_data_get_info(send_state->tdata))); /* Reset retransmission count */ tsx->retransmit_count = 0; @@ -1893,7 +1845,7 @@ static void send_msg_callback( pjsip_send_state *send_state, } } - unlock_tsx(tsx, &lck); + pj_grp_lock_release(tsx->grp_lock); } @@ -1903,7 +1855,6 @@ static void transport_callback(void *token, pjsip_tx_data *tdata, { if (sent < 0) { pjsip_transaction *tsx = (pjsip_transaction*) token; - struct tsx_lock_data lck; char errmsg[PJ_ERR_MSG_SIZE]; pj_str_t err; @@ -1912,9 +1863,9 @@ static void transport_callback(void *token, pjsip_tx_data *tdata, err = pj_strerror(-sent, errmsg, sizeof(errmsg)); PJ_LOG(2,(tsx->obj_name, "Transport failed to send %s! Err=%d (%s)", - pjsip_tx_data_get_info(tdata), -sent, errmsg)); + pjsip_tx_data_get_info(tdata), -sent, errmsg)); - lock_tsx(tsx, &lck); + pj_grp_lock_acquire(tsx->grp_lock); /* Release transport. */ tsx_update_transport(tsx, NULL); @@ -1924,7 +1875,7 @@ static void transport_callback(void *token, pjsip_tx_data *tdata, tsx_set_state( tsx, PJSIP_TSX_STATE_TERMINATED, PJSIP_EVENT_TRANSPORT_ERROR, tdata ); - unlock_tsx(tsx, &lck); + pj_grp_lock_release(tsx->grp_lock); } } @@ -1940,13 +1891,12 @@ static void tsx_tp_state_callback( pjsip_transport *tp, if (state == PJSIP_TP_STATE_DISCONNECTED) { pjsip_transaction *tsx; - struct tsx_lock_data lck; pj_assert(tp && info && info->user_data); tsx = (pjsip_transaction*)info->user_data; - lock_tsx(tsx, &lck); + pj_grp_lock_acquire(tsx->grp_lock); /* Terminate transaction when transport disconnected */ if (tsx->state < PJSIP_TSX_STATE_TERMINATED) { @@ -1959,7 +1909,7 @@ static void tsx_tp_state_callback( pjsip_transport *tp, PJSIP_EVENT_TRANSPORT_ERROR, NULL); } - unlock_tsx(tsx, &lck); + pj_grp_lock_release(tsx->grp_lock); } } @@ -1991,12 +1941,9 @@ static pj_status_t tsx_send_msg( pjsip_transaction *tsx, status = PJ_SUCCESS; if (status != PJ_SUCCESS) { - char errmsg[PJ_ERR_MSG_SIZE]; - - PJ_LOG(2,(tsx->obj_name, - "Error sending %s: Err=%d (%s)", - pjsip_tx_data_get_info(tdata), status, - pj_strerror(status, errmsg, sizeof(errmsg)).ptr)); + PJ_PERROR(2,(tsx->obj_name, status, + "Error sending %s", + pjsip_tx_data_get_info(tdata))); /* On error, release transport to force using full transport * resolution procedure. @@ -2109,15 +2056,14 @@ static pj_status_t tsx_send_msg( pjsip_transaction *tsx, PJ_DEF(pj_status_t) pjsip_tsx_retransmit_no_state(pjsip_transaction *tsx, pjsip_tx_data *tdata) { - struct tsx_lock_data lck; pj_status_t status; - lock_tsx(tsx, &lck); + pj_grp_lock_acquire(tsx->grp_lock); if (tdata == NULL) { tdata = tsx->last_tx; } status = tsx_send_msg(tsx, tdata); - unlock_tsx(tsx, &lck); + pj_grp_lock_release(tsx->grp_lock); /* Only decrement reference counter when it returns success. * (This is the specification from the .PDF design document). 
diff --git a/pjsip/src/pjsip/sip_ua_layer.c b/pjsip/src/pjsip/sip_ua_layer.c index 1279d738..073b25e3 100644 --- a/pjsip/src/pjsip/sip_ua_layer.c +++ b/pjsip/src/pjsip/sip_ua_layer.c @@ -545,7 +545,7 @@ static struct dlg_set *find_dlg_set_for_msg( pjsip_rx_data *rdata ) /* We should find the dialog attached to the INVITE transaction */ if (tsx) { dlg = (pjsip_dialog*) tsx->mod_data[mod_ua.mod.id]; - pj_mutex_unlock(tsx->mutex); + pj_grp_lock_release(tsx->grp_lock); /* Dlg may be NULL on some extreme condition * (e.g. during debugging where initially there is a dialog) diff --git a/pjsip/src/test/regc_test.c b/pjsip/src/test/regc_test.c index 7a5b8065..17cb399a 100644 --- a/pjsip/src/test/regc_test.c +++ b/pjsip/src/test/regc_test.c @@ -347,7 +347,10 @@ static int do_test(const char *title, client_cfg->error, client_result.error)); return -210; } - if (client_result.code != client_cfg->code) { + if (client_result.code != client_cfg->code && + client_cfg->code != 502 && client_cfg->code != 503 && + client_result.code != 502 && client_result.code != 503) + { PJ_LOG(3,(THIS_FILE, " error: expecting code=%d, got code=%d", client_cfg->code, client_result.code)); return -220; diff --git a/pjsip/src/test/test.c b/pjsip/src/test/test.c index 8ffe5664..0361e617 100644 --- a/pjsip/src/test/test.c +++ b/pjsip/src/test/test.c @@ -47,9 +47,10 @@ pjsip_endpoint *endpt; +pj_caching_pool caching_pool; int log_level = 3; int param_log_decor = PJ_LOG_HAS_NEWLINE | PJ_LOG_HAS_TIME | - PJ_LOG_HAS_MICRO_SEC; + PJ_LOG_HAS_MICRO_SEC | PJ_LOG_HAS_INDENT; static pj_oshandle_t fd_report; const char *system_name = "Unknown"; @@ -223,7 +224,6 @@ static void close_report(void) int test_main(void) { pj_status_t rc; - pj_caching_pool caching_pool; const char *filename; unsigned tsx_test_cnt=0; struct tsx_test_param tsx_test[10]; @@ -369,6 +369,12 @@ int test_main(void) DO_TEST(regc_test()); #endif + /* + * Better be last because it recreates the endpt + */ +#if INCLUDE_TSX_DESTROY_TEST + DO_TEST(tsx_destroy_test()); +#endif on_return: flush_events(500); diff --git a/pjsip/src/test/test.h b/pjsip/src/test/test.h index 358f9302..690598b9 100644 --- a/pjsip/src/test/test.h +++ b/pjsip/src/test/test.h @@ -23,6 +23,7 @@ #include extern pjsip_endpoint *endpt; +extern pj_caching_pool caching_pool; #define TEST_UDP_PORT 15060 #define TEST_UDP_PORT_STR "15060" @@ -64,6 +65,7 @@ extern pjsip_endpoint *endpt; #define INCLUDE_TCP_TEST INCLUDE_TRANSPORT_GROUP #define INCLUDE_RESOLVE_TEST INCLUDE_TRANSPORT_GROUP #define INCLUDE_TSX_TEST INCLUDE_TSX_GROUP +#define INCLUDE_TSX_DESTROY_TEST INCLUDE_TSX_GROUP #define INCLUDE_INV_OA_TEST INCLUDE_INV_GROUP #define INCLUDE_REGC_TEST INCLUDE_REGC_GROUP @@ -75,6 +77,7 @@ int msg_err_test(void); int multipart_test(void); int txdata_test(void); int tsx_bench(void); +int tsx_destroy_test(void); int transport_udp_test(void); int transport_loop_test(void); int transport_tcp_test(void); diff --git a/pjsip/src/test/tsx_basic_test.c b/pjsip/src/test/tsx_basic_test.c index 163c3b9f..2de73d75 100644 --- a/pjsip/src/test/tsx_basic_test.c +++ b/pjsip/src/test/tsx_basic_test.c @@ -125,7 +125,7 @@ static int double_terminate(void) app_perror(" error: unable to terminate transaction", status); return -40; } - pj_mutex_unlock(tsx->mutex); + pj_grp_lock_release(tsx->grp_lock); } flush_events(500); @@ -155,3 +155,189 @@ int tsx_basic_test(struct tsx_test_param *param) return 0; } + +/**************************************************************************/ + +struct tsx_test_state +{ + int pool_cnt; +}; + 
+static void save_tsx_test_state(struct tsx_test_state *st) +{ + st->pool_cnt = caching_pool.used_count; +} + +static pj_status_t check_tsx_test_state(struct tsx_test_state *st) +{ + if (caching_pool.used_count > st->pool_cnt) + return -1; + + return 0; +} + +static void destroy_endpt() +{ + pjsip_endpt_destroy(endpt); + endpt = NULL; +} + +static pj_status_t init_endpt() +{ + pj_str_t ns = { "10.187.27.172", 13}; /* just a random, unreachable IP */ + pj_dns_resolver *resolver; + pj_status_t rc; + + rc = pjsip_endpt_create(&caching_pool.factory, "endpt", &endpt); + if (rc != PJ_SUCCESS) { + app_perror("pjsip_endpt_create", rc); + return rc; + } + + /* Start transaction layer module. */ + rc = pjsip_tsx_layer_init_module(endpt); + if (rc != PJ_SUCCESS) { + app_perror("tsx_layer_init", rc); + return rc; + } + + rc = pjsip_udp_transport_start(endpt, NULL, NULL, 1, NULL); + if (rc != PJ_SUCCESS) { + app_perror("udp init", rc); + return rc; + } + + rc = pjsip_tcp_transport_start(endpt, NULL, 1, NULL); + if (rc != PJ_SUCCESS) { + app_perror("tcp init", rc); + return rc; + } + + rc = pjsip_endpt_create_resolver(endpt, &resolver); + if (rc != PJ_SUCCESS) { + app_perror("create resolver", rc); + return rc; + } + + pj_dns_resolver_set_ns(resolver, 1, &ns, NULL); + + rc = pjsip_endpt_set_resolver(endpt, resolver); + if (rc != PJ_SUCCESS) { + app_perror("set resolver", rc); + return rc; + } + + return PJ_SUCCESS; +} + +static int tsx_create_and_send_req(void *arg) +{ + pj_str_t dst_uri = pj_str((char*)arg); + pj_str_t from_uri = pj_str((char*)""); + pjsip_tx_data *tdata; + pj_status_t status; + + status = pjsip_endpt_create_request(endpt, &pjsip_options_method, + &dst_uri, &from_uri, &dst_uri, + NULL, NULL, -1, NULL, + &tdata); + if (status != PJ_SUCCESS) + return status; + + status = pjsip_endpt_send_request(endpt, tdata, -1, NULL, NULL); + if (status != PJ_SUCCESS) + return status; + + return PJ_SUCCESS; +} + +int tsx_destroy_test() +{ + struct tsx_test_state state; + struct test_desc + { + const char *title; + int (*func)(void*); + void *arg; + int sleep_before_unload; + int sleep_after_unload; + } test_entries[] = + { + { + "normal unable to resolve", + &tsx_create_and_send_req, + "sip:user@somehost", + 10000, + 1 + }, + { + "resolve and destroy, wait", + &tsx_create_and_send_req, + "sip:user@somehost", + 1, + 10000 + }, + { + "tcp connect and destroy", + &tsx_create_and_send_req, + "sip:user@10.125.36.63:58517;transport=tcp", + 60000, + 1000 + }, + { + "tcp connect and destroy", + &tsx_create_and_send_req, + "sip:user@10.125.36.63:58517;transport=tcp", + 1, + 60000 + }, + + }; + int rc; + unsigned i; + const int INDENT = 2; + + pj_log_add_indent(INDENT); + destroy_endpt(); + + for (i=0; ititle)); + + pj_log_add_indent(INDENT); + save_tsx_test_state(&state); + + rc = init_endpt(); + if (rc != PJ_SUCCESS) { + pj_log_add_indent(-INDENT*2); + return -10; + } + + rc = td->func(td->arg); + if (rc != PJ_SUCCESS) { + pj_log_add_indent(-INDENT*2); + return -20; + } + + flush_events(td->sleep_before_unload); + pjsip_tsx_layer_destroy(); + flush_events(td->sleep_after_unload); + destroy_endpt(); + + rc = check_tsx_test_state(&state); + if (rc != PJ_SUCCESS) { + init_endpt(); + pj_log_add_indent(-INDENT*2); + return -30; + } + + pj_log_add_indent(-INDENT); + } + + init_endpt(); + + pj_log_add_indent(-INDENT); + return 0; +} + diff --git a/pjsip/src/test/tsx_bench.c b/pjsip/src/test/tsx_bench.c index 8ae57783..2282b6a9 100644 --- a/pjsip/src/test/tsx_bench.c +++ b/pjsip/src/test/tsx_bench.c @@ -79,8 +79,13 @@ 
static int uac_tsx_bench(unsigned working_set, pj_timestamp *p_elapsed) on_error: for (i=0; istate == PJSIP_TSX_STATE_TERMINATED) { /* Test the status code. */ - if (tsx->status_code != PJSIP_SC_TSX_TRANSPORT_ERROR) { + if (tsx->status_code != PJSIP_SC_TSX_TRANSPORT_ERROR && + tsx->status_code != PJSIP_SC_BAD_GATEWAY) + { PJ_LOG(3,(THIS_FILE, - " error: status code is %d instead of %d", - tsx->status_code, PJSIP_SC_TSX_TRANSPORT_ERROR)); + " error: status code is %d instead of %d or %d", + tsx->status_code, PJSIP_SC_TSX_TRANSPORT_ERROR, + PJSIP_SC_BAD_GATEWAY)); test_complete = -720; } @@ -688,7 +691,7 @@ static pj_bool_t msg_receiver_on_rx_request(pjsip_rx_data *rdata) tsx = pjsip_tsx_layer_find_tsx(&key, PJ_TRUE); if (tsx) { pjsip_tsx_terminate(tsx, PJSIP_SC_REQUEST_TERMINATED); - pj_mutex_unlock(tsx->mutex); + pj_grp_lock_release(tsx->grp_lock); } else { PJ_LOG(3,(THIS_FILE, " error: uac transaction not found!")); test_complete = -633; @@ -1027,7 +1030,7 @@ static int perform_tsx_test(int dummy, char *target_uri, char *from_uri, tsx = pjsip_tsx_layer_find_tsx(&tsx_key, PJ_TRUE); if (tsx) { pjsip_tsx_terminate(tsx, PJSIP_SC_REQUEST_TERMINATED); - pj_mutex_unlock(tsx->mutex); + pj_grp_lock_release(tsx->grp_lock); flush_events(1000); } pjsip_tx_data_dec_ref(tdata); diff --git a/pjsip/src/test/tsx_uas_test.c b/pjsip/src/test/tsx_uas_test.c index 973f3315..3c43f535 100644 --- a/pjsip/src/test/tsx_uas_test.c +++ b/pjsip/src/test/tsx_uas_test.c @@ -225,12 +225,12 @@ static void send_response_timer( pj_timer_heap_t *timer_heap, if (status != PJ_SUCCESS) { // Some tests do expect failure! //PJ_LOG(3,(THIS_FILE," error: timer unable to send response")); - pj_mutex_unlock(tsx->mutex); + pj_grp_lock_release(tsx->grp_lock); pjsip_tx_data_dec_ref(r->tdata); return; } - pj_mutex_unlock(tsx->mutex); + pj_grp_lock_release(tsx->grp_lock); } /* Utility to send response. */ @@ -313,7 +313,7 @@ static void terminate_our_tsx(int status_code) } pjsip_tsx_terminate(tsx, status_code); - pj_mutex_unlock(tsx->mutex); + pj_grp_lock_release(tsx->grp_lock); } #if 0 /* Unused for now */ @@ -1259,7 +1259,7 @@ static int perform_test( char *target_uri, char *from_uri, tsx = pjsip_tsx_layer_find_tsx(&tsx_key, PJ_TRUE); if (tsx) { pjsip_tsx_terminate(tsx, PJSIP_SC_REQUEST_TERMINATED); - pj_mutex_unlock(tsx->mutex); + pj_grp_lock_release(tsx->grp_lock); flush_events(1000); } pjsip_tx_data_dec_ref(tdata); -- cgit v1.2.3