From 1321a047fb6d74374f7948eda41ad5c84884daab Mon Sep 17 00:00:00 2001
From: Nanang Izzuddin
Date: Thu, 19 Jun 2014 09:42:02 +0000
Subject: Fix #1773: Added group lock to SIP transport to avoid race condition between transport callback and destroy.

git-svn-id: http://svn.pjsip.org/repos/pjproject/trunk@4862 74dad513-b988-da41-8d7b-12977e46ad98
---
 pjsip/src/pjsip/sip_transport_tcp.c | 108 +++++++++++++++++-----
 pjsip/src/pjsip/sip_transport_tls.c | 179 ++++++++++++++++++++++++++----------
 pjsip/src/pjsip/sip_transport_udp.c |  62 +++++++++----
 3 files changed, 260 insertions(+), 89 deletions(-)

diff --git a/pjsip/src/pjsip/sip_transport_tcp.c b/pjsip/src/pjsip/sip_transport_tcp.c
index 70ae300e..1b55cdb0 100644
--- a/pjsip/src/pjsip/sip_transport_tcp.c
+++ b/pjsip/src/pjsip/sip_transport_tcp.c
@@ -61,6 +61,9 @@ struct tcp_listener
     pj_qos_type		    qos_type;
     pj_qos_params	    qos_params;
     pj_sockopt_params	    sockopt_params;
+
+    /* Group lock to be used by TCP listener and ioqueue key */
+    pj_grp_lock_t	   *grp_lock;
 };
 
 
@@ -115,6 +118,9 @@ struct tcp_transport
 
     /* Pending transmission list. */
     struct delayed_tdata    delayed_list;
+
+    /* Group lock to be used by TCP transport and ioqueue key */
+    pj_grp_lock_t	   *grp_lock;
 };
 
 
@@ -131,6 +137,9 @@ static pj_bool_t on_accept_complete(pj_activesock_t *asock,
 /* This callback is called by transport manager to destroy listener */
 static pj_status_t lis_destroy(pjsip_tpfactory *factory);
 
+/* Clean up listener resources (group lock handler) */
+static void lis_on_destroy(void *arg);
+
 /* This callback is called by transport manager to create transport */
 static pj_status_t lis_create_transport(pjsip_tpfactory *factory,
 					pjsip_tpmgr *mgr,
@@ -399,6 +408,17 @@ PJ_DEF(pj_status_t) pjsip_tcp_transport_start3(
     else
 	asock_cfg.async_cnt = cfg->async_cnt;
 
+    /* Create group lock */
+    status = pj_grp_lock_create(pool, NULL, &listener->grp_lock);
+    if (status != PJ_SUCCESS)
+	return status;
+
+    pj_grp_lock_add_ref(listener->grp_lock);
+    pj_grp_lock_add_handler(listener->grp_lock, pool, listener,
+			    &lis_on_destroy);
+
+    asock_cfg.grp_lock = listener->grp_lock;
+
     pj_bzero(&listener_cb, sizeof(listener_cb));
     listener_cb.on_accept_complete = &on_accept_complete;
     status = pj_activesock_create(pool, sock, pj_SOCK_STREAM(), &asock_cfg,
@@ -485,6 +505,27 @@ PJ_DEF(pj_status_t) pjsip_tcp_transport_start( pjsip_endpoint *endpt,
 }
 
 
+/* Clean up listener resources */
+static void lis_on_destroy(void *arg)
+{
+    struct tcp_listener *listener = (struct tcp_listener *)arg;
+
+    if (listener->factory.lock) {
+	pj_lock_destroy(listener->factory.lock);
+	listener->factory.lock = NULL;
+    }
+
+    if (listener->factory.pool) {
+	pj_pool_t *pool = listener->factory.pool;
+
+	PJ_LOG(4,(listener->factory.obj_name, "SIP TCP listener destroyed"));
+
+	listener->factory.pool = NULL;
+	pj_pool_release(pool);
+    }
+}
+
+
 /* This callback is called by transport manager to destroy listener */
 static pj_status_t lis_destroy(pjsip_tpfactory *factory)
 {
@@ -500,18 +541,13 @@ static pj_status_t lis_destroy(pjsip_tpfactory *factory)
 	listener->asock = NULL;
     }
 
-    if (listener->factory.lock) {
-	pj_lock_destroy(listener->factory.lock);
-	listener->factory.lock = NULL;
-    }
-
-    if (listener->factory.pool) {
-	pj_pool_t *pool = listener->factory.pool;
-
-	PJ_LOG(4,(listener->factory.obj_name, "SIP TCP listener destroyed"));
-
-	listener->factory.pool = NULL;
-	pj_pool_release(pool);
+    if (listener->grp_lock) {
+	pj_grp_lock_t *grp_lock = listener->grp_lock;
+	listener->grp_lock = NULL;
+	pj_grp_lock_dec_ref(grp_lock);
+	/* Listener may have been deleted at this point */
+    } else {
+	lis_on_destroy(listener);
     }
 
     return PJ_SUCCESS;
 }
@@ -563,6 +599,9 @@ static pj_bool_t on_connect_complete(pj_activesock_t *asock,
 /* TCP keep-alive timer callback */
 static void tcp_keep_alive_timer(pj_timer_heap_t *th, pj_timer_entry *e);
 
+/* Clean up TCP resources */
+static void tcp_on_destroy(void *arg);
+
 /*
  * Common function to create TCP transport, called when pending accept() and
  * pending connect() complete.
@@ -640,9 +679,18 @@ static pj_status_t tcp_create( struct tcp_listener *listener,
     tcp->base.do_shutdown = &tcp_shutdown;
     tcp->base.destroy = &tcp_destroy_transport;
 
+    /* Create group lock */
+    status = pj_grp_lock_create(pool, NULL, &tcp->grp_lock);
+    if (status != PJ_SUCCESS)
+	goto on_error;
+
+    pj_grp_lock_add_ref(tcp->grp_lock);
+    pj_grp_lock_add_handler(tcp->grp_lock, pool, tcp, &tcp_on_destroy);
+
     /* Create active socket */
     pj_activesock_cfg_default(&asock_cfg);
     asock_cfg.async_cnt = 1;
+    asock_cfg.grp_lock = tcp->grp_lock;
 
     pj_bzero(&tcp_callback, sizeof(tcp_callback));
     tcp_callback.on_data_read = &on_data_read;
@@ -780,11 +828,6 @@ static pj_status_t tcp_destroy(pjsip_transport *transport,
 	    on_data_sent(tcp->asock, op_key, -reason);
     }
 
-    if (tcp->rdata.tp_info.pool) {
-	pj_pool_release(tcp->rdata.tp_info.pool);
-	tcp->rdata.tp_info.pool = NULL;
-    }
-
     if (tcp->asock) {
 	pj_activesock_close(tcp->asock);
 	tcp->asock = NULL;
@@ -794,6 +837,23 @@ static pj_status_t tcp_destroy(pjsip_transport *transport,
 	tcp->sock = PJ_INVALID_SOCKET;
     }
 
+    if (tcp->grp_lock) {
+	pj_grp_lock_t *grp_lock = tcp->grp_lock;
+	tcp->grp_lock = NULL;
+	pj_grp_lock_dec_ref(grp_lock);
+	/* Transport may have been deleted at this point */
+    } else {
+	tcp_on_destroy(tcp);
+    }
+
+    return PJ_SUCCESS;
+}
+
+/* Clean up TCP resources */
+static void tcp_on_destroy(void *arg)
+{
+    struct tcp_transport *tcp = (struct tcp_transport*)arg;
+
     if (tcp->base.lock) {
 	pj_lock_destroy(tcp->base.lock);
 	tcp->base.lock = NULL;
@@ -804,16 +864,21 @@ static pj_status_t tcp_destroy(pjsip_transport *transport,
 	tcp->base.ref_cnt = NULL;
     }
 
+    if (tcp->rdata.tp_info.pool) {
+	pj_pool_release(tcp->rdata.tp_info.pool);
+	tcp->rdata.tp_info.pool = NULL;
+    }
+
     if (tcp->base.pool) {
 	pj_pool_t *pool;
 
-	if (reason != PJ_SUCCESS) {
+	if (tcp->close_reason != PJ_SUCCESS) {
 	    char errmsg[PJ_ERR_MSG_SIZE];
 
-	    pj_strerror(reason, errmsg, sizeof(errmsg));
+	    pj_strerror(tcp->close_reason, errmsg, sizeof(errmsg));
 	    PJ_LOG(4,(tcp->base.obj_name,
 		      "TCP transport destroyed with reason %d: %s",
-		      reason, errmsg));
+		      tcp->close_reason, errmsg));
 
 	} else {
 
@@ -826,11 +891,8 @@ static pj_status_t tcp_destroy(pjsip_transport *transport,
 	tcp->base.pool = NULL;
 	pj_pool_release(pool);
     }
-
-    return PJ_SUCCESS;
 }
-
 
 /*
  * This utility function creates receive data buffers and start
  * asynchronous recv() operations from the socket. It is called after
diff --git a/pjsip/src/pjsip/sip_transport_tls.c b/pjsip/src/pjsip/sip_transport_tls.c
index a86b398c..4e890e10 100644
--- a/pjsip/src/pjsip/sip_transport_tls.c
+++ b/pjsip/src/pjsip/sip_transport_tls.c
@@ -58,6 +58,9 @@ struct tls_listener
     pj_sockaddr		    bound_addr;
     pj_ssl_cert_t	   *cert;
     pjsip_tls_setting	    tls_setting;
+
+    /* Group lock to be used by TLS transport and ioqueue key */
+    pj_grp_lock_t	   *grp_lock;
 };
 
 
@@ -106,6 +109,9 @@ struct tls_transport
 
     /* Pending transmission list. */
     struct delayed_tdata    delayed_list;
+
+    /* Group lock to be used by TLS transport and ioqueue key */
+    pj_grp_lock_t	   *grp_lock;
 };
 
 
@@ -134,6 +140,9 @@ static pj_bool_t on_data_sent(pj_ssl_sock_t *ssock,
 /* This callback is called by transport manager to destroy listener */
 static pj_status_t lis_destroy(pjsip_tpfactory *factory);
 
+/* Clean up listener resources (group lock handler) */
+static void lis_on_destroy(void *arg);
+
 /* This callback is called by transport manager to create transport */
 static pj_status_t lis_create_transport(pjsip_tpfactory *factory,
 					pjsip_tpmgr *mgr,
@@ -376,6 +385,18 @@ PJ_DEF(pj_status_t) pjsip_tls_transport_start2( pjsip_endpoint *endpt,
 	break;
     }
 
+    /* Create group lock */
+    status = pj_grp_lock_create(pool, NULL, &listener->grp_lock);
+    if (status != PJ_SUCCESS)
+	return status;
+
+    /* Setup group lock handler */
+    pj_grp_lock_add_ref(listener->grp_lock);
+    pj_grp_lock_add_handler(listener->grp_lock, pool, listener,
+			    &lis_on_destroy);
+
+    ssock_param.grp_lock = listener->grp_lock;
+
     /* Create SSL socket */
     status = pj_ssl_sock_create(pool, &ssock_param, &listener->ssock);
     if (status != PJ_SUCCESS)
@@ -509,6 +530,27 @@ on_error:
 }
 
 
+/* Clean up listener resources */
+static void lis_on_destroy(void *arg)
+{
+    struct tls_listener *listener = (struct tls_listener*)arg;
+
+    if (listener->factory.lock) {
+	pj_lock_destroy(listener->factory.lock);
+	listener->factory.lock = NULL;
+    }
+
+    if (listener->factory.pool) {
+	pj_pool_t *pool = listener->factory.pool;
+
+	PJ_LOG(4,(listener->factory.obj_name, "SIP TLS listener destroyed"));
+
+	listener->factory.pool = NULL;
+	pj_pool_release(pool);
+    }
+}
+
+
 /* This callback is called by transport manager to destroy listener */
 static pj_status_t lis_destroy(pjsip_tpfactory *factory)
 {
@@ -524,18 +566,13 @@ static pj_status_t lis_destroy(pjsip_tpfactory *factory)
 	listener->ssock = NULL;
     }
 
-    if (listener->factory.lock) {
-	pj_lock_destroy(listener->factory.lock);
-	listener->factory.lock = NULL;
-    }
-
-    if (listener->factory.pool) {
-	pj_pool_t *pool = listener->factory.pool;
-
-	PJ_LOG(4,(listener->factory.obj_name, "SIP TLS listener destroyed"));
-
-	listener->factory.pool = NULL;
-	pj_pool_release(pool);
+    if (listener->grp_lock) {
+	pj_grp_lock_t *grp_lock = listener->grp_lock;
+	listener->grp_lock = NULL;
+	pj_grp_lock_dec_ref(grp_lock);
+	/* Listener may have been deleted at this point */
+    } else {
+	lis_on_destroy(listener);
     }
 
     return PJ_SUCCESS;
@@ -752,6 +789,50 @@ static pj_status_t tls_destroy_transport(pjsip_transport *transport)
 }
 
 
+/* Clean up TLS resources */
+static void tls_on_destroy(void *arg)
+{
+    struct tls_transport *tls = (struct tls_transport*)arg;
+
+    if (tls->rdata.tp_info.pool) {
+	pj_pool_release(tls->rdata.tp_info.pool);
+	tls->rdata.tp_info.pool = NULL;
+    }
+
+    if (tls->base.lock) {
+	pj_lock_destroy(tls->base.lock);
+	tls->base.lock = NULL;
+    }
+
+    if (tls->base.ref_cnt) {
+	pj_atomic_destroy(tls->base.ref_cnt);
+	tls->base.ref_cnt = NULL;
+    }
+
+    if (tls->base.pool) {
+	pj_pool_t *pool;
+
+	if (tls->close_reason != PJ_SUCCESS) {
+	    char errmsg[PJ_ERR_MSG_SIZE];
+
+	    pj_strerror(tls->close_reason, errmsg, sizeof(errmsg));
+	    PJ_LOG(4,(tls->base.obj_name,
+		      "TLS transport destroyed with reason %d: %s",
+		      tls->close_reason, errmsg));
+
+	} else {
+
+	    PJ_LOG(4,(tls->base.obj_name,
+		      "TLS transport destroyed normally"));
+
+	}
+
+	pool = tls->base.pool;
+	tls->base.pool = NULL;
+	pj_pool_release(pool);
+    }
+}
+
 /* Destroy TLS transport */
 static pj_status_t tls_destroy(pjsip_transport *transport,
 			       pj_status_t reason)
@@ -793,46 +874,18 @@ static pj_status_t tls_destroy(pjsip_transport *transport,
 	    on_data_sent(tls->ssock, op_key, -reason);
     }
 
-    if (tls->rdata.tp_info.pool) {
-	pj_pool_release(tls->rdata.tp_info.pool);
-	tls->rdata.tp_info.pool = NULL;
-    }
-
     if (tls->ssock) {
 	pj_ssl_sock_close(tls->ssock);
 	tls->ssock = NULL;
     }
 
-    if (tls->base.lock) {
-	pj_lock_destroy(tls->base.lock);
-	tls->base.lock = NULL;
-    }
-
-    if (tls->base.ref_cnt) {
-	pj_atomic_destroy(tls->base.ref_cnt);
-	tls->base.ref_cnt = NULL;
-    }
-
-    if (tls->base.pool) {
-	pj_pool_t *pool;
-
-	if (reason != PJ_SUCCESS) {
-	    char errmsg[PJ_ERR_MSG_SIZE];
-
-	    pj_strerror(reason, errmsg, sizeof(errmsg));
-	    PJ_LOG(4,(tls->base.obj_name,
-		      "TLS transport destroyed with reason %d: %s",
-		      reason, errmsg));
-
-	} else {
-	    PJ_LOG(4,(tls->base.obj_name,
-		      "TLS transport destroyed normally"));
-
-	}
-
-	pool = tls->base.pool;
-	tls->base.pool = NULL;
-	pj_pool_release(pool);
+    if (tls->grp_lock) {
+	pj_grp_lock_t *grp_lock = tls->grp_lock;
+	tls->grp_lock = NULL;
+	pj_grp_lock_dec_ref(grp_lock);
+	/* Transport may have been deleted at this point */
+    } else {
+	tls_on_destroy(tls);
     }
 
     return PJ_SUCCESS;
@@ -906,6 +959,7 @@ static pj_status_t lis_create_transport(pjsip_tpfactory *factory,
     struct tls_listener *listener;
     struct tls_transport *tls;
     pj_pool_t *pool;
+    pj_grp_lock_t *glock;
     pj_ssl_sock_t *ssock;
     pj_ssl_sock_param ssock_param;
     pj_sockaddr local_addr;
@@ -985,15 +1039,25 @@ static pj_status_t lis_create_transport(pjsip_tpfactory *factory,
 	break;
     }
 
-    status = pj_ssl_sock_create(pool, &ssock_param, &ssock);
+    /* Create group lock */
+    status = pj_grp_lock_create(pool, NULL, &glock);
     if (status != PJ_SUCCESS)
 	return status;
 
+    ssock_param.grp_lock = glock;
+    status = pj_ssl_sock_create(pool, &ssock_param, &ssock);
+    if (status != PJ_SUCCESS) {
+	pj_grp_lock_destroy(glock);
+	return status;
+    }
+
     /* Apply SSL certificate */
     if (listener->cert) {
 	status = pj_ssl_sock_set_certificate(ssock, pool, listener->cert);
-	if (status != PJ_SUCCESS)
+	if (status != PJ_SUCCESS) {
+	    pj_grp_lock_destroy(glock);
 	    return status;
+	}
     }
 
     /* Initially set bind address to listener's bind address */
@@ -1004,12 +1068,19 @@ static pj_status_t lis_create_transport(pjsip_tpfactory *factory,
     /* Create the transport descriptor */
     status = tls_create(listener, pool, ssock, PJ_FALSE, &local_addr,
			rem_addr, &remote_name, &tls);
-    if (status != PJ_SUCCESS)
+    if (status != PJ_SUCCESS) {
+	pj_grp_lock_destroy(glock);
 	return status;
+    }
 
     /* Set the "pending" SSL socket user data */
     pj_ssl_sock_set_user_data(tls->ssock, tls);
 
+    /* Set up the group lock */
+    tls->grp_lock = glock;
+    pj_grp_lock_add_ref(tls->grp_lock);
+    pj_grp_lock_add_handler(tls->grp_lock, pool, tls, &tls_on_destroy);
+
     /* Start asynchronous connect() operation */
     tls->has_pending_connect = PJ_TRUE;
     status = pj_ssl_sock_start_connect(tls->ssock, tls->base.pool,
@@ -1130,6 +1201,14 @@ static pj_bool_t on_accept_complete(pj_ssl_sock_t *ssock,
     /* Set the "pending" SSL socket user data */
     pj_ssl_sock_set_user_data(new_ssock, tls);
 
+    /* Set up the group lock */
+    if (ssl_info.grp_lock) {
+	tls->grp_lock = ssl_info.grp_lock;
+	pj_grp_lock_add_ref(tls->grp_lock);
+	pj_grp_lock_add_handler(tls->grp_lock, tls->base.pool, tls,
+				&tls_on_destroy);
+    }
+
     /* Prevent immediate transport destroy as application may access it
      * (getting info, etc) in transport state notification callback.
      */
diff --git a/pjsip/src/pjsip/sip_transport_udp.c b/pjsip/src/pjsip/sip_transport_udp.c
index 36995b08..ef9ecbdd 100644
--- a/pjsip/src/pjsip/sip_transport_udp.c
+++ b/pjsip/src/pjsip/sip_transport_udp.c
@@ -76,6 +76,9 @@ struct udp_transport
     pjsip_rx_data	  **rdata;
     int			    is_closing;
     pj_bool_t		    is_paused;
+
+    /* Group lock to be used by UDP transport and ioqueue key */
+    pj_grp_lock_t	   *grp_lock;
 };
 
 
@@ -345,6 +348,31 @@ static pj_status_t udp_send_msg( pjsip_transport *transport,
     return status;
 }
 
+
+/* Clean up UDP resources */
+static void udp_on_destroy(void *arg)
+{
+    struct udp_transport *tp = (struct udp_transport*)arg;
+    int i;
+
+    /* Destroy rdata */
+    for (i=0; i<tp->rdata_cnt; ++i) {
+	pj_pool_release(tp->rdata[i]->tp_info.pool);
+    }
+
+    /* Destroy reference counter. */
+    if (tp->base.ref_cnt)
+	pj_atomic_destroy(tp->base.ref_cnt);
+
+    /* Destroy lock */
+    if (tp->base.lock)
+	pj_lock_destroy(tp->base.lock);
+
+    /* Destroy pool. */
+    pjsip_endpt_release_pool(tp->base.endpt, tp->base.pool);
+}
+
+
 /*
  * udp_destroy()
  *
@@ -397,22 +425,15 @@ static pj_status_t udp_destroy( pjsip_transport *transport )
 	break;
     }
 
-    /* Destroy rdata */
-    for (i=0; i<tp->rdata_cnt; ++i) {
-	pj_pool_release(tp->rdata[i]->tp_info.pool);
+    if (tp->grp_lock) {
+	pj_grp_lock_t *grp_lock = tp->grp_lock;
+	tp->grp_lock = NULL;
+	pj_grp_lock_dec_ref(grp_lock);
+	/* Transport may have been deleted at this point */
+    } else {
+	udp_on_destroy(tp);
     }
 
-    /* Destroy reference counter. */
-    if (tp->base.ref_cnt)
-	pj_atomic_destroy(tp->base.ref_cnt);
-
-    /* Destroy lock */
-    if (tp->base.lock)
-	pj_lock_destroy(tp->base.lock);
-
-    /* Destroy pool. */
-    pjsip_endpt_release_pool(tp->base.endpt, tp->base.pool);
-
     return PJ_SUCCESS;
 }
 
@@ -602,10 +623,19 @@ static pj_status_t register_to_ioqueue(struct udp_transport *tp)
 {
     pj_ioqueue_t *ioqueue;
     pj_ioqueue_callback ioqueue_cb;
+    pj_status_t status;
 
     /* Ignore if already registered */
     if (tp->key != NULL)
	return PJ_SUCCESS;
+
+    /* Create group lock */
+    status = pj_grp_lock_create(tp->base.pool, NULL, &tp->grp_lock);
+    if (status != PJ_SUCCESS)
+	return status;
+
+    pj_grp_lock_add_ref(tp->grp_lock);
+    pj_grp_lock_add_handler(tp->grp_lock, tp->base.pool, tp, &udp_on_destroy);
 
     /* Register to ioqueue. */
     ioqueue = pjsip_endpt_get_ioqueue(tp->base.endpt);
@@ -613,8 +643,8 @@ static pj_status_t register_to_ioqueue(struct udp_transport *tp)
     ioqueue_cb.on_read_complete = &udp_on_read_complete;
     ioqueue_cb.on_write_complete = &udp_on_write_complete;
 
-    return pj_ioqueue_register_sock(tp->base.pool, ioqueue, tp->sock, tp,
-				    &ioqueue_cb, &tp->key);
+    return pj_ioqueue_register_sock2(tp->base.pool, ioqueue, tp->sock,
+				     tp->grp_lock, tp, &ioqueue_cb, &tp->key);
 }
 
 /* Start ioqueue asynchronous reading to all rdata */
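
The patch applies the same group-lock pattern to the TCP, TLS, and UDP transports: each transport creates a pj_grp_lock_t from its pool, takes its own reference, registers a cleanup handler, and hands the same lock to the ioqueue key, active socket, or SSL socket. The destroy path then only clears the pointer and drops that reference; the actual cleanup (lis_on_destroy, tcp_on_destroy, tls_on_destroy, udp_on_destroy) runs from the group-lock handler only once every reference, including the one held by the ioqueue, has been released, so a pending transport callback can no longer race with the release of the transport's pool and locks. The sketch below is a minimal illustration of that life cycle using the pj_grp_lock_* calls shown in the patch; the my_transport structure and the my_* function names are invented for the example and are not part of pjsip.

#include <pjlib.h>

/* Illustrative object guarded by a group lock (not an actual pjsip struct) */
struct my_transport
{
    pj_pool_t     *pool;
    pj_grp_lock_t *grp_lock;
};

/* Group lock handler: runs only when the last reference is released,
 * i.e. after the ioqueue key and any in-flight callbacks are done.
 */
static void my_on_destroy(void *arg)
{
    struct my_transport *t = (struct my_transport*)arg;

    /* Final cleanup is safe here */
    pj_pool_release(t->pool);
}

static pj_status_t my_transport_create(pj_pool_t *pool,
                                       struct my_transport **p_t)
{
    struct my_transport *t = PJ_POOL_ZALLOC_T(pool, struct my_transport);
    pj_status_t status;

    t->pool = pool;

    /* Create the group lock, take our own reference and register the
     * cleanup handler, as the patch does for each transport.
     */
    status = pj_grp_lock_create(pool, NULL, &t->grp_lock);
    if (status != PJ_SUCCESS)
        return status;

    pj_grp_lock_add_ref(t->grp_lock);
    pj_grp_lock_add_handler(t->grp_lock, pool, t, &my_on_destroy);

    /* The same grp_lock would then be handed to the framework, e.g. via
     * asock_cfg.grp_lock, ssock_param.grp_lock or
     * pj_ioqueue_register_sock2(), so it keeps its own reference while
     * callbacks may still be scheduled.
     */
    *p_t = t;
    return PJ_SUCCESS;
}

static void my_transport_destroy(struct my_transport *t)
{
    if (t->grp_lock) {
        pj_grp_lock_t *grp_lock = t->grp_lock;
        t->grp_lock = NULL;
        pj_grp_lock_dec_ref(grp_lock);
        /* The object may already be gone here; my_on_destroy() runs when
         * the last reference (possibly the ioqueue's) is dropped.
         */
    } else {
        my_on_destroy(t);
    }
}

The else branch mirrors the patch: if the group lock was never created (for example, the UDP transport was never registered to the ioqueue), the cleanup handler is simply invoked directly.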