summaryrefslogtreecommitdiff
path: root/pjnath/src
diff options
context:
space:
mode:
Diffstat (limited to 'pjnath/src')
-rw-r--r--pjnath/src/pjnath-test/ice_test.c878
-rw-r--r--pjnath/src/pjnath-test/main.c62
-rw-r--r--pjnath/src/pjnath-test/main_win32.c1
-rw-r--r--pjnath/src/pjnath-test/server.c754
-rw-r--r--pjnath/src/pjnath-test/server.h110
-rw-r--r--pjnath/src/pjnath-test/sess_auth.c1146
-rw-r--r--pjnath/src/pjnath-test/stun.c983
-rw-r--r--pjnath/src/pjnath-test/stun_sock_test.c849
-rw-r--r--pjnath/src/pjnath-test/test.c212
-rw-r--r--pjnath/src/pjnath-test/test.h63
-rw-r--r--pjnath/src/pjnath-test/turn_sock_test.c516
-rw-r--r--pjnath/src/pjnath/errno.c216
-rw-r--r--pjnath/src/pjnath/ice_session.c2968
-rw-r--r--pjnath/src/pjnath/ice_strans.c1757
-rw-r--r--pjnath/src/pjnath/nat_detect.c911
-rw-r--r--pjnath/src/pjnath/stun_auth.c631
-rw-r--r--pjnath/src/pjnath/stun_msg.c2827
-rw-r--r--pjnath/src/pjnath/stun_msg_dump.c298
-rw-r--r--pjnath/src/pjnath/stun_session.c1436
-rw-r--r--pjnath/src/pjnath/stun_sock.c856
-rw-r--r--pjnath/src/pjnath/stun_transaction.c448
-rw-r--r--pjnath/src/pjnath/turn_session.c2040
-rw-r--r--pjnath/src/pjnath/turn_sock.c808
-rw-r--r--pjnath/src/pjturn-client/client_main.c631
-rw-r--r--pjnath/src/pjturn-srv/allocation.c1377
-rw-r--r--pjnath/src/pjturn-srv/auth.c145
-rw-r--r--pjnath/src/pjturn-srv/auth.h116
-rw-r--r--pjnath/src/pjturn-srv/listener_tcp.c490
-rw-r--r--pjnath/src/pjturn-srv/listener_udp.c266
-rw-r--r--pjnath/src/pjturn-srv/main.c174
-rw-r--r--pjnath/src/pjturn-srv/server.c699
-rw-r--r--pjnath/src/pjturn-srv/turn.h508
32 files changed, 25176 insertions, 0 deletions
diff --git a/pjnath/src/pjnath-test/ice_test.c b/pjnath/src/pjnath-test/ice_test.c
new file mode 100644
index 0000000..fe0ee8d
--- /dev/null
+++ b/pjnath/src/pjnath-test/ice_test.c
@@ -0,0 +1,878 @@
+/* $Id: ice_test.c 3553 2011-05-05 06:14:19Z nanang $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include "test.h"
+#include "server.h"
+
+enum
+{
+ NO = 0,
+ YES = 1,
+ SRV = 3,
+};
+
+#define NODELAY 0xFFFFFFFF
+#define SRV_DOMAIN "pjsip.lab.domain"
+
+#define INDENT " "
+
+/* Client flags */
+enum
+{
+ WRONG_TURN = 1,
+ DEL_ON_ERR = 2,
+};
+
+
+/* Test results, collected asynchronously via ICE callbacks. */
+struct test_result
+{
+    pj_status_t init_status; /* init successful? (PJ_EPENDING until cb) */
+    pj_status_t nego_status; /* negotiation successful? (PJ_EPENDING until cb) */
+    unsigned rx_cnt[4]; /* Number of data received, indexed by 1-based comp_id */
+};
+
+
+/* Test session configuration */
+struct test_cfg
+{
+    pj_ice_sess_role role; /* Role. */
+    unsigned comp_cnt; /* Component count */
+    unsigned enable_host; /* Enable host candidates */
+    unsigned enable_stun; /* Enable srflx candidates (YES/SRV flags) */
+    unsigned enable_turn; /* Enable turn candidates (YES/SRV flags) */
+    unsigned client_flag; /* Client flags (WRONG_TURN, DEL_ON_ERR) */
+
+    unsigned answer_delay; /* Delay before sending SDP */
+    unsigned send_delay; /* Delay before sending data */
+    unsigned destroy_delay; /* Delay before destroy() */
+
+    struct test_result expected;/* Expected result */
+
+    pj_bool_t nom_regular; /* Use regular nomination? */
+};
+
+/* ICE endpoint state */
+struct ice_ept
+{
+    struct test_cfg cfg; /* Configuration. */
+    pj_ice_strans *ice; /* ICE stream transport */
+    struct test_result result;/* Test result. */
+
+    pj_str_t ufrag; /* Username fragment. */
+    pj_str_t pass; /* Password */
+};
+
+/* The test session: one caller + one callee endpoint, plus the
+ * local STUN/TURN/DNS test server they talk to.
+ */
+struct test_sess
+{
+    pj_pool_t *pool;
+    pj_stun_config *stun_cfg;
+    pj_dns_resolver *resolver;
+
+    test_server *server;
+
+    unsigned server_flag;
+    struct ice_ept caller;
+    struct ice_ept callee;
+};
+
+
+static void ice_on_rx_data(pj_ice_strans *ice_st,
+ unsigned comp_id,
+ void *pkt, pj_size_t size,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len);
+static void ice_on_ice_complete(pj_ice_strans *ice_st,
+ pj_ice_strans_op op,
+ pj_status_t status);
+static void destroy_sess(struct test_sess *sess, unsigned wait_msec);
+
+/* Create ICE stream transport */
+/* Build the pj_ice_strans for one endpoint from its test_cfg:
+ * enables host/srflx/relay candidates and points STUN/TURN either at
+ * the local server IP or at SRV_DOMAIN (for DNS SRV resolution).
+ * Returns PJ_SUCCESS, -1030 (pj_gethostip failure), or a pjnath status.
+ */
+static int create_ice_strans(struct test_sess *test_sess,
+                             struct ice_ept *ept,
+                             pj_ice_strans **p_ice)
+{
+    pj_ice_strans *ice;
+    pj_ice_strans_cb ice_cb;
+    pj_ice_strans_cfg ice_cfg;
+    pj_sockaddr hostip;
+    char serverip[PJ_INET6_ADDRSTRLEN];
+    pj_status_t status;
+
+    status = pj_gethostip(pj_AF_INET(), &hostip);
+    if (status != PJ_SUCCESS)
+        return -1030;
+
+    pj_sockaddr_print(&hostip, serverip, sizeof(serverip), 0);
+
+    /* Init callback structure */
+    pj_bzero(&ice_cb, sizeof(ice_cb));
+    ice_cb.on_rx_data = &ice_on_rx_data;
+    ice_cb.on_ice_complete = &ice_on_ice_complete;
+
+    /* Init ICE stream transport configuration structure */
+    pj_ice_strans_cfg_default(&ice_cfg);
+    pj_memcpy(&ice_cfg.stun_cfg, test_sess->stun_cfg, sizeof(pj_stun_config));
+    /* DNS resolver is only needed when either server is addressed by
+     * domain name (SRV flag) rather than literal IP.
+     */
+    if ((ept->cfg.enable_stun & SRV)==SRV || (ept->cfg.enable_turn & SRV)==SRV)
+        ice_cfg.resolver = test_sess->resolver;
+
+    if (ept->cfg.enable_stun & YES) {
+        if ((ept->cfg.enable_stun & SRV) == SRV) {
+            ice_cfg.stun.server = pj_str(SRV_DOMAIN);
+        } else {
+            ice_cfg.stun.server = pj_str(serverip);
+        }
+        ice_cfg.stun.port = STUN_SERVER_PORT;
+    }
+
+    if (ept->cfg.enable_host == 0) {
+        ice_cfg.stun.max_host_cands = 0;
+    } else {
+        //ice_cfg.stun.no_host_cands = PJ_FALSE;
+        /* Use loopback address as host candidate so the two endpoints
+         * can reach each other on the local machine.
+         */
+        ice_cfg.stun.loop_addr = PJ_TRUE;
+    }
+
+
+    if (ept->cfg.enable_turn & YES) {
+        if ((ept->cfg.enable_turn & SRV) == SRV) {
+            ice_cfg.turn.server = pj_str(SRV_DOMAIN);
+        } else {
+            ice_cfg.turn.server = pj_str(serverip);
+        }
+        ice_cfg.turn.port = TURN_SERVER_PORT;
+        ice_cfg.turn.conn_type = PJ_TURN_TP_UDP;
+        ice_cfg.turn.auth_cred.type = PJ_STUN_AUTH_CRED_STATIC;
+        ice_cfg.turn.auth_cred.data.static_cred.realm = pj_str(SRV_DOMAIN);
+        /* WRONG_TURN deliberately uses a bad username to provoke a
+         * 401 from the TURN server (see the failure test cases).
+         */
+        if (ept->cfg.client_flag & WRONG_TURN)
+            ice_cfg.turn.auth_cred.data.static_cred.username = pj_str("xxx");
+        else
+            ice_cfg.turn.auth_cred.data.static_cred.username = pj_str(TURN_USERNAME);
+        ice_cfg.turn.auth_cred.data.static_cred.data_type = PJ_STUN_PASSWD_PLAIN;
+        ice_cfg.turn.auth_cred.data.static_cred.data = pj_str(TURN_PASSWD);
+    }
+
+    /* Create ICE stream transport; the endpoint struct is attached as
+     * user data so callbacks can find it.
+     */
+    status = pj_ice_strans_create(NULL, &ice_cfg, ept->cfg.comp_cnt,
+                                  (void*)ept, &ice_cb,
+                                  &ice);
+    if (status != PJ_SUCCESS) {
+        app_perror(INDENT "err: pj_ice_strans_create()", status);
+        return status;
+    }
+
+    pj_create_unique_string(test_sess->pool, &ept->ufrag);
+    pj_create_unique_string(test_sess->pool, &ept->pass);
+
+    /* Looks alright */
+    *p_ice = ice;
+    return PJ_SUCCESS;
+}
+
+/* Create test session */
+/* Create the whole test session: session struct, local STUN/TURN/DNS
+ * test server, DNS resolver pointed at 127.0.0.1, and the caller and
+ * callee ICE stream transports. On any failure the partially built
+ * session is torn down via destroy_sess() and a distinct negative
+ * error code (-10..-40) is returned.
+ */
+static int create_sess(pj_stun_config *stun_cfg,
+                       unsigned server_flag,
+                       struct test_cfg *caller_cfg,
+                       struct test_cfg *callee_cfg,
+                       struct test_sess **p_sess)
+{
+    pj_pool_t *pool;
+    struct test_sess *sess;
+    pj_str_t ns_ip;
+    pj_uint16_t ns_port;
+    unsigned flags;
+    pj_status_t status;
+
+    /* Create session structure */
+    pool = pj_pool_create(mem, "testsess", 512, 512, NULL);
+    sess = PJ_POOL_ZALLOC_T(pool, struct test_sess);
+    sess->pool = pool;
+    sess->stun_cfg = stun_cfg;
+
+    /* Results start as PJ_EPENDING; the ICE callbacks overwrite them. */
+    pj_memcpy(&sess->caller.cfg, caller_cfg, sizeof(*caller_cfg));
+    sess->caller.result.init_status = sess->caller.result.nego_status = PJ_EPENDING;
+
+    pj_memcpy(&sess->callee.cfg, callee_cfg, sizeof(*callee_cfg));
+    sess->callee.result.init_status = sess->callee.result.nego_status = PJ_EPENDING;
+
+    /* Create server */
+    flags = server_flag;
+    status = create_test_server(stun_cfg, flags, SRV_DOMAIN, &sess->server);
+    if (status != PJ_SUCCESS) {
+        app_perror(INDENT "error: create_test_server()", status);
+        destroy_sess(sess, 500);
+        return -10;
+    }
+    sess->server->turn_respond_allocate =
+        sess->server->turn_respond_refresh = PJ_TRUE;
+
+    /* Create resolver */
+    status = pj_dns_resolver_create(mem, NULL, 0, stun_cfg->timer_heap,
+                                    stun_cfg->ioqueue, &sess->resolver);
+    if (status != PJ_SUCCESS) {
+        app_perror(INDENT "error: pj_dns_resolver_create()", status);
+        destroy_sess(sess, 500);
+        return -20;
+    }
+
+    /* Point the resolver at the local DNS test server. */
+    ns_ip = pj_str("127.0.0.1");
+    ns_port = (pj_uint16_t)DNS_SERVER_PORT;
+    status = pj_dns_resolver_set_ns(sess->resolver, 1, &ns_ip, &ns_port);
+    if (status != PJ_SUCCESS) {
+        app_perror( INDENT "error: pj_dns_resolver_set_ns()", status);
+        destroy_sess(sess, 500);
+        return -21;
+    }
+
+    /* Create caller ICE stream transport */
+    status = create_ice_strans(sess, &sess->caller, &sess->caller.ice);
+    if (status != PJ_SUCCESS) {
+        destroy_sess(sess, 500);
+        return -30;
+    }
+
+    /* Create callee ICE stream transport */
+    status = create_ice_strans(sess, &sess->callee, &sess->callee.ice);
+    if (status != PJ_SUCCESS) {
+        destroy_sess(sess, 500);
+        return -40;
+    }
+
+    *p_sess = sess;
+    return 0;
+}
+
+/* Destroy test session */
+static void destroy_sess(struct test_sess *sess, unsigned wait_msec)
+{
+ if (sess->caller.ice) {
+ pj_ice_strans_destroy(sess->caller.ice);
+ sess->caller.ice = NULL;
+ }
+
+ if (sess->callee.ice) {
+ pj_ice_strans_destroy(sess->callee.ice);
+ sess->callee.ice = NULL;
+ }
+
+ poll_events(sess->stun_cfg, wait_msec, PJ_FALSE);
+
+ if (sess->resolver) {
+ pj_dns_resolver_destroy(sess->resolver, PJ_FALSE);
+ sess->resolver = NULL;
+ }
+
+ if (sess->server) {
+ destroy_test_server(sess->server);
+ sess->server = NULL;
+ }
+
+ if (sess->pool) {
+ pj_pool_t *pool = sess->pool;
+ sess->pool = NULL;
+ pj_pool_release(pool);
+ }
+}
+
+/* ICE callback: count incoming data packets per component.
+ * NOTE(review): comp_id is 1-based and rx_cnt has only 4 slots, so
+ * this assumes comp_id <= 3 — the tests use at most 2 components;
+ * confirm before raising comp_cnt.
+ */
+static void ice_on_rx_data(pj_ice_strans *ice_st,
+                           unsigned comp_id,
+                           void *pkt, pj_size_t size,
+                           const pj_sockaddr_t *src_addr,
+                           unsigned src_addr_len)
+{
+    struct ice_ept *ept;
+
+    PJ_UNUSED_ARG(pkt);
+    PJ_UNUSED_ARG(size);
+    PJ_UNUSED_ARG(src_addr);
+    PJ_UNUSED_ARG(src_addr_len);
+
+    ept = (struct ice_ept*) pj_ice_strans_get_user_data(ice_st);
+    ept->result.rx_cnt[comp_id]++;
+}
+
+
+/* ICE callback: record init/negotiation completion status in the
+ * endpoint's result. When DEL_ON_ERR is set, a failed init also
+ * destroys the transport from inside the callback (this exercises
+ * the destroy-in-callback code path).
+ */
+static void ice_on_ice_complete(pj_ice_strans *ice_st,
+                                pj_ice_strans_op op,
+                                pj_status_t status)
+{
+    struct ice_ept *ept;
+
+    ept = (struct ice_ept*) pj_ice_strans_get_user_data(ice_st);
+    switch (op) {
+    case PJ_ICE_STRANS_OP_INIT:
+        ept->result.init_status = status;
+        if (status != PJ_SUCCESS && (ept->cfg.client_flag & DEL_ON_ERR)) {
+            pj_ice_strans_destroy(ice_st);
+            ept->ice = NULL;
+        }
+        break;
+    case PJ_ICE_STRANS_OP_NEGOTIATION:
+        ept->result.nego_status = status;
+        break;
+    default:
+        pj_assert(!"Unknown op");
+    }
+}
+
+
+/* Start ICE negotiation on the endpoint, based on parameter from
+ * the other endpoint.
+ */
+/* Start ICE negotiation on the endpoint, based on parameter from
+ * the other endpoint: gather the remote's candidates for every
+ * component (up to 32 total in the rcand array) and feed them,
+ * together with the remote ufrag/password, to
+ * pj_ice_strans_start_ice().
+ */
+static pj_status_t start_ice(struct ice_ept *ept, const struct ice_ept *remote)
+{
+    pj_ice_sess_cand rcand[32];
+    unsigned i, rcand_cnt = 0;
+    pj_status_t status;
+
+    /* Enum remote candidates; comp_id is 1-based, hence i+1 */
+    for (i=0; i<remote->cfg.comp_cnt; ++i) {
+        unsigned cnt = PJ_ARRAY_SIZE(rcand) - rcand_cnt;
+        status = pj_ice_strans_enum_cands(remote->ice, i+1, &cnt, rcand+rcand_cnt);
+        if (status != PJ_SUCCESS) {
+            app_perror(INDENT "err: pj_ice_strans_enum_cands()", status);
+            return status;
+        }
+        rcand_cnt += cnt;
+    }
+
+    status = pj_ice_strans_start_ice(ept->ice, &remote->ufrag, &remote->pass,
+                                     rcand_cnt, rcand);
+    if (status != PJ_SUCCESS) {
+        app_perror(INDENT "err: pj_ice_strans_start_ice()", status);
+        return status;
+    }
+
+    return PJ_SUCCESS;
+}
+
+
+/* Check that the pair in both agents are matched */
+static int check_pair(const struct ice_ept *ept1, const struct ice_ept *ept2,
+ int start_err)
+{
+ unsigned i, min_cnt, max_cnt;
+
+ if (ept1->cfg.comp_cnt < ept2->cfg.comp_cnt) {
+ min_cnt = ept1->cfg.comp_cnt;
+ max_cnt = ept2->cfg.comp_cnt;
+ } else {
+ min_cnt = ept2->cfg.comp_cnt;
+ max_cnt = ept1->cfg.comp_cnt;
+ }
+
+ /* Must have valid pair for common components */
+ for (i=0; i<min_cnt; ++i) {
+ const pj_ice_sess_check *c1;
+ const pj_ice_sess_check *c2;
+
+ c1 = pj_ice_strans_get_valid_pair(ept1->ice, i+1);
+ if (c1 == NULL) {
+ PJ_LOG(3,("", INDENT "err: unable to get valid pair for ice1 "
+ "component %d", i+1));
+ return start_err - 2;
+ }
+
+ c2 = pj_ice_strans_get_valid_pair(ept2->ice, i+1);
+ if (c2 == NULL) {
+ PJ_LOG(3,("", INDENT "err: unable to get valid pair for ice2 "
+ "component %d", i+1));
+ return start_err - 4;
+ }
+
+ if (pj_sockaddr_cmp(&c1->rcand->addr, &c2->lcand->addr) != 0) {
+ PJ_LOG(3,("", INDENT "err: candidate pair does not match "
+ "for component %d", i+1));
+ return start_err - 6;
+ }
+ }
+
+ /* Extra components must not have valid pair */
+ for (; i<max_cnt; ++i) {
+ if (ept1->cfg.comp_cnt>i &&
+ pj_ice_strans_get_valid_pair(ept1->ice, i+1) != NULL)
+ {
+ PJ_LOG(3,("", INDENT "err: ice1 shouldn't have valid pair "
+ "for component %d", i+1));
+ return start_err - 8;
+ }
+ if (ept2->cfg.comp_cnt>i &&
+ pj_ice_strans_get_valid_pair(ept2->ice, i+1) != NULL)
+ {
+ PJ_LOG(3,("", INDENT "err: ice2 shouldn't have valid pair "
+ "for component %d", i+1));
+ return start_err - 9;
+ }
+ }
+
+ return 0;
+}
+
+
+/* Poll events in 10ms slices until "expr" becomes true or "timeout"
+ * seconds have elapsed. Sets RC to PJ_SUCCESS when expr became true,
+ * or leaves it at -1 on timeout. Requires a pj_stun_config* named
+ * `stun_cfg` in the invoking scope.
+ * Fixed: the success branch previously assigned to the hard-coded
+ * name `rc` instead of the RC macro parameter, so the macro silently
+ * only worked when the caller's variable happened to be named `rc`.
+ */
+#define WAIT_UNTIL(timeout,expr, RC)  { \
+    pj_time_val t0, t; \
+    pj_gettimeofday(&t0); \
+    RC = -1; \
+    for (;;) { \
+        poll_events(stun_cfg, 10, PJ_FALSE); \
+        pj_gettimeofday(&t); \
+        if (expr) { \
+            RC = PJ_SUCCESS; \
+            break; \
+        } \
+        if (t.sec - t0.sec > (timeout)) break; \
+    } \
+    }
+
+
+/* Run one complete ICE scenario: create the session, wait for both
+ * transports to initialize, run init_ice/start_ice on both sides,
+ * wait for negotiation, verify results against the expected status
+ * in each test_cfg, and check the selected candidate pairs match.
+ * Also verifies (via capture/check_pjlib_state) that no timers or
+ * memory leaked. Returns 0 on success or a distinct negative code.
+ */
+static int perform_test(const char *title,
+                        pj_stun_config *stun_cfg,
+                        unsigned server_flag,
+                        struct test_cfg *caller_cfg,
+                        struct test_cfg *callee_cfg)
+{
+    pjlib_state pjlib_state;
+    struct test_sess *sess;
+    int rc;
+
+    PJ_LOG(3,("", INDENT "%s", title));
+
+    capture_pjlib_state(stun_cfg, &pjlib_state);
+
+    rc = create_sess(stun_cfg, server_flag, caller_cfg, callee_cfg, &sess);
+    if (rc != 0)
+        return rc;
+
+#define ALL_READY (sess->caller.result.init_status!=PJ_EPENDING && \
+                   sess->callee.result.init_status!=PJ_EPENDING)
+
+    /* Wait until both ICE transports are initialized */
+    WAIT_UNTIL(30, ALL_READY, rc);
+
+    if (!ALL_READY) {
+        PJ_LOG(3,("", INDENT "err: init timed-out"));
+        destroy_sess(sess, 500);
+        return -100;
+    }
+
+    if (sess->caller.result.init_status != sess->caller.cfg.expected.init_status) {
+        app_perror(INDENT "err: caller init", sess->caller.result.init_status);
+        destroy_sess(sess, 500);
+        return -102;
+    }
+    if (sess->callee.result.init_status != sess->callee.cfg.expected.init_status) {
+        app_perror(INDENT "err: callee init", sess->callee.result.init_status);
+        destroy_sess(sess, 500);
+        return -104;
+    }
+
+    /* Failure condition: init failed but matched the expectation above,
+     * so the scenario passes (rc=0) and we just clean up.
+     */
+    if (sess->caller.result.init_status != PJ_SUCCESS ||
+        sess->callee.result.init_status != PJ_SUCCESS)
+    {
+        rc = 0;
+        goto on_return;
+    }
+
+    /* Init ICE on caller */
+    /* NOTE(review): -100 below duplicates the init-timeout error code
+     * above; a distinct code would make failures easier to attribute.
+     */
+    rc = pj_ice_strans_init_ice(sess->caller.ice, sess->caller.cfg.role,
+                                &sess->caller.ufrag, &sess->caller.pass);
+    if (rc != PJ_SUCCESS) {
+        app_perror(INDENT "err: caller pj_ice_strans_init_ice()", rc);
+        destroy_sess(sess, 500);
+        return -100;
+    }
+
+    /* Init ICE on callee */
+    rc = pj_ice_strans_init_ice(sess->callee.ice, sess->callee.cfg.role,
+                                &sess->callee.ufrag, &sess->callee.pass);
+    if (rc != PJ_SUCCESS) {
+        app_perror(INDENT "err: callee pj_ice_strans_init_ice()", rc);
+        destroy_sess(sess, 500);
+        return -110;
+    }
+
+    /* Start ICE on callee */
+    rc = start_ice(&sess->callee, &sess->caller);
+    if (rc != PJ_SUCCESS) {
+        destroy_sess(sess, 500);
+        return -120;
+    }
+
+    /* Wait for callee's answer_delay */
+    poll_events(stun_cfg, sess->callee.cfg.answer_delay, PJ_FALSE);
+
+    /* Start ICE on caller */
+    rc = start_ice(&sess->caller, &sess->callee);
+    if (rc != PJ_SUCCESS) {
+        destroy_sess(sess, 500);
+        return -130;
+    }
+
+    /* Wait until negotiation is complete on both endpoints */
+#define ALL_DONE (sess->caller.result.nego_status!=PJ_EPENDING && \
+                  sess->callee.result.nego_status!=PJ_EPENDING)
+    WAIT_UNTIL(30, ALL_DONE, rc);
+
+    if (!ALL_DONE) {
+        PJ_LOG(3,("", INDENT "err: negotiation timed-out"));
+        destroy_sess(sess, 500);
+        return -140;
+    }
+
+    if (sess->caller.result.nego_status != sess->caller.cfg.expected.nego_status) {
+        app_perror(INDENT "err: caller negotiation failed", sess->caller.result.nego_status);
+        destroy_sess(sess, 500);
+        return -150;
+    }
+
+    if (sess->callee.result.nego_status != sess->callee.cfg.expected.nego_status) {
+        app_perror(INDENT "err: callee negotiation failed", sess->callee.result.nego_status);
+        destroy_sess(sess, 500);
+        return -160;
+    }
+
+    /* Verify that both agents have agreed on the same pair */
+    rc = check_pair(&sess->caller, &sess->callee, -170);
+    if (rc != 0) {
+        destroy_sess(sess, 500);
+        return rc;
+    }
+    rc = check_pair(&sess->callee, &sess->caller, -180);
+    if (rc != 0) {
+        destroy_sess(sess, 500);
+        return rc;
+    }
+
+    /* Looks like everything is okay */
+
+    /* Destroy ICE stream transports first to let it de-allocate
+     * TURN relay (otherwise there'll be timer/memory leak, unless
+     * we wait for long time in the last poll_events() below).
+     */
+    if (sess->caller.ice) {
+        pj_ice_strans_destroy(sess->caller.ice);
+        sess->caller.ice = NULL;
+    }
+
+    if (sess->callee.ice) {
+        pj_ice_strans_destroy(sess->callee.ice);
+        sess->callee.ice = NULL;
+    }
+
+on_return:
+    /* Wait.. */
+    poll_events(stun_cfg, 500, PJ_FALSE);
+
+    /* Now destroy everything */
+    destroy_sess(sess, 500);
+
+    /* Flush events */
+    poll_events(stun_cfg, 100, PJ_FALSE);
+
+    /* Verify no timer/memory leak against the snapshot taken on entry */
+    rc = check_pjlib_state(stun_cfg, &pjlib_state);
+    if (rc != 0) {
+        return rc;
+    }
+
+    return 0;
+}
+
+#define ROLE1 PJ_ICE_SESS_ROLE_CONTROLLED
+#define ROLE2 PJ_ICE_SESS_ROLE_CONTROLLING
+
+/* Top-level ICE test entry point: runs a fixed set of basic and
+ * failure scenarios, then iterates the sess_cfg[] table across
+ * answer delays, role (and role-conflict) combinations, and 1-2
+ * component counts. Returns 0 on success, negative on first failure.
+ */
+int ice_test(void)
+{
+    pj_pool_t *pool;
+    pj_stun_config stun_cfg;
+    unsigned i;
+    int rc;
+    struct sess_cfg_t {
+        const char *title;
+        unsigned server_flag;
+        struct test_cfg ua1;
+        struct test_cfg ua2;
+    } sess_cfg[] =
+    {
+        /* Role comp# host? stun? turn? flag? ans_del snd_del des_del */
+        {
+            "hosts candidates only",
+            0xFFFF,
+            {ROLE1, 1, YES, NO, NO, NO, 0, 0, 0, {PJ_SUCCESS, PJ_SUCCESS}},
+            {ROLE2, 1, YES, NO, NO, NO, 0, 0, 0, {PJ_SUCCESS, PJ_SUCCESS}}
+        },
+        {
+            "host and srflxes",
+            0xFFFF,
+            {ROLE1, 1, YES, YES, NO, NO, 0, 0, 0, {PJ_SUCCESS, PJ_SUCCESS}},
+            {ROLE2, 1, YES, YES, NO, NO, 0, 0, 0, {PJ_SUCCESS, PJ_SUCCESS}}
+        },
+        {
+            "host vs relay",
+            0xFFFF,
+            {ROLE1, 1, YES, NO, NO, NO, 0, 0, 0, {PJ_SUCCESS, PJ_SUCCESS}},
+            {ROLE2, 1, NO, NO, YES, NO, 0, 0, 0, {PJ_SUCCESS, PJ_SUCCESS}}
+        },
+        {
+            "relay vs host",
+            0xFFFF,
+            {ROLE1, 1, NO, NO, YES, NO, 0, 0, 0, {PJ_SUCCESS, PJ_SUCCESS}},
+            {ROLE2, 1, YES, NO, NO, NO, 0, 0, 0, {PJ_SUCCESS, PJ_SUCCESS}}
+        },
+        {
+            "relay vs relay",
+            0xFFFF,
+            {ROLE1, 1, NO, NO, YES, NO, 0, 0, 0, {PJ_SUCCESS, PJ_SUCCESS}},
+            {ROLE2, 1, NO, NO, YES, NO, 0, 0, 0, {PJ_SUCCESS, PJ_SUCCESS}}
+        },
+        {
+            "all candidates",
+            0xFFFF,
+            {ROLE1, 1, YES, YES, YES, NO, 0, 0, 0, {PJ_SUCCESS, PJ_SUCCESS}},
+            {ROLE2, 1, YES, YES, YES, NO, 0, 0, 0, {PJ_SUCCESS, PJ_SUCCESS}}
+        },
+    };
+
+    pool = pj_pool_create(mem, NULL, 512, 512, NULL);
+    rc = create_stun_config(pool, &stun_cfg);
+    if (rc != PJ_SUCCESS) {
+        pj_pool_release(pool);
+        return -7;
+    }
+
+    /* Simple test first with host candidate */
+    if (1) {
+        struct sess_cfg_t cfg =
+        {
+            "Basic with host candidates",
+            0x0,
+            /* Role comp# host? stun? turn? flag? ans_del snd_del des_del */
+            {ROLE1, 1, YES, NO, NO, 0, 0, 0, 0, {PJ_SUCCESS, PJ_SUCCESS}},
+            {ROLE2, 1, YES, NO, NO, 0, 0, 0, 0, {PJ_SUCCESS, PJ_SUCCESS}}
+        };
+
+        rc = perform_test(cfg.title, &stun_cfg, cfg.server_flag,
+                          &cfg.ua1, &cfg.ua2);
+        if (rc != 0)
+            goto on_return;
+
+        /* Repeat with two components per endpoint */
+        cfg.ua1.comp_cnt = 2;
+        cfg.ua2.comp_cnt = 2;
+        rc = perform_test("Basic with host candidates, 2 components",
+                          &stun_cfg, cfg.server_flag,
+                          &cfg.ua1, &cfg.ua2);
+        if (rc != 0)
+            goto on_return;
+    }
+
+    /* Simple test with srflx candidate */
+    if (1) {
+        struct sess_cfg_t cfg =
+        {
+            "Basic with srflx candidates",
+            0xFFFF,
+            /* Role comp# host? stun? turn? flag? ans_del snd_del des_del */
+            {ROLE1, 1, YES, YES, NO, 0, 0, 0, 0, {PJ_SUCCESS, PJ_SUCCESS}},
+            {ROLE2, 1, YES, YES, NO, 0, 0, 0, 0, {PJ_SUCCESS, PJ_SUCCESS}}
+        };
+
+        rc = perform_test(cfg.title, &stun_cfg, cfg.server_flag,
+                          &cfg.ua1, &cfg.ua2);
+        if (rc != 0)
+            goto on_return;
+
+        cfg.ua1.comp_cnt = 2;
+        cfg.ua2.comp_cnt = 2;
+
+        rc = perform_test("Basic with srflx candidates, 2 components",
+                          &stun_cfg, cfg.server_flag,
+                          &cfg.ua1, &cfg.ua2);
+        if (rc != 0)
+            goto on_return;
+    }
+
+    /* Simple test with relay candidate */
+    if (1) {
+        struct sess_cfg_t cfg =
+        {
+            "Basic with relay candidates",
+            0xFFFF,
+            /* Role comp# host? stun? turn? flag? ans_del snd_del des_del */
+            {ROLE1, 1, NO, NO, YES, 0, 0, 0, 0, {PJ_SUCCESS, PJ_SUCCESS}},
+            {ROLE2, 1, NO, NO, YES, 0, 0, 0, 0, {PJ_SUCCESS, PJ_SUCCESS}}
+        };
+
+        rc = perform_test(cfg.title, &stun_cfg, cfg.server_flag,
+                          &cfg.ua1, &cfg.ua2);
+        if (rc != 0)
+            goto on_return;
+
+        cfg.ua1.comp_cnt = 2;
+        cfg.ua2.comp_cnt = 2;
+
+        rc = perform_test("Basic with relay candidates, 2 components",
+                          &stun_cfg, cfg.server_flag,
+                          &cfg.ua1, &cfg.ua2);
+        if (rc != 0)
+            goto on_return;
+    }
+
+    /* Failure test with STUN resolution: no STUN server is created
+     * (server_flag 0x0), so init is expected to time out.
+     */
+    if (1) {
+        struct sess_cfg_t cfg =
+        {
+            "STUN resolution failure",
+            0x0,
+            /* Role comp# host? stun? turn? flag? ans_del snd_del des_del */
+            {ROLE1, 2, NO, YES, NO, 0, 0, 0, 0, {PJNATH_ESTUNTIMEDOUT, -1}},
+            {ROLE2, 2, NO, YES, NO, 0, 0, 0, 0, {PJNATH_ESTUNTIMEDOUT, -1}}
+        };
+
+        rc = perform_test(cfg.title, &stun_cfg, cfg.server_flag,
+                          &cfg.ua1, &cfg.ua2);
+        if (rc != 0)
+            goto on_return;
+
+        cfg.ua1.client_flag |= DEL_ON_ERR;
+        cfg.ua2.client_flag |= DEL_ON_ERR;
+
+        rc = perform_test("STUN resolution failure with destroy on callback",
+                          &stun_cfg, cfg.server_flag,
+                          &cfg.ua1, &cfg.ua2);
+        if (rc != 0)
+            goto on_return;
+    }
+
+    /* Failure test with TURN resolution: WRONG_TURN makes the client
+     * use a bad username, so the server is expected to reply 401.
+     */
+    if (1) {
+        struct sess_cfg_t cfg =
+        {
+            "TURN allocation failure",
+            0xFFFF,
+            /* Role comp# host? stun? turn? flag? ans_del snd_del des_del */
+            {ROLE1, 2, NO, NO, YES, WRONG_TURN, 0, 0, 0, {PJ_STATUS_FROM_STUN_CODE(401), -1}},
+            {ROLE2, 2, NO, NO, YES, WRONG_TURN, 0, 0, 0, {PJ_STATUS_FROM_STUN_CODE(401), -1}}
+        };
+
+        rc = perform_test(cfg.title, &stun_cfg, cfg.server_flag,
+                          &cfg.ua1, &cfg.ua2);
+        if (rc != 0)
+            goto on_return;
+
+        cfg.ua1.client_flag |= DEL_ON_ERR;
+        cfg.ua2.client_flag |= DEL_ON_ERR;
+
+        rc = perform_test("TURN allocation failure with destroy on callback",
+                          &stun_cfg, cfg.server_flag,
+                          &cfg.ua1, &cfg.ua2);
+        if (rc != 0)
+            goto on_return;
+    }
+
+    /* STUN failure, testing TURN deallocation */
+    if (1) {
+        struct sess_cfg_t cfg =
+        {
+            "STUN failure, testing TURN deallocation",
+            0xFFFF & (~(CREATE_STUN_SERVER)),
+            /* Role comp# host? stun? turn? flag? ans_del snd_del des_del */
+            {ROLE1, 2, YES, YES, YES, 0, 0, 0, 0, {PJNATH_ESTUNTIMEDOUT, -1}},
+            {ROLE2, 2, YES, YES, YES, 0, 0, 0, 0, {PJNATH_ESTUNTIMEDOUT, -1}}
+        };
+
+        rc = perform_test(cfg.title, &stun_cfg, cfg.server_flag,
+                          &cfg.ua1, &cfg.ua2);
+        if (rc != 0)
+            goto on_return;
+
+        cfg.ua1.client_flag |= DEL_ON_ERR;
+        cfg.ua2.client_flag |= DEL_ON_ERR;
+
+        rc = perform_test("STUN failure, testing TURN deallocation (cb)",
+                          &stun_cfg, cfg.server_flag,
+                          &cfg.ua1, &cfg.ua2);
+        if (rc != 0)
+            goto on_return;
+    }
+
+    rc = 0;
+    /* Iterate each test item */
+    for (i=0; i<PJ_ARRAY_SIZE(sess_cfg); ++i) {
+        struct sess_cfg_t *cfg = &sess_cfg[i];
+        unsigned delay[] = { 50, 2000 };
+        unsigned d;
+
+        PJ_LOG(3,("", " %s", cfg->title));
+
+        /* For each test item, test with various answer delay */
+        for (d=0; d<PJ_ARRAY_SIZE(delay); ++d) {
+            /* Role matrix includes deliberate conflicts (both
+             * CONTROLLED or both CONTROLLING) to exercise ICE
+             * role-conflict resolution.
+             */
+            struct role_t {
+                pj_ice_sess_role ua1;
+                pj_ice_sess_role ua2;
+            } role[] =
+            {
+                { ROLE1, ROLE2},
+                { ROLE2, ROLE1},
+                { ROLE1, ROLE1},
+                { ROLE2, ROLE2}
+            };
+            unsigned j;
+
+            cfg->ua1.answer_delay = delay[d];
+            cfg->ua2.answer_delay = delay[d];
+
+            /* For each test item, test with role conflict scenarios */
+            for (j=0; j<PJ_ARRAY_SIZE(role); ++j) {
+                unsigned k1;
+
+                cfg->ua1.role = role[j].ua1;
+                cfg->ua2.role = role[j].ua2;
+
+                /* For each test item, test with different number of components */
+                for (k1=1; k1<=2; ++k1) {
+                    unsigned k2;
+
+                    cfg->ua1.comp_cnt = k1;
+
+                    for (k2=1; k2<=2; ++k2) {
+                        char title[120];
+
+                        sprintf(title,
+                                "%s/%s, %dms answer delay, %d vs %d components",
+                                pj_ice_sess_role_name(role[j].ua1),
+                                pj_ice_sess_role_name(role[j].ua2),
+                                delay[d], k1, k2);
+
+                        cfg->ua2.comp_cnt = k2;
+                        rc = perform_test(title, &stun_cfg, cfg->server_flag,
+                                          &cfg->ua1, &cfg->ua2);
+                        if (rc != 0)
+                            goto on_return;
+                    }
+                }
+            }
+        }
+    }
+
+on_return:
+    destroy_stun_config(&stun_cfg);
+    pj_pool_release(pool);
+    return rc;
+}
+
diff --git a/pjnath/src/pjnath-test/main.c b/pjnath/src/pjnath-test/main.c
new file mode 100644
index 0000000..c12d09d
--- /dev/null
+++ b/pjnath/src/pjnath-test/main.c
@@ -0,0 +1,62 @@
+/* $Id: main.c 3553 2011-05-05 06:14:19Z nanang $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include "test.h"
+
+#if defined(PJ_SUNOS) && PJ_SUNOS!=0
+#include <signal.h>
+/* On SunOS, ignore SIGALRM so a stray alarm does not kill the test
+ * process; on all other platforms this is a no-op macro below.
+ */
+static void init_signals()
+{
+    struct sigaction act;
+
+    memset(&act, 0, sizeof(act));
+    act.sa_handler = SIG_IGN;
+
+    sigaction(SIGALRM, &act, NULL);
+}
+
+#else
+#define init_signals()
+#endif
+
+#define boost()
+
+/* Test driver entry point: runs test_main() and returns its result.
+ * With "-i" on the command line, waits for ENTER before exiting so
+ * the console output can be inspected.
+ * NOTE(review): argc/argv are marked PJ_UNUSED_ARG yet used below;
+ * the macros are harmless (void casts) but misleading.
+ */
+int main(int argc, char *argv[])
+{
+    int rc;
+
+    PJ_UNUSED_ARG(argc);
+    PJ_UNUSED_ARG(argv);
+
+    boost();
+    init_signals();
+
+    rc = test_main();
+
+    if (argc == 2 && pj_ansi_strcmp(argv[1], "-i")==0) {
+        char buf[10];
+
+        puts("Press <ENTER> to exit");
+        if (fgets(buf, sizeof(buf), stdin) == NULL)
+            return rc;
+    }
+
+    return rc;
+}
+
diff --git a/pjnath/src/pjnath-test/main_win32.c b/pjnath/src/pjnath-test/main_win32.c
new file mode 100644
index 0000000..3043a39
--- /dev/null
+++ b/pjnath/src/pjnath-test/main_win32.c
@@ -0,0 +1 @@
+#include "../../pjlib/src/pjlib-test/main_win32.c"
diff --git a/pjnath/src/pjnath-test/server.c b/pjnath/src/pjnath-test/server.c
new file mode 100644
index 0000000..4d8565e
--- /dev/null
+++ b/pjnath/src/pjnath-test/server.c
@@ -0,0 +1,754 @@
+/* $Id: server.c 3553 2011-05-05 06:14:19Z nanang $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include "server.h"
+#include "test.h"
+
+#define THIS_FILE "server.c"
+#define MAX_STUN_PKT 1500
+#define TURN_NONCE "thenonce"
+
+static pj_bool_t stun_on_data_recvfrom(pj_activesock_t *asock,
+ void *data,
+ pj_size_t size,
+ const pj_sockaddr_t *src_addr,
+ int addr_len,
+ pj_status_t status);
+static pj_bool_t turn_on_data_recvfrom(pj_activesock_t *asock,
+ void *data,
+ pj_size_t size,
+ const pj_sockaddr_t *src_addr,
+ int addr_len,
+ pj_status_t status);
+static pj_bool_t alloc_on_data_recvfrom(pj_activesock_t *asock,
+ void *data,
+ pj_size_t size,
+ const pj_sockaddr_t *src_addr,
+ int addr_len,
+ pj_status_t status);
+
+/*
+ * Create the test-server bundle: optionally a DNS server, a STUN server
+ * and a TURN server, selected by the bits in "flags" (see enum
+ * test_server_flags in server.h).
+ *
+ * stun_cfg   - provides the pool factory and ioqueue used by all sockets.
+ * flags      - combination of CREATE_xxx bits.
+ * domain     - DNS domain for the SRV/A records; also used as TURN realm.
+ * p_test_srv - output: the created instance.
+ *
+ * Returns PJ_SUCCESS, or the first error encountered; on error, every
+ * partially created component is torn down via destroy_test_server().
+ */
+pj_status_t create_test_server(pj_stun_config *stun_cfg,
+                               pj_uint32_t flags,
+                               const char *domain,
+                               test_server **p_test_srv)
+{
+    pj_pool_t *pool;
+    test_server *test_srv;
+    pj_sockaddr hostip;
+    char strbuf[100];
+    pj_status_t status;
+
+    PJ_ASSERT_RETURN(stun_cfg && domain && p_test_srv, PJ_EINVAL);
+
+    /* All DNS A records point at this host's IPv4 address. */
+    status = pj_gethostip(pj_AF_INET(), &hostip);
+    if (status != PJ_SUCCESS)
+        return status;
+
+    pool = pj_pool_create(mem, THIS_FILE, 512, 512, NULL);
+    test_srv = (test_server*) PJ_POOL_ZALLOC_T(pool, test_server);
+    test_srv->pool = pool;
+    test_srv->flags = flags;
+    test_srv->stun_cfg = stun_cfg;
+
+    /* Fixed credentials used by the TURN server (see server.h). */
+    pj_strdup2(pool, &test_srv->domain, domain);
+    test_srv->username = pj_str(TURN_USERNAME);
+    test_srv->passwd = pj_str(TURN_PASSWD);
+
+    pj_ioqueue_op_key_init(&test_srv->send_key, sizeof(test_srv->send_key));
+
+    if (flags & CREATE_DNS_SERVER) {
+        status = pj_dns_server_create(mem, test_srv->stun_cfg->ioqueue,
+                                      pj_AF_INET(), DNS_SERVER_PORT,
+                                      0, &test_srv->dns_server);
+        if (status != PJ_SUCCESS) {
+            destroy_test_server(test_srv);
+            return status;
+        }
+
+        /* Add DNS A record for the domain, for fallback */
+        if (flags & CREATE_A_RECORD_FOR_DOMAIN) {
+            pj_dns_parsed_rr rr;
+            pj_str_t res_name;
+            pj_in_addr ip_addr;
+
+            pj_strdup2(pool, &res_name, domain);
+            ip_addr = hostip.ipv4.sin_addr;
+            pj_dns_init_a_rr(&rr, &res_name, PJ_DNS_CLASS_IN, 60, &ip_addr);
+            pj_dns_server_add_rec(test_srv->dns_server, 1, &rr);
+        }
+
+    }
+
+    if (flags & CREATE_STUN_SERVER) {
+        pj_activesock_cb stun_sock_cb;
+        pj_sockaddr bound_addr;
+
+        /* UDP active socket delivering packets to stun_on_data_recvfrom() */
+        pj_bzero(&stun_sock_cb, sizeof(stun_sock_cb));
+        stun_sock_cb.on_data_recvfrom = &stun_on_data_recvfrom;
+
+        pj_sockaddr_in_init(&bound_addr.ipv4, NULL, STUN_SERVER_PORT);
+
+        status = pj_activesock_create_udp(pool, &bound_addr, NULL,
+                                          test_srv->stun_cfg->ioqueue,
+                                          &stun_sock_cb, test_srv,
+                                          &test_srv->stun_sock, NULL);
+        if (status != PJ_SUCCESS) {
+            destroy_test_server(test_srv);
+            return status;
+        }
+
+        status = pj_activesock_start_recvfrom(test_srv->stun_sock, pool,
+                                              MAX_STUN_PKT, 0);
+        if (status != PJ_SUCCESS) {
+            destroy_test_server(test_srv);
+            return status;
+        }
+
+        if (test_srv->dns_server && (flags & CREATE_STUN_SERVER_DNS_SRV)) {
+            pj_str_t res_name, target;
+            pj_dns_parsed_rr rr;
+            pj_in_addr ip_addr;
+
+            /* Add DNS entries:
+             *  _stun._udp.domain 60 IN SRV 0 0 PORT stun.domain.
+             *  stun.domain IN A 127.0.0.1
+             */
+            pj_ansi_snprintf(strbuf, sizeof(strbuf),
+                             "_stun._udp.%s", domain);
+            pj_strdup2(pool, &res_name, strbuf);
+            pj_ansi_snprintf(strbuf, sizeof(strbuf),
+                             "stun.%s", domain);
+            pj_strdup2(pool, &target, strbuf);
+            pj_dns_init_srv_rr(&rr, &res_name, PJ_DNS_CLASS_IN, 60, 0, 0,
+                               STUN_SERVER_PORT, &target);
+            pj_dns_server_add_rec(test_srv->dns_server, 1, &rr);
+
+            /* A record for the SRV target */
+            res_name = target;
+            ip_addr = hostip.ipv4.sin_addr;
+            pj_dns_init_a_rr(&rr, &res_name, PJ_DNS_CLASS_IN, 60, &ip_addr);
+            pj_dns_server_add_rec(test_srv->dns_server, 1, &rr);
+        }
+
+    }
+
+    if (flags & CREATE_TURN_SERVER) {
+        pj_activesock_cb turn_sock_cb;
+        pj_sockaddr bound_addr;
+
+        /* UDP active socket delivering packets to turn_on_data_recvfrom() */
+        pj_bzero(&turn_sock_cb, sizeof(turn_sock_cb));
+        turn_sock_cb.on_data_recvfrom = &turn_on_data_recvfrom;
+
+        pj_sockaddr_in_init(&bound_addr.ipv4, NULL, TURN_SERVER_PORT);
+
+        status = pj_activesock_create_udp(pool, &bound_addr, NULL,
+                                          test_srv->stun_cfg->ioqueue,
+                                          &turn_sock_cb, test_srv,
+                                          &test_srv->turn_sock, NULL);
+        if (status != PJ_SUCCESS) {
+            destroy_test_server(test_srv);
+            return status;
+        }
+
+        status = pj_activesock_start_recvfrom(test_srv->turn_sock, pool,
+                                              MAX_STUN_PKT, 0);
+        if (status != PJ_SUCCESS) {
+            destroy_test_server(test_srv);
+            return status;
+        }
+
+        if (test_srv->dns_server && (flags & CREATE_TURN_SERVER_DNS_SRV)) {
+            pj_str_t res_name, target;
+            pj_dns_parsed_rr rr;
+            pj_in_addr ip_addr;
+
+            /* Add DNS entries:
+             *  _turn._udp.domain 60 IN SRV 0 0 PORT turn.domain.
+             *  turn.domain IN A 127.0.0.1
+             */
+            pj_ansi_snprintf(strbuf, sizeof(strbuf),
+                             "_turn._udp.%s", domain);
+            pj_strdup2(pool, &res_name, strbuf);
+            pj_ansi_snprintf(strbuf, sizeof(strbuf),
+                             "turn.%s", domain);
+            pj_strdup2(pool, &target, strbuf);
+            pj_dns_init_srv_rr(&rr, &res_name, PJ_DNS_CLASS_IN, 60, 0, 0,
+                               TURN_SERVER_PORT, &target);
+            pj_dns_server_add_rec(test_srv->dns_server, 1, &rr);
+
+            /* A record for the SRV target */
+            res_name = target;
+            ip_addr = hostip.ipv4.sin_addr;
+            pj_dns_init_a_rr(&rr, &res_name, PJ_DNS_CLASS_IN, 60, &ip_addr);
+            pj_dns_server_add_rec(test_srv->dns_server, 1, &rr);
+        }
+    }
+
+    *p_test_srv = test_srv;
+    return PJ_SUCCESS;
+}
+
+/*
+ * Tear down everything created by create_test_server(): per-client TURN
+ * allocations first, then the TURN/STUN sockets, the DNS server, and
+ * finally the pool that owns the test_server struct itself.
+ */
+void destroy_test_server(test_server *test_srv)
+{
+    unsigned idx;
+
+    PJ_ASSERT_ON_FAIL(test_srv, return);
+
+    /* Release every active TURN allocation. */
+    for (idx = 0; idx < test_srv->turn_alloc_cnt; ++idx) {
+        pj_activesock_close(test_srv->turn_alloc[idx].sock);
+        pj_pool_release(test_srv->turn_alloc[idx].pool);
+    }
+    test_srv->turn_alloc_cnt = 0;
+
+    if (test_srv->turn_sock != NULL) {
+        pj_activesock_close(test_srv->turn_sock);
+        test_srv->turn_sock = NULL;
+    }
+
+    if (test_srv->stun_sock != NULL) {
+        pj_activesock_close(test_srv->stun_sock);
+        test_srv->stun_sock = NULL;
+    }
+
+    if (test_srv->dns_server != NULL) {
+        pj_dns_server_destroy(test_srv->dns_server);
+        test_srv->dns_server = NULL;
+    }
+
+    /* Releasing the pool frees test_srv itself, so detach the pointer
+     * first and never touch test_srv afterwards.
+     */
+    if (test_srv->pool != NULL) {
+        pj_pool_t *owner = test_srv->pool;
+        test_srv->pool = NULL;
+        pj_pool_release(owner);
+    }
+}
+
+/* Callback for the STUN server socket: decode an incoming packet and
+ * reply. A Binding request gets a success response carrying
+ * XOR-MAPPED-ADDRESS (the sender's source address); any other request
+ * type gets a 400 Bad Request. Always returns PJ_TRUE to keep reading.
+ */
+static pj_bool_t stun_on_data_recvfrom(pj_activesock_t *asock,
+                                       void *data,
+                                       pj_size_t size,
+                                       const pj_sockaddr_t *src_addr,
+                                       int addr_len,
+                                       pj_status_t status)
+{
+    test_server *test_srv;
+    pj_stun_msg *req, *resp = NULL;
+    pj_pool_t *pool;
+    pj_ssize_t len;
+
+    if (status != PJ_SUCCESS)
+        return PJ_TRUE;
+
+    test_srv = (test_server*) pj_activesock_get_user_data(asock);
+    pool = pj_pool_create(test_srv->stun_cfg->pf, NULL, 512, 512, NULL);
+
+    status = pj_stun_msg_decode(pool, (pj_uint8_t*)data, size,
+                                PJ_STUN_IS_DATAGRAM | PJ_STUN_CHECK_PACKET,
+                                &req, NULL, NULL);
+    if (status != PJ_SUCCESS)
+        goto on_return;
+
+    if (req->hdr.type != PJ_STUN_BINDING_REQUEST) {
+        /* Anything other than a Binding request gets a 400 reply. */
+        pj_stun_msg_create_response(pool, req, PJ_STUN_SC_BAD_REQUEST,
+                                    NULL, &resp);
+        goto send_pkt;
+    }
+
+    status = pj_stun_msg_create_response(pool, req, 0, NULL, &resp);
+    if (status != PJ_SUCCESS)
+        goto on_return;
+
+    /* Mirror the request's source address back to the client. */
+    pj_stun_msg_add_sockaddr_attr(pool, resp, PJ_STUN_ATTR_XOR_MAPPED_ADDR,
+                                  PJ_TRUE, src_addr, addr_len);
+
+send_pkt:
+    /* Fix: if creating the error response above failed, resp is still
+     * NULL and pj_stun_msg_encode() would dereference it.
+     */
+    if (resp == NULL)
+        goto on_return;
+
+    status = pj_stun_msg_encode(resp, (pj_uint8_t*)data, MAX_STUN_PKT,
+                                0, NULL, &size);
+    if (status != PJ_SUCCESS)
+        goto on_return;
+
+    len = size;
+    status = pj_activesock_sendto(asock, &test_srv->send_key, data, &len,
+                                  0, src_addr, addr_len);
+
+on_return:
+    pj_pool_release(pool);
+    return PJ_TRUE;
+}
+
+
+/* Build a TURN success response for "req". The response always carries
+ * NONCE and LIFETIME; when lifetime != 0 it also carries the
+ * XOR-RELAYED-ADDRESS (relay address) and XOR-MAPPED-ADDRESS (client
+ * address) of the allocation. A blank MESSAGE-INTEGRITY is appended and
+ * *auth_key is set to the long-term credential key so the caller can
+ * sign the message when encoding. Returns NULL on failure.
+ */
+static pj_stun_msg* create_success_response(test_server *test_srv,
+                                            turn_allocation *alloc,
+                                            pj_stun_msg *req,
+                                            pj_pool_t *pool,
+                                            unsigned lifetime,
+                                            pj_str_t *auth_key)
+{
+    pj_stun_msg *resp;
+    pj_str_t tmp;
+    pj_status_t status;
+
+    /* Create response */
+    status = pj_stun_msg_create_response(pool, req, 0, NULL, &resp);
+    if (status != PJ_SUCCESS) {
+        return NULL;
+    }
+    /* Add TURN_NONCE */
+    pj_stun_msg_add_string_attr(pool, resp, PJ_STUN_ATTR_NONCE, pj_cstr(&tmp, TURN_NONCE));
+    /* Add LIFETIME */
+    pj_stun_msg_add_uint_attr(pool, resp, PJ_STUN_ATTR_LIFETIME, lifetime);
+    if (lifetime != 0) {
+        /* Add XOR-RELAYED-ADDRESS */
+        pj_stun_msg_add_sockaddr_attr(pool, resp, PJ_STUN_ATTR_XOR_RELAYED_ADDR, PJ_TRUE, &alloc->alloc_addr,
+                                      pj_sockaddr_get_len(&alloc->alloc_addr));
+        /* Add XOR-MAPPED-ADDRESS */
+        pj_stun_msg_add_sockaddr_attr(pool, resp, PJ_STUN_ATTR_XOR_MAPPED_ADDR, PJ_TRUE, &alloc->client_addr,
+                                      pj_sockaddr_get_len(&alloc->client_addr));
+    }
+
+    /* Add blank MESSAGE-INTEGRITY */
+    pj_stun_msg_add_msgint_attr(pool, resp);
+
+    /* Set auth key (domain is used as the realm here) */
+    pj_stun_create_key(pool, auth_key, &test_srv->domain, &test_srv->username,
+                       PJ_STUN_PASSWD_PLAIN, &test_srv->passwd);
+
+    return resp;
+}
+
+
+/* Callback for the TURN server socket. Dispatches everything a TURN
+ * client may send over UDP:
+ *  - ChannelData frames, relayed to the bound peer;
+ *  - Allocate requests (creating a new turn_allocation with a relay
+ *    socket and a reusable Data indication template);
+ *  - Refresh, CreatePermission, ChannelBind requests;
+ *  - Send indications, relayed to peers that have a permission.
+ * Always returns PJ_TRUE so the active socket keeps reading.
+ */
+static pj_bool_t turn_on_data_recvfrom(pj_activesock_t *asock,
+                                       void *data,
+                                       pj_size_t size,
+                                       const pj_sockaddr_t *src_addr,
+                                       int addr_len,
+                                       pj_status_t status)
+{
+    test_server *test_srv;
+    pj_pool_t *pool;
+    turn_allocation *alloc;
+    pj_stun_msg *req, *resp = NULL;
+    pj_str_t auth_key = { NULL, 0 };
+    char client_info[PJ_INET6_ADDRSTRLEN+10];
+    unsigned i;
+    pj_ssize_t len;
+
+    if (status != PJ_SUCCESS)
+        return PJ_TRUE;
+
+    pj_sockaddr_print(src_addr, client_info, sizeof(client_info), 3);
+
+    test_srv = (test_server*) pj_activesock_get_user_data(asock);
+    pool = pj_pool_create(test_srv->stun_cfg->pf, NULL, 512, 512, NULL);
+
+    /* Find the client's allocation by source address; i reaching
+     * turn_alloc_cnt means this is an unknown/new client.
+     */
+    for (i=0; i<test_srv->turn_alloc_cnt; i++) {
+        if (pj_sockaddr_cmp(&test_srv->turn_alloc[i].client_addr, src_addr)==0)
+            break;
+    }
+
+    if (pj_stun_msg_check((pj_uint8_t*)data, size, PJ_STUN_NO_FINGERPRINT_CHECK)!=PJ_SUCCESS) {
+        /* Not a STUN message, this probably is a ChannelData frame */
+        pj_turn_channel_data cd;
+        const pj_turn_channel_data *pcd = (const pj_turn_channel_data*)data;
+        pj_ssize_t sent;
+
+        if (i==test_srv->turn_alloc_cnt) {
+            /* Invalid data */
+            PJ_LOG(1,(THIS_FILE,
+                      "TURN Server received strayed data"));
+            goto on_return;
+        }
+
+        alloc = &test_srv->turn_alloc[i];
+
+        cd.ch_number = pj_ntohs(pcd->ch_number);
+        cd.length = pj_ntohs(pcd->length);
+
+        /* For UDP check the packet length */
+        if (size < cd.length+sizeof(cd)) {
+            PJ_LOG(1,(THIS_FILE,
+                      "TURN Server: ChannelData discarded: UDP size error"));
+            goto on_return;
+        }
+
+        /* Lookup peer by channel number (i is reused as the permission
+         * index from here on; the allocation index is no longer needed).
+         */
+        for (i=0; i<alloc->perm_cnt; ++i) {
+            if (alloc->chnum[i] == cd.ch_number)
+                break;
+        }
+
+        if (i==alloc->perm_cnt) {
+            PJ_LOG(1,(THIS_FILE,
+                      "TURN Server: ChannelData discarded: invalid channel number"));
+            goto on_return;
+        }
+
+        /* Relay the payload (which follows the 4-byte header) to peer */
+        sent = cd.length;
+        pj_activesock_sendto(alloc->sock, &alloc->send_key,
+                             pcd+1, &sent, 0,
+                             &alloc->perm[i],
+                             pj_sockaddr_get_len(&alloc->perm[i]));
+
+        /* Done */
+        goto on_return;
+    }
+
+    status = pj_stun_msg_decode(pool, (pj_uint8_t*)data, size,
+                                PJ_STUN_IS_DATAGRAM | PJ_STUN_CHECK_PACKET |
+                                PJ_STUN_NO_FINGERPRINT_CHECK,
+                                &req, NULL, NULL);
+    if (status != PJ_SUCCESS) {
+        char errmsg[PJ_ERR_MSG_SIZE];
+        pj_strerror(status, errmsg, sizeof(errmsg));
+        PJ_LOG(1,("", "STUN message decode error from client %s: %s", client_info, errmsg));
+        goto on_return;
+    }
+
+    if (i==test_srv->turn_alloc_cnt) {
+        /* New client: only an Allocate request is acceptable */
+        pj_stun_username_attr *uname;
+        pj_activesock_cb alloc_sock_cb;
+
+        if (req->hdr.type != PJ_STUN_ALLOCATE_REQUEST) {
+            PJ_LOG(1,(THIS_FILE, "Invalid %s %s from client %s",
+                      pj_stun_get_method_name(req->hdr.type),
+                      pj_stun_get_class_name(req->hdr.type),
+                      client_info));
+
+            if (PJ_STUN_IS_REQUEST(req->hdr.type))
+                pj_stun_msg_create_response(pool, req, PJ_STUN_SC_BAD_REQUEST, NULL, &resp);
+            goto send_pkt;
+        }
+
+        test_srv->turn_stat.rx_allocate_cnt++;
+
+        /* Skip if we're not responding to Allocate request.
+         * (Fix: jump to on_return instead of returning directly, so the
+         * local pool is released.)
+         */
+        if (!test_srv->turn_respond_allocate)
+            goto on_return;
+
+        /* Check if we have too many clients */
+        if (test_srv->turn_alloc_cnt == MAX_TURN_ALLOC) {
+            pj_stun_msg_create_response(pool, req, PJ_STUN_SC_INSUFFICIENT_CAPACITY, NULL, &resp);
+            goto send_pkt;
+        }
+
+        /* Get USERNAME attribute */
+        uname = (pj_stun_username_attr*)
+                pj_stun_msg_find_attr(req, PJ_STUN_ATTR_USERNAME, 0);
+
+        /* Reject if it doesn't have MESSAGE-INTEGRITY or USERNAME
+         * attributes or the user is incorrect; challenge with our
+         * realm and nonce.
+         */
+        if (pj_stun_msg_find_attr(req, PJ_STUN_ATTR_MESSAGE_INTEGRITY, 0) == NULL ||
+            uname==NULL || pj_stricmp2(&uname->value, TURN_USERNAME) != 0)
+        {
+            pj_str_t tmp;
+
+            pj_stun_msg_create_response(pool, req, PJ_STUN_SC_UNAUTHORIZED, NULL, &resp);
+            pj_stun_msg_add_string_attr(pool, resp, PJ_STUN_ATTR_REALM, &test_srv->domain);
+            pj_stun_msg_add_string_attr(pool, resp, PJ_STUN_ATTR_NONCE, pj_cstr(&tmp, TURN_NONCE));
+            goto send_pkt;
+        }
+
+        pj_bzero(&alloc_sock_cb, sizeof(alloc_sock_cb));
+        alloc_sock_cb.on_data_recvfrom = &alloc_on_data_recvfrom;
+
+        /* Create allocation in the next free slot */
+        alloc = &test_srv->turn_alloc[test_srv->turn_alloc_cnt];
+        alloc->perm_cnt = 0;
+        alloc->test_srv = test_srv;
+        pj_memcpy(&alloc->client_addr, src_addr, addr_len);
+        pj_ioqueue_op_key_init(&alloc->send_key, sizeof(alloc->send_key));
+
+        alloc->pool = pj_pool_create(test_srv->stun_cfg->pf, "alloc", 512, 512, NULL);
+
+        /* Create relay socket on an ephemeral port of the host IP */
+        pj_sockaddr_in_init(&alloc->alloc_addr.ipv4, NULL, 0);
+        pj_gethostip(pj_AF_INET(), &alloc->alloc_addr);
+
+        status = pj_activesock_create_udp(alloc->pool, &alloc->alloc_addr, NULL,
+                                          test_srv->stun_cfg->ioqueue,
+                                          &alloc_sock_cb, alloc,
+                                          &alloc->sock, &alloc->alloc_addr);
+        if (status != PJ_SUCCESS) {
+            pj_pool_release(alloc->pool);
+            pj_stun_msg_create_response(pool, req, PJ_STUN_SC_SERVER_ERROR, NULL, &resp);
+            goto send_pkt;
+        }
+
+        pj_activesock_set_user_data(alloc->sock, alloc);
+
+        status = pj_activesock_start_recvfrom(alloc->sock, alloc->pool, 1500, 0);
+        if (status != PJ_SUCCESS) {
+            pj_activesock_close(alloc->sock);
+            pj_pool_release(alloc->pool);
+            pj_stun_msg_create_response(pool, req, PJ_STUN_SC_SERVER_ERROR, NULL, &resp);
+            goto send_pkt;
+        }
+
+        /* Create a reusable Data indication template; its peer-address
+         * and data attributes are rewritten per packet in
+         * alloc_on_data_recvfrom().
+         */
+        status = pj_stun_msg_create(alloc->pool, PJ_STUN_DATA_INDICATION,
+                                    PJ_STUN_MAGIC, NULL, &alloc->data_ind);
+        if (status != PJ_SUCCESS) {
+            pj_activesock_close(alloc->sock);
+            pj_pool_release(alloc->pool);
+            pj_stun_msg_create_response(pool, req, PJ_STUN_SC_SERVER_ERROR, NULL, &resp);
+            goto send_pkt;
+        }
+        pj_stun_msg_add_sockaddr_attr(alloc->pool, alloc->data_ind,
+                                      PJ_STUN_ATTR_XOR_PEER_ADDR, PJ_TRUE,
+                                      &alloc->alloc_addr,
+                                      pj_sockaddr_get_len(&alloc->alloc_addr));
+        pj_stun_msg_add_binary_attr(alloc->pool, alloc->data_ind,
+                                    PJ_STUN_ATTR_DATA, (pj_uint8_t*)"", 1);
+
+        /* Create response */
+        resp = create_success_response(test_srv, alloc, req, pool, 600, &auth_key);
+        if (resp == NULL) {
+            pj_activesock_close(alloc->sock);
+            pj_pool_release(alloc->pool);
+            pj_stun_msg_create_response(pool, req, PJ_STUN_SC_SERVER_ERROR, NULL, &resp);
+            goto send_pkt;
+        }
+
+        /* Commit the allocation only after everything succeeded */
+        ++test_srv->turn_alloc_cnt;
+
+    } else {
+        alloc = &test_srv->turn_alloc[i];
+
+        if (req->hdr.type == PJ_STUN_ALLOCATE_REQUEST) {
+
+            test_srv->turn_stat.rx_allocate_cnt++;
+
+            /* Skip if we're not responding to Allocate request.
+             * (Fix: release the local pool via on_return.)
+             */
+            if (!test_srv->turn_respond_allocate)
+                goto on_return;
+
+            resp = create_success_response(test_srv, alloc, req, pool, 0, &auth_key);
+
+        } else if (req->hdr.type == PJ_STUN_REFRESH_REQUEST) {
+            pj_stun_lifetime_attr *lf_attr;
+
+            test_srv->turn_stat.rx_refresh_cnt++;
+
+            /* Skip if we're not responding to Refresh request.
+             * (Fix: release the local pool via on_return.)
+             */
+            if (!test_srv->turn_respond_refresh)
+                goto on_return;
+
+            lf_attr = (pj_stun_lifetime_attr*)
+                      pj_stun_msg_find_attr(req, PJ_STUN_ATTR_LIFETIME, 0);
+            if (lf_attr && lf_attr->value != 0) {
+                /* NOTE(review): this answers lifetime 600 but then erases
+                 * the allocation, which looks inverted relative to
+                 * RFC 5766 (lifetime 0 deallocates). Kept as-is because
+                 * existing tests may rely on it -- confirm before changing.
+                 */
+                resp = create_success_response(test_srv, alloc, req, pool, 600, &auth_key);
+                pj_array_erase(test_srv->turn_alloc, sizeof(test_srv->turn_alloc[0]),
+                               test_srv->turn_alloc_cnt, i);
+                --test_srv->turn_alloc_cnt;
+            } else
+                resp = create_success_response(test_srv, alloc, req, pool, 0, &auth_key);
+        } else if (req->hdr.type == PJ_STUN_CREATE_PERM_REQUEST) {
+            /* Install a permission for every XOR-PEER-ADDR attribute */
+            for (i=0; i<req->attr_count; ++i) {
+                if (req->attr[i]->type == PJ_STUN_ATTR_XOR_PEER_ADDR) {
+                    pj_stun_xor_peer_addr_attr *pa = (pj_stun_xor_peer_addr_attr*)req->attr[i];
+                    unsigned j;
+
+                    for (j=0; j<alloc->perm_cnt; ++j) {
+                        if (pj_sockaddr_cmp(&alloc->perm[j], &pa->sockaddr)==0)
+                            break;
+                    }
+
+                    if (j==alloc->perm_cnt && alloc->perm_cnt < MAX_TURN_PERM) {
+                        char peer_info[PJ_INET6_ADDRSTRLEN];
+                        pj_sockaddr_print(&pa->sockaddr, peer_info, sizeof(peer_info), 3);
+
+                        pj_sockaddr_cp(&alloc->perm[alloc->perm_cnt], &pa->sockaddr);
+                        ++alloc->perm_cnt;
+
+                        PJ_LOG(5,("", "Permission %s added to client %s, perm_cnt=%d",
+                                  peer_info, client_info, alloc->perm_cnt));
+                    }
+
+                }
+            }
+            resp = create_success_response(test_srv, alloc, req, pool, 0, &auth_key);
+        } else if (req->hdr.type == PJ_STUN_SEND_INDICATION) {
+            pj_stun_xor_peer_addr_attr *pa;
+            pj_stun_data_attr *da;
+
+            test_srv->turn_stat.rx_send_ind_cnt++;
+
+            pa = (pj_stun_xor_peer_addr_attr*)
+                 pj_stun_msg_find_attr(req, PJ_STUN_ATTR_XOR_PEER_ADDR, 0);
+            da = (pj_stun_data_attr*)
+                 pj_stun_msg_find_attr(req, PJ_STUN_ATTR_DATA, 0);
+            if (pa && da) {
+                unsigned j;
+                char peer_info[PJ_INET6_ADDRSTRLEN];
+                pj_ssize_t sent;
+
+                pj_sockaddr_print(&pa->sockaddr, peer_info, sizeof(peer_info), 3);
+
+                for (j=0; j<alloc->perm_cnt; ++j) {
+                    if (pj_sockaddr_cmp(&alloc->perm[j], &pa->sockaddr)==0)
+                        break;
+                }
+
+                if (j==alloc->perm_cnt) {
+                    /* Fix: dropped two stray arguments that had no matching
+                     * conversion in the format string.
+                     */
+                    PJ_LOG(5,("", "SendIndication to %s is rejected (no permission)",
+                              peer_info));
+                } else {
+                    PJ_LOG(5,(THIS_FILE, "Relaying %d bytes data from client %s to peer %s, "
+                                         "perm_cnt=%d",
+                              da->length, client_info, peer_info, alloc->perm_cnt));
+
+                    sent = da->length;
+                    pj_activesock_sendto(alloc->sock, &alloc->send_key,
+                                         da->data, &sent, 0,
+                                         &pa->sockaddr,
+                                         pj_sockaddr_get_len(&pa->sockaddr));
+                }
+            } else {
+                PJ_LOG(1,(THIS_FILE, "Invalid Send Indication from %s", client_info));
+            }
+        } else if (req->hdr.type == PJ_STUN_CHANNEL_BIND_REQUEST) {
+            pj_stun_xor_peer_addr_attr *pa;
+            pj_stun_channel_number_attr *cna;
+            unsigned j, cn;
+
+            pa = (pj_stun_xor_peer_addr_attr*)
+                 pj_stun_msg_find_attr(req, PJ_STUN_ATTR_XOR_PEER_ADDR, 0);
+            cna = (pj_stun_channel_number_attr*)
+                  pj_stun_msg_find_attr(req, PJ_STUN_ATTR_CHANNEL_NUMBER, 0);
+
+            /* Fix: reject malformed requests instead of dereferencing
+             * missing attributes.
+             */
+            if (pa == NULL || cna == NULL) {
+                pj_stun_msg_create_response(pool, req, PJ_STUN_SC_BAD_REQUEST, NULL, &resp);
+                goto send_pkt;
+            }
+
+            cn = PJ_STUN_GET_CH_NB(cna->value);
+
+            /* Lookup existing permission for this peer */
+            for (j=0; j<alloc->perm_cnt; ++j) {
+                if (pj_sockaddr_cmp(&alloc->perm[j], &pa->sockaddr)==0)
+                    break;
+            }
+
+            /* Fix: the original tested and indexed with the client index
+             * "i" here instead of the permission index "j".
+             */
+            if (j==alloc->perm_cnt) {
+                if (alloc->perm_cnt==MAX_TURN_PERM) {
+                    pj_stun_msg_create_response(pool, req, PJ_STUN_SC_INSUFFICIENT_CAPACITY, NULL, &resp);
+                    goto send_pkt;
+                }
+                pj_sockaddr_cp(&alloc->perm[j], &pa->sockaddr);
+                ++alloc->perm_cnt;
+            }
+            alloc->chnum[j] = cn;
+
+            resp = create_success_response(test_srv, alloc, req, pool, 0, &auth_key);
+
+        } else if (PJ_STUN_IS_REQUEST(req->hdr.type)) {
+            pj_stun_msg_create_response(pool, req, PJ_STUN_SC_BAD_REQUEST, NULL, &resp);
+        }
+    }
+
+
+send_pkt:
+    if (resp) {
+        status = pj_stun_msg_encode(resp, (pj_uint8_t*)data, MAX_STUN_PKT,
+                                    0, &auth_key, &size);
+        if (status != PJ_SUCCESS)
+            goto on_return;
+
+        len = size;
+        status = pj_activesock_sendto(asock, &test_srv->send_key, data, &len,
+                                      0, src_addr, addr_len);
+    }
+
+on_return:
+    pj_pool_release(pool);
+    return PJ_TRUE;
+}
+
+/* On received data from peer */
+/* On received data from peer: if the peer has a permission on this
+ * allocation, wrap the payload in the allocation's reusable Data
+ * indication and forward it to the TURN client. Always returns PJ_TRUE
+ * so the relay socket keeps reading.
+ */
+static pj_bool_t alloc_on_data_recvfrom(pj_activesock_t *asock,
+                                        void *data,
+                                        pj_size_t size,
+                                        const pj_sockaddr_t *src_addr,
+                                        int addr_len,
+                                        pj_status_t status)
+{
+    turn_allocation *alloc;
+    pj_stun_xor_peer_addr_attr *pa;
+    pj_stun_data_attr *da;
+    char peer_info[PJ_INET6_ADDRSTRLEN+10];
+    char client_info[PJ_INET6_ADDRSTRLEN+10];
+    pj_uint8_t buffer[1500];
+    pj_ssize_t sent;
+    unsigned i;
+
+    if (status != PJ_SUCCESS)
+        return PJ_TRUE;
+
+    alloc = (turn_allocation*) pj_activesock_get_user_data(asock);
+
+    pj_sockaddr_print(&alloc->client_addr, client_info, sizeof(client_info), 3);
+    pj_sockaddr_print(src_addr, peer_info, sizeof(peer_info), 3);
+
+    /* Check that this peer has a permission. Note: only the IP address
+     * part is compared (pj_sockaddr_get_addr), not the port.
+     */
+    for (i=0; i<alloc->perm_cnt; ++i) {
+        if (pj_sockaddr_get_len(&alloc->perm[i]) == (unsigned)addr_len &&
+            pj_memcmp(pj_sockaddr_get_addr(&alloc->perm[i]),
+                      pj_sockaddr_get_addr(src_addr),
+                      addr_len) == 0)
+        {
+            break;
+        }
+    }
+    if (i==alloc->perm_cnt) {
+        /* No permission: drop the packet */
+        PJ_LOG(5,("", "Client %s received %d bytes unauthorized data from peer %s",
+                  client_info, size, peer_info));
+        if (alloc->perm_cnt == 0)
+            PJ_LOG(5,("", "Client %s has no permission", client_info));
+        return PJ_TRUE;
+    }
+
+    /* Format a Data indication by rewriting the template's peer address
+     * and data attributes in place.
+     */
+    pa = (pj_stun_xor_peer_addr_attr*)
+         pj_stun_msg_find_attr(alloc->data_ind, PJ_STUN_ATTR_XOR_PEER_ADDR, 0);
+    da = (pj_stun_data_attr*)
+         pj_stun_msg_find_attr(alloc->data_ind, PJ_STUN_ATTR_DATA, 0);
+    pj_assert(pa && da);
+
+    pj_sockaddr_cp(&pa->sockaddr, src_addr);
+    da->data = (pj_uint8_t*)data;
+    da->length = size;
+
+    /* Encode Data indication */
+    status = pj_stun_msg_encode(alloc->data_ind, buffer, sizeof(buffer), 0,
+                                NULL, &size);
+    if (status != PJ_SUCCESS)
+        return PJ_TRUE;
+
+    /* Send to the TURN client via the main TURN server socket */
+    sent = size;
+    PJ_LOG(5,("", "Forwarding %d bytes data from peer %s to client %s",
+              sent, peer_info, client_info));
+
+    pj_activesock_sendto(alloc->test_srv->turn_sock, &alloc->send_key, buffer,
+                         &sent, 0, &alloc->client_addr,
+                         pj_sockaddr_get_len(&alloc->client_addr));
+
+    return PJ_TRUE;
+}
+
diff --git a/pjnath/src/pjnath-test/server.h b/pjnath/src/pjnath-test/server.h
new file mode 100644
index 0000000..f8093bb
--- /dev/null
+++ b/pjnath/src/pjnath-test/server.h
@@ -0,0 +1,110 @@
+/* $Id: server.h 3553 2011-05-05 06:14:19Z nanang $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __PJNATH_TEST_SERVER_H__
+#define __PJNATH_TEST_SERVER_H__
+
+#include <pjnath.h>
+#include <pjlib-util.h>
+#include <pjlib.h>
+
+#define DNS_SERVER_PORT 55533
+#define STUN_SERVER_PORT 33478
+#define TURN_SERVER_PORT 33479
+
+#define TURN_USERNAME "auser"
+#define TURN_PASSWD "apass"
+
+#define MAX_TURN_ALLOC 16
+#define MAX_TURN_PERM 16
+
+/* Bit flags for create_test_server(), selecting which components to
+ * instantiate and which DNS records to install.
+ */
+enum test_server_flags
+{
+    CREATE_DNS_SERVER           = (1 << 0),  /* Run the internal DNS server          */
+    CREATE_A_RECORD_FOR_DOMAIN  = (1 << 1),  /* Add an A record for the bare domain  */
+
+    CREATE_STUN_SERVER          = (1 << 5),  /* Run the STUN server                  */
+    CREATE_STUN_SERVER_DNS_SRV  = (1 << 6),  /* Add _stun._udp SRV (+ A) records     */
+
+    CREATE_TURN_SERVER          = (1 << 10), /* Run the TURN server                  */
+    CREATE_TURN_SERVER_DNS_SRV  = (1 << 11), /* Add _turn._udp SRV (+ A) records     */
+
+};
+
+typedef struct test_server test_server;
+
+/* TURN allocation: per-client relay state held by the test TURN server. */
+typedef struct turn_allocation
+{
+    test_server *test_srv;            /* Owning test server                       */
+    pj_pool_t *pool;                  /* Pool owning this allocation's objects    */
+    pj_activesock_t *sock;            /* Relay socket towards peers               */
+    pj_ioqueue_op_key_t send_key;     /* Async send operation key                 */
+    pj_sockaddr client_addr;          /* TURN client's source address             */
+    pj_sockaddr alloc_addr;           /* Relayed (allocated) transport address    */
+    unsigned perm_cnt;                /* Number of active permissions             */
+    pj_sockaddr perm[MAX_TURN_PERM];  /* Peer addresses that have permission      */
+    unsigned chnum[MAX_TURN_PERM];    /* Channel number bound per peer (parallel) */
+    pj_stun_msg *data_ind;            /* Reusable Data indication template        */
+} turn_allocation;
+
+/*
+ * Server installation for testing.
+ * This comprises of DNS server, STUN server, and TURN server.
+ */
+struct test_server
+{
+    pj_pool_t *pool;                  /* Pool owning this struct and sockets  */
+    pj_uint32_t flags;                /* CREATE_xxx flags used at creation    */
+    pj_stun_config *stun_cfg;         /* Pool factory + ioqueue               */
+    pj_ioqueue_op_key_t send_key;     /* Async send key for server sockets    */
+
+    pj_dns_server *dns_server;        /* Internal DNS server, or NULL         */
+
+    pj_activesock_t *stun_sock;       /* STUN server UDP socket, or NULL      */
+
+    pj_activesock_t *turn_sock;       /* TURN server UDP socket, or NULL      */
+    unsigned turn_alloc_cnt;          /* Number of active TURN allocations    */
+    turn_allocation turn_alloc[MAX_TURN_ALLOC];
+    pj_bool_t turn_respond_allocate;  /* Answer Allocate requests?            */
+    pj_bool_t turn_respond_refresh;   /* Answer Refresh requests?             */
+
+    /* Counters of requests seen by the TURN server */
+    struct turn_stat {
+        unsigned rx_allocate_cnt;
+        unsigned rx_refresh_cnt;
+        unsigned rx_send_ind_cnt;
+    } turn_stat;
+
+    pj_str_t domain;                  /* DNS domain, also the TURN realm      */
+    pj_str_t username;                /* TURN_USERNAME                        */
+    pj_str_t passwd;                  /* TURN_PASSWD                          */
+
+};
+
+
+/* Create the test-server bundle; "flags" is a combination of
+ * test_server_flags bits and "domain" is the DNS domain / TURN realm.
+ */
+pj_status_t create_test_server(pj_stun_config *stun_cfg,
+                               pj_uint32_t flags,
+                               const char *domain,
+                               test_server **p_test_srv);
+/* Destroy the server and release all of its resources. */
+void destroy_test_server(test_server *test_srv);
+/* Poll pending events for the test server. */
+void test_server_poll_events(test_server *test_srv);
+
+
+#endif /* __PJNATH_TEST_SERVER_H__ */
+
diff --git a/pjnath/src/pjnath-test/sess_auth.c b/pjnath/src/pjnath-test/sess_auth.c
new file mode 100644
index 0000000..05a6209
--- /dev/null
+++ b/pjnath/src/pjnath-test/sess_auth.c
@@ -0,0 +1,1146 @@
+/* $Id: sess_auth.c 3553 2011-05-05 06:14:19Z nanang $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include "test.h"
+
+#define THIS_FILE "sess_auth.c"
+
+#define REALM "STUN session test"
+#define USERNAME "theusername"
+#define PASSWORD "thepassword"
+#define NONCE "thenonce"
+
+
+/* STUN config */
+static pj_stun_config stun_cfg;
+
+
+//////////////////////////////////////////////////////////////////////////////////////////
+//
+// SERVER PART
+//
+
+
+/* Server instance (one per test run; freed by destroy_server()) */
+static struct server
+{
+    pj_pool_t *pool;             /* Pool owning this struct                 */
+    pj_sockaddr addr;            /* Bound address of the server socket      */
+    pj_stun_session *sess;       /* STUN session answering requests         */
+
+    pj_bool_t responding;        /* Pass received packets to the session?   */
+    unsigned recv_count;         /* Number of packets received              */
+    pj_stun_auth_type auth_type; /* Short-term or long-term credential      */
+
+    pj_sock_t sock;              /* Server UDP socket                       */
+
+    pj_bool_t quit;              /* Signals the worker thread to stop       */
+    pj_thread_t *thread;         /* Worker thread running server_thread()   */
+} *server;
+
+
+/* Transport adapter for the server's STUN session: push an outgoing
+ * packet straight onto the server's UDP socket.
+ */
+static pj_status_t server_send_msg(pj_stun_session *sess,
+                                   void *token,
+                                   const void *pkt,
+                                   pj_size_t pkt_size,
+                                   const pj_sockaddr_t *dst_addr,
+                                   unsigned addr_len)
+{
+    pj_ssize_t size = pkt_size;
+
+    PJ_UNUSED_ARG(token);
+    PJ_UNUSED_ARG(sess);
+
+    return pj_sock_sendto(server->sock, pkt, &size, 0, dst_addr, addr_len);
+}
+
+/* STUN session callback: a request arrived. Answer with an empty
+ * success response (the session adds any authentication attributes).
+ */
+static pj_status_t server_on_rx_request(pj_stun_session *sess,
+                                        const pj_uint8_t *pkt,
+                                        unsigned pkt_len,
+                                        const pj_stun_rx_data *rdata,
+                                        void *token,
+                                        const pj_sockaddr_t *src_addr,
+                                        unsigned src_addr_len)
+{
+    pj_status_t status;
+
+    PJ_UNUSED_ARG(token);
+    PJ_UNUSED_ARG(pkt_len);
+    PJ_UNUSED_ARG(pkt);
+
+    status = pj_stun_session_respond(sess, rdata, 0, NULL, NULL, PJ_TRUE,
+                                     src_addr, src_addr_len);
+    return status;
+}
+
+
+/* Credential callback: supply the realm/nonce to challenge with.
+ * Short-term auth uses no realm or nonce; long-term advertises ours.
+ */
+static pj_status_t server_get_auth(void *user_data,
+                                   pj_pool_t *pool,
+                                   pj_str_t *realm,
+                                   pj_str_t *nonce)
+{
+    PJ_UNUSED_ARG(pool);
+    PJ_UNUSED_ARG(user_data);
+
+    if (server->auth_type != PJ_STUN_AUTH_SHORT_TERM) {
+        /* Long-term: advertise our realm and nonce */
+        *realm = pj_str(REALM);
+        *nonce = pj_str(NONCE);
+    } else {
+        /* Short-term: empty realm and nonce */
+        realm->slen = 0;
+        nonce->slen = 0;
+    }
+
+    return PJ_SUCCESS;
+}
+
+
+/* Credential callback: return the plaintext password for USERNAME,
+ * after checking that the presence of a realm matches the configured
+ * authentication type. Returns -1 to reject the request.
+ */
+static pj_status_t server_get_password( const pj_stun_msg *msg,
+                                        void *user_data,
+                                        const pj_str_t *realm,
+                                        const pj_str_t *username,
+                                        pj_pool_t *pool,
+                                        pj_stun_passwd_type *data_type,
+                                        pj_str_t *data)
+{
+    pj_bool_t has_realm;
+
+    PJ_UNUSED_ARG(msg);
+    PJ_UNUSED_ARG(user_data);
+    PJ_UNUSED_ARG(pool);
+
+    has_realm = (realm != NULL && realm->slen != 0);
+
+    /* Short-term auth must not carry a realm; long-term must. */
+    if (server->auth_type == PJ_STUN_AUTH_SHORT_TERM) {
+        if (has_realm) {
+            PJ_LOG(4,(THIS_FILE, " server expecting short term"));
+            return -1;
+        }
+    } else if (!has_realm) {
+        PJ_LOG(4,(THIS_FILE, " realm not present"));
+        return -1;
+    }
+
+    if (pj_strcmp2(username, USERNAME) != 0) {
+        PJ_LOG(4,(THIS_FILE, " wrong username"));
+        return -1;
+    }
+
+    *data_type = PJ_STUN_PASSWD_PLAIN;
+    *data = pj_str(PASSWORD);
+
+    return PJ_SUCCESS;
+}
+
+
+/* Credential callback: accept a request only when its NONCE matches
+ * the fixed nonce this server hands out.
+ */
+static pj_bool_t server_verify_nonce(const pj_stun_msg *msg,
+                                     void *user_data,
+                                     const pj_str_t *realm,
+                                     const pj_str_t *username,
+                                     const pj_str_t *nonce)
+{
+    PJ_UNUSED_ARG(username);
+    PJ_UNUSED_ARG(realm);
+    PJ_UNUSED_ARG(user_data);
+    PJ_UNUSED_ARG(msg);
+
+    return (pj_strcmp2(nonce, NONCE) == 0) ? PJ_TRUE : PJ_FALSE;
+}
+
+
+/* Server worker thread: poll the UDP socket with a short select()
+ * timeout (so the quit flag is noticed promptly) and feed received
+ * packets to the STUN session when "responding" is enabled.
+ */
+static int server_thread(void *unused)
+{
+    PJ_UNUSED_ARG(unused);
+
+    PJ_LOG(5,("", " server thread started"));
+
+    while (!server->quit) {
+        pj_fd_set_t readset;
+        pj_time_val delay = {0, 10};    /* short timeout between quit checks */
+
+        PJ_FD_ZERO(&readset);
+        PJ_FD_SET(server->sock, &readset);
+
+        if (pj_sock_select(server->sock+1, &readset, NULL, NULL, &delay)==1 &&
+            PJ_FD_ISSET(server->sock, &readset))
+        {
+            char pkt[1000];
+            pj_ssize_t len;
+            pj_status_t status;
+            pj_sockaddr src_addr;
+            int src_addr_len;
+
+            len = sizeof(pkt);
+            src_addr_len = sizeof(src_addr);
+
+            status = pj_sock_recvfrom(server->sock, pkt, &len, 0, &src_addr, &src_addr_len);
+            if (status != PJ_SUCCESS)
+                continue;
+
+            /* Increment server's receive count */
+            server->recv_count++;
+
+            /* Only pass to server if we allow to respond */
+            if (!server->responding)
+                continue;
+
+            pj_stun_session_on_rx_pkt(server->sess, pkt, len,
+                                      PJ_STUN_CHECK_PACKET | PJ_STUN_IS_DATAGRAM,
+                                      NULL, NULL, &src_addr, src_addr_len);
+        }
+    }
+
+    return 0;
+}
+
+
+/* Stop the worker thread, close the socket, destroy the STUN session,
+ * then release the pool owning the server struct.
+ */
+static void destroy_server(void)
+{
+    if (server->thread) {
+        /* Ask the worker loop to exit, then reap the thread. */
+        server->quit = PJ_TRUE;
+        pj_thread_join(server->thread);
+        pj_thread_destroy(server->thread);
+    }
+
+    if (server->sock) {
+        pj_sock_close(server->sock);
+    }
+
+    if (server->sess) {
+        pj_stun_session_destroy(server->sess);
+    }
+
+    /* The pool owns the struct, so clear the global after release. */
+    pj_pool_release(server->pool);
+    server = NULL;
+}
+
+/* Instantiate standard server */
+static int create_std_server(pj_stun_auth_type auth_type,
+ pj_bool_t responding)
+{
+ pj_pool_t *pool;
+ pj_stun_session_cb sess_cb;
+ pj_stun_auth_cred cred;
+ pj_status_t status;
+
+ /* Create server */
+ pool = pj_pool_create(mem, "server", 1000, 1000, NULL);
+ server = PJ_POOL_ZALLOC_T(pool, struct server);
+ server->pool = pool;
+ server->auth_type = auth_type;
+ server->responding = responding;
+
+ /* Create STUN session */
+ pj_bzero(&sess_cb, sizeof(sess_cb));
+ sess_cb.on_rx_request = &server_on_rx_request;
+ sess_cb.on_send_msg = &server_send_msg;
+ status = pj_stun_session_create(&stun_cfg, "server", &sess_cb, PJ_FALSE, &server->sess);
+ if (status != PJ_SUCCESS) {
+ destroy_server();
+ return -10;
+ }
+
+ /* Configure credential */
+ pj_bzero(&cred, sizeof(cred));
+ cred.type = PJ_STUN_AUTH_CRED_DYNAMIC;
+ cred.data.dyn_cred.get_auth = &server_get_auth;
+ cred.data.dyn_cred.get_password = &server_get_password;
+ cred.data.dyn_cred.verify_nonce = &server_verify_nonce;
+ status = pj_stun_session_set_credential(server->sess, auth_type, &cred);
+ if (status != PJ_SUCCESS) {
+ destroy_server();
+ return -20;
+ }
+
+ /* Create socket */
+ status = pj_sock_socket(pj_AF_INET(), pj_SOCK_DGRAM(), 0, &server->sock);
+ if (status != PJ_SUCCESS) {
+ destroy_server();
+ return -30;
+ }
+
+ /* Bind */
+ pj_sockaddr_in_init(&server->addr.ipv4, NULL, 0);
+ status = pj_sock_bind(server->sock, &server->addr, pj_sockaddr_get_len(&server->addr));
+ if (status != PJ_SUCCESS) {
+ destroy_server();
+ return -40;
+ } else {
+ /* Get the bound IP address */
+ int namelen = sizeof(server->addr);
+ pj_sockaddr addr;
+
+ status = pj_sock_getsockname(server->sock, &server->addr, &namelen);
+ if (status != PJ_SUCCESS) {
+ destroy_server();
+ return -43;
+ }
+
+ status = pj_gethostip(pj_AF_INET(), &addr);
+ if (status != PJ_SUCCESS) {
+ destroy_server();
+ return -45;
+ }
+
+ pj_sockaddr_copy_addr(&server->addr, &addr);
+ }
+
+
+ /* Create worker thread */
+ status = pj_thread_create(pool, "server", &server_thread, 0, 0, 0, &server->thread);
+ if (status != PJ_SUCCESS) {
+ destroy_server();
+ return -30;
+ }
+
+ return 0;
+}
+
+
+//////////////////////////////////////////////////////////////////////////////////////////
+//
+// CLIENT PART
+//
+
+/* Global client instance, the counterpart of "struct server" above. */
+static struct client
+{
+    pj_pool_t *pool;              /* Memory pool owning this struct.       */
+    pj_stun_session *sess;        /* Client STUN session.                  */
+    pj_sem_t *test_complete;      /* Posted when the transaction finishes. */
+    pj_sock_t sock;               /* Client UDP socket.                    */
+
+    pj_bool_t responding;         /* Hand rx packets to the session?       */
+    unsigned recv_count;          /* Number of packets received.           */
+
+    pj_status_t response_status;  /* Final status of the transaction.      */
+    pj_stun_msg *response;        /* Clone of the response, if any.        */
+
+    pj_bool_t quit;               /* Tells the worker thread to exit.      */
+    pj_thread_t *thread;          /* Worker thread polling sock + timers.  */
+} *client;
+
+
+/* Transmit callback of the client STUN session: push the encoded packet
+ * out through the client's UDP socket to the requested destination.
+ */
+static pj_status_t client_send_msg(pj_stun_session *sess,
+                                   void *token,
+                                   const void *pkt,
+                                   pj_size_t pkt_size,
+                                   const pj_sockaddr_t *dst_addr,
+                                   unsigned addr_len)
+{
+    pj_ssize_t size;
+
+    PJ_UNUSED_ARG(sess);
+    PJ_UNUSED_ARG(token);
+
+    size = (pj_ssize_t)pkt_size;
+    return pj_sock_sendto(client->sock, pkt, &size, 0, dst_addr, addr_len);
+}
+
+
+/* Invoked when the outstanding client request completes, either with a
+ * response or with a timeout/error status. Records the outcome for the
+ * main test thread and wakes it up.
+ */
+static void client_on_request_complete( pj_stun_session *sess,
+                                        pj_status_t status,
+                                        void *token,
+                                        pj_stun_tx_data *tdata,
+                                        const pj_stun_msg *response,
+                                        const pj_sockaddr_t *src_addr,
+                                        unsigned src_addr_len)
+{
+    PJ_UNUSED_ARG(sess);
+    PJ_UNUSED_ARG(token);
+    PJ_UNUSED_ARG(tdata);
+    PJ_UNUSED_ARG(src_addr);
+    PJ_UNUSED_ARG(src_addr_len);
+
+    /* The response belongs to the session, so keep our own copy in the
+     * client's pool for later verification.
+     */
+    client->response_status = status;
+    client->response = response ? pj_stun_msg_clone(client->pool, response)
+                                : NULL;
+
+    /* Unblock the waiting test */
+    pj_sem_post(client->test_complete);
+}
+
+
+/* Client worker thread: drives the timer heap (which performs STUN
+ * retransmissions) and polls the client socket until told to quit.
+ * Incoming packets are counted and, when "responding" is enabled,
+ * handed to the client's STUN session.
+ */
+static int client_thread(void *unused)
+{
+    PJ_UNUSED_ARG(unused);
+
+    while (!client->quit) {
+        pj_fd_set_t readset;
+        pj_time_val delay = {0, 10};
+
+        /* Also poll the timer heap */
+        pj_timer_heap_poll(stun_cfg.timer_heap, NULL);
+
+        /* Poll client socket */
+        PJ_FD_ZERO(&readset);
+        PJ_FD_SET(client->sock, &readset);
+
+        if (pj_sock_select(client->sock+1, &readset, NULL, NULL, &delay)==1 &&
+            PJ_FD_ISSET(client->sock, &readset))
+        {
+            char pkt[1000];
+            pj_ssize_t len;
+            pj_status_t status;
+            pj_sockaddr src_addr;
+            int src_addr_len;
+
+            len = sizeof(pkt);
+            src_addr_len = sizeof(src_addr);
+
+            status = pj_sock_recvfrom(client->sock, pkt, &len, 0, &src_addr, &src_addr_len);
+            if (status != PJ_SUCCESS)
+                continue;
+
+            /* Increment client's receive count */
+            client->recv_count++;
+
+            /* Only pass to client if we allow to respond */
+            if (!client->responding)
+                continue;
+
+            pj_stun_session_on_rx_pkt(client->sess, pkt, len,
+                                      PJ_STUN_CHECK_PACKET | PJ_STUN_IS_DATAGRAM,
+                                      NULL, NULL, &src_addr, src_addr_len);
+        }
+
+    }
+
+    return 0;
+}
+
+
+/* Destroy the client and, if one exists, the server.
+ *
+ * The worker thread is stopped before the session/socket/semaphore are
+ * destroyed to avoid races. The client's pool is released last, which
+ * also frees the client struct itself, so the static pointer is reset
+ * afterwards.
+ */
+static void destroy_client_server(void)
+{
+    if (client->thread) {
+        client->quit = 1;
+        pj_thread_join(client->thread);
+        pj_thread_destroy(client->thread);
+    }
+
+    if (client->sess)
+        pj_stun_session_destroy(client->sess);
+
+    if (client->sock)
+        pj_sock_close(client->sock);
+
+    if (client->test_complete)
+        pj_sem_destroy(client->test_complete);
+
+    if (server)
+        destroy_server();
+
+    /* Fix: the client's pool was never released, leaking memory on every
+     * test scenario. Release it and clear the dangling pointer.
+     */
+    pj_pool_release(client->pool);
+    client = NULL;
+}
+
+/* Run one client/server authentication scenario.
+ *
+ * A fresh client and server are created with the given authentication
+ * types and credential, a single Binding request is sent, and the outcome
+ * is checked against the expectation:
+ *  - expected_error:  when PJ_TRUE the transaction itself must fail with
+ *                     status "expected_code"; when PJ_FALSE a response
+ *                     must arrive and its STUN error code (0 for success
+ *                     responses) must equal "expected_code".
+ *  - expected_realm / expected_nonce: when non-NULL the response must
+ *                     carry a matching REALM / NONCE attribute; when NULL
+ *                     the attribute must be absent.
+ *  - more_check:      optional extra verification run after all checks
+ *                     above have succeeded.
+ *
+ * Returns zero on success, a negative error code otherwise. The client
+ * and server are always destroyed before returning.
+ */
+static int run_client_test(const char *title,
+
+                           pj_bool_t server_responding,
+                           pj_stun_auth_type server_auth_type,
+
+                           pj_stun_auth_type client_auth_type,
+                           const char *realm,
+                           const char *username,
+                           const char *nonce,
+                           const char *password,
+                           pj_bool_t dummy_mi,
+
+                           pj_bool_t expected_error,
+                           pj_status_t expected_code,
+                           const char *expected_realm,
+                           const char *expected_nonce,
+
+                           int (*more_check)(void))
+{
+    pj_pool_t *pool;
+    pj_stun_session_cb sess_cb;
+    pj_stun_auth_cred cred;
+    pj_stun_tx_data *tdata;
+    pj_status_t status;
+    int rc = 0;
+
+    PJ_LOG(3,(THIS_FILE, " %s test", title));
+
+    /* Create client */
+    pool = pj_pool_create(mem, "client", 1000, 1000, NULL);
+    client = PJ_POOL_ZALLOC_T(pool, struct client);
+    client->pool = pool;
+    client->responding = PJ_TRUE;
+
+    /* Create STUN session */
+    pj_bzero(&sess_cb, sizeof(sess_cb));
+    sess_cb.on_request_complete = &client_on_request_complete;
+    sess_cb.on_send_msg = &client_send_msg;
+    status = pj_stun_session_create(&stun_cfg, "client", &sess_cb, PJ_FALSE, &client->sess);
+    if (status != PJ_SUCCESS) {
+        destroy_client_server();
+        return -200;
+    }
+
+    /* Create semaphore */
+    status = pj_sem_create(pool, "client", 0, 1, &client->test_complete);
+    if (status != PJ_SUCCESS) {
+        destroy_client_server();
+        return -205;
+    }
+
+    /* Create client socket */
+    status = pj_sock_socket(pj_AF_INET(), pj_SOCK_DGRAM(), 0, &client->sock);
+    if (status != PJ_SUCCESS) {
+        destroy_client_server();
+        return -210;
+    }
+
+    /* Bind client socket */
+    status = pj_sock_bind_in(client->sock, 0, 0);
+    if (status != PJ_SUCCESS) {
+        destroy_client_server();
+        return -220;
+    }
+
+    /* Create client thread */
+    status = pj_thread_create(pool, "client", &client_thread, NULL, 0, 0, &client->thread);
+    if (status != PJ_SUCCESS) {
+        destroy_client_server();
+        return -230;
+    }
+
+    /* Initialize credential */
+    pj_bzero(&cred, sizeof(cred));
+    cred.type = PJ_STUN_AUTH_CRED_STATIC;
+    if (realm) cred.data.static_cred.realm = pj_str((char*)realm);
+    if (username) cred.data.static_cred.username = pj_str((char*)username);
+    if (nonce) cred.data.static_cred.nonce = pj_str((char*)nonce);
+    if (password) cred.data.static_cred.data = pj_str((char*)password);
+    cred.data.static_cred.data_type = PJ_STUN_PASSWD_PLAIN;
+    status = pj_stun_session_set_credential(client->sess, client_auth_type, &cred);
+    if (status != PJ_SUCCESS) {
+        destroy_client_server();
+        return -240;
+    }
+
+    /* Create the server */
+    status = create_std_server(server_auth_type, server_responding);
+    if (status != 0) {
+        destroy_client_server();
+        return status;
+    }
+
+    /* Create request */
+    status = pj_stun_session_create_req(client->sess, PJ_STUN_BINDING_REQUEST,
+                                        PJ_STUN_MAGIC, NULL, &tdata);
+    if (status != PJ_SUCCESS) {
+        destroy_client_server();
+        return -250;
+    }
+
+    /* Add our own attributes if client authentication is set to none */
+    if (client_auth_type == PJ_STUN_AUTH_NONE) {
+        pj_str_t tmp;
+        if (realm)
+            pj_stun_msg_add_string_attr(tdata->pool, tdata->msg, PJ_STUN_ATTR_REALM, pj_cstr(&tmp, realm));
+        if (username)
+            pj_stun_msg_add_string_attr(tdata->pool, tdata->msg, PJ_STUN_ATTR_USERNAME, pj_cstr(&tmp, username));
+        if (nonce)
+            pj_stun_msg_add_string_attr(tdata->pool, tdata->msg, PJ_STUN_ATTR_NONCE, pj_cstr(&tmp, nonce));
+        if (password) {
+            // ignored
+        }
+        if (dummy_mi) {
+            /* Add an (empty) MESSAGE-INTEGRITY so the server sees the
+             * attribute even though no client credential is configured.
+             */
+            pj_stun_msgint_attr *mi;
+
+            pj_stun_msgint_attr_create(tdata->pool, &mi);
+            pj_stun_msg_add_attr(tdata->msg, &mi->hdr);
+        }
+
+    }
+
+    /* Send the request */
+    status = pj_stun_session_send_msg(client->sess, NULL, PJ_FALSE, PJ_TRUE, &server->addr,
+                                      pj_sockaddr_get_len(&server->addr), tdata);
+    if (status != PJ_SUCCESS) {
+        destroy_client_server();
+        return -270;
+    }
+
+    /* Wait until test complete */
+    pj_sem_wait(client->test_complete);
+
+
+    /* Verify response */
+    if (expected_error) {
+        if (expected_code != client->response_status) {
+            char e1[PJ_ERR_MSG_SIZE], e2[PJ_ERR_MSG_SIZE];
+
+            pj_strerror(expected_code, e1, sizeof(e1));
+            pj_strerror(client->response_status, e2, sizeof(e2));
+
+            PJ_LOG(3,(THIS_FILE, " err: expecting %d (%s) but got %d (%s) response",
+                      expected_code, e1, client->response_status, e2));
+            rc = -500;
+        }
+
+    } else {
+        int res_code = 0;
+        pj_stun_realm_attr *arealm;
+        pj_stun_nonce_attr *anonce;
+
+        if (client->response_status != 0) {
+            PJ_LOG(3,(THIS_FILE, " err: expecting successful operation but got error %d",
+                      client->response_status));
+            rc = -600;
+            goto done;
+        }
+
+        if (PJ_STUN_IS_ERROR_RESPONSE(client->response->hdr.type)) {
+            pj_stun_errcode_attr *aerr = NULL;
+
+            aerr = (pj_stun_errcode_attr*)
+                   pj_stun_msg_find_attr(client->response,
+                                         PJ_STUN_ATTR_ERROR_CODE, 0);
+            if (aerr == NULL) {
+                PJ_LOG(3,(THIS_FILE, " err: received error response without ERROR-CODE"));
+                rc = -610;
+                goto done;
+            }
+
+            res_code = aerr->err_code;
+        } else {
+            res_code = 0;
+        }
+
+        /* Check that code matches */
+        if (expected_code != res_code) {
+            PJ_LOG(3,(THIS_FILE, " err: expecting response code %d but got %d",
+                      expected_code, res_code));
+            rc = -620;
+            goto done;
+        }
+
+        /* Find REALM and NONCE attributes */
+        arealm = (pj_stun_realm_attr*)
+                 pj_stun_msg_find_attr(client->response, PJ_STUN_ATTR_REALM, 0);
+        anonce = (pj_stun_nonce_attr*)
+                 pj_stun_msg_find_attr(client->response, PJ_STUN_ATTR_NONCE, 0);
+
+        if (expected_realm) {
+            if (arealm == NULL) {
+                /* Fix: log message previously read "esponse" */
+                PJ_LOG(3,(THIS_FILE, " err: expecting REALM in response"));
+                rc = -630;
+                goto done;
+            }
+            if (pj_strcmp2(&arealm->value, expected_realm)!=0) {
+                PJ_LOG(3,(THIS_FILE, " err: REALM mismatch in response"));
+                rc = -640;
+                goto done;
+            }
+        } else {
+            if (arealm != NULL) {
+                PJ_LOG(3,(THIS_FILE, " err: non expecting REALM in response"));
+                rc = -650;
+                goto done;
+            }
+        }
+
+        if (expected_nonce) {
+            if (anonce == NULL) {
+                /* Fix: log message previously read "esponse" */
+                PJ_LOG(3,(THIS_FILE, " err: expecting NONCE in response"));
+                rc = -660;
+                goto done;
+            }
+            if (pj_strcmp2(&anonce->value, expected_nonce)!=0) {
+                PJ_LOG(3,(THIS_FILE, " err: NONCE mismatch in response"));
+                rc = -670;
+                goto done;
+            }
+        } else {
+            if (anonce != NULL) {
+                PJ_LOG(3,(THIS_FILE, " err: non expecting NONCE in response"));
+                rc = -680;
+                goto done;
+            }
+        }
+    }
+
+    /* Our tests are okay so far. Let caller do some more tests if
+     * it wants to.
+     */
+    if (rc==0 && more_check) {
+        rc = (*more_check)();
+    }
+
+
+done:
+    destroy_client_server();
+    return rc;
+}
+
+
+//////////////////////////////////////////////////////////////////////////////////////////
+//
+// More verification
+//
+
+/* Retransmission test: with a silent server the request must have been
+ * sent exactly PJ_STUN_MAX_TRANSMIT_COUNT times, and the client must not
+ * have received anything back.
+ */
+static int retransmit_check(void)
+{
+
+    if (server->recv_count != PJ_STUN_MAX_TRANSMIT_COUNT) {
+        /* Fix: log with THIS_FILE as sender (was an empty string, unlike
+         * every other log statement in this file).
+         */
+        PJ_LOG(3,(THIS_FILE, " expecting %d retransmissions, got %d",
+                  PJ_STUN_MAX_TRANSMIT_COUNT, server->recv_count));
+        return -700;
+    }
+    if (client->recv_count != 0)
+        return -710;
+
+    return 0;
+}
+
+/* Long-term 401 challenge: the response SHOULD NOT contain USERNAME or
+ * MESSAGE-INTEGRITY attributes.
+ */
+static int long_term_check1(void)
+{
+    if (pj_stun_msg_find_attr(client->response, PJ_STUN_ATTR_USERNAME, 0))
+        return -800;
+    /* Fix: this path previously also returned -800, making the two
+     * failures indistinguishable; use -810 per the file's convention.
+     */
+    if (pj_stun_msg_find_attr(client->response, PJ_STUN_ATTR_MESSAGE_INTEGRITY, 0))
+        return -810;
+
+    return 0;
+}
+
+/* Long-term 400 response: it SHOULD NOT include a USERNAME, NONCE,
+ * REALM or MESSAGE-INTEGRITY attribute.
+ */
+static int long_term_check2(void)
+{
+    static const int forbidden[] = {
+        PJ_STUN_ATTR_USERNAME,           /* -900 */
+        PJ_STUN_ATTR_NONCE,              /* -910 */
+        PJ_STUN_ATTR_REALM,              /* -920 */
+        PJ_STUN_ATTR_MESSAGE_INTEGRITY   /* -930 */
+    };
+    unsigned i;
+
+    for (i=0; i<PJ_ARRAY_SIZE(forbidden); ++i) {
+        if (pj_stun_msg_find_attr(client->response, forbidden[i], 0))
+            return -900 - (int)(i * 10);
+    }
+
+    return 0;
+}
+
+/* Successful long-term response: it SHOULD NOT include a USERNAME,
+ * NONCE, or REALM attribute.
+ */
+static int long_term_check3(void)
+{
+    static const int forbidden[] = {
+        PJ_STUN_ATTR_USERNAME,   /* -1000 */
+        PJ_STUN_ATTR_NONCE,      /* -1010 */
+        PJ_STUN_ATTR_REALM       /* -1020 */
+    };
+    unsigned i;
+
+    for (i=0; i<PJ_ARRAY_SIZE(forbidden); ++i) {
+        if (pj_stun_msg_find_attr(client->response, forbidden[i], 0))
+            return -1000 - (int)(i * 10);
+    }
+
+    return 0;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////
+//
+// TEST MAIN
+//
+
+
+/* Entry point: STUN session authentication test.
+ *
+ * Exercises the short-term and long-term credential mechanisms of the
+ * STUN session (draft-ietf-behave-rfc3489bis-15) by running a series of
+ * client/server scenarios through run_client_test(). Returns zero when
+ * all scenarios pass, or the first failing scenario's error code.
+ */
+int sess_auth_test(void)
+{
+    pj_pool_t *pool;
+    int rc;
+
+    PJ_LOG(3,(THIS_FILE, " STUN session authentication test"));
+
+    /* Init STUN config */
+    pj_stun_config_init(&stun_cfg, mem, 0, NULL, NULL);
+
+    /* Create pool and timer heap (shared by all scenarios below) */
+    pool = pj_pool_create(mem, "authtest", 200, 200, NULL);
+    if (pj_timer_heap_create(pool, 20, &stun_cfg.timer_heap)) {
+        pj_pool_release(pool);
+        return -5;
+    }
+
+    /* Basic retransmission test */
+    rc = run_client_test("Retransmission",      // title
+                         PJ_FALSE,              // server responding
+                         PJ_STUN_AUTH_NONE,     // server auth
+                         PJ_STUN_AUTH_NONE,     // client auth
+                         NULL,                  // realm
+                         NULL,                  // username
+                         NULL,                  // nonce
+                         NULL,                  // password
+                         PJ_FALSE,              // dummy MI
+                         PJ_TRUE,               // expected error
+                         PJNATH_ESTUNTIMEDOUT,  // expected code
+                         NULL,                  // expected realm
+                         NULL,                  // expected nonce
+                         &retransmit_check      // more check
+                         );
+    if (rc != 0) {
+        goto done;
+    }
+
+    /*
+     * Short term credential.
+     * draft-ietf-behave-rfc3489bis-15#section-10.1.2
+     */
+
+    /*
+     * If the message does not contain both a MESSAGE-INTEGRITY and a
+     * USERNAME attribute, If the message is a request, the server MUST
+     * reject the request with an error response. This response MUST
+     * use an error code of 400 (Bad Request).
+     */
+    rc = run_client_test("Missing MESSAGE-INTEGRITY (short term)", // title
+                         PJ_TRUE,                       // server responding
+                         PJ_STUN_AUTH_SHORT_TERM,       // server auth
+                         PJ_STUN_AUTH_NONE,             // client auth
+                         NULL,                          // realm
+                         NULL,                          // username
+                         NULL,                          // nonce
+                         NULL,                          // password
+                         PJ_FALSE,                      // dummy MI
+                         PJ_TRUE,                       // expected error
+                         PJ_STATUS_FROM_STUN_CODE(400), // expected code
+                         NULL,                          // expected realm
+                         NULL,                          // expected nonce
+                         NULL                           // more check
+                         );
+    if (rc != 0) {
+        goto done;
+    }
+
+    /* If the USERNAME does not contain a username value currently valid
+     * within the server: If the message is a request, the server MUST
+     * reject the request with an error response. This response MUST use
+     * an error code of 401 (Unauthorized).
+     */
+    rc = run_client_test("USERNAME mismatch (short term)", // title
+                         PJ_TRUE,                       // server responding
+                         PJ_STUN_AUTH_SHORT_TERM,       // server auth
+                         PJ_STUN_AUTH_SHORT_TERM,       // client auth
+                         NULL,                          // realm
+                         "anotheruser",                 // username
+                         NULL,                          // nonce
+                         "anotherpass",                 // password
+                         PJ_FALSE,                      // dummy MI
+                         PJ_TRUE,                       // expected error
+                         PJ_STATUS_FROM_STUN_CODE(401), // expected code
+                         NULL,                          // expected realm
+                         NULL,                          // expected nonce
+                         NULL                           // more check
+                         );
+    if (rc != 0) {
+        goto done;
+    }
+
+    /* Using the password associated with the username, compute the value
+     * for the message-integrity as described in Section 15.4. If the
+     * resulting value does not match the contents of the MESSAGE-
+     * INTEGRITY attribute:
+     *
+     * - If the message is a request, the server MUST reject the request
+     *   with an error response. This response MUST use an error code
+     *   of 401 (Unauthorized).
+     */
+    rc = run_client_test("MESSAGE-INTEGRITY mismatch (short term)", // title
+                         PJ_TRUE,                       // server responding
+                         PJ_STUN_AUTH_SHORT_TERM,       // server auth
+                         PJ_STUN_AUTH_SHORT_TERM,       // client auth
+                         NULL,                          // realm
+                         USERNAME,                      // username
+                         NULL,                          // nonce
+                         "anotherpass",                 // password
+                         PJ_FALSE,                      // dummy MI
+                         PJ_TRUE,                       // expected error
+                         PJ_STATUS_FROM_STUN_CODE(401), // expected code
+                         NULL,                          // expected realm
+                         NULL,                          // expected nonce
+                         NULL                           // more check
+                         );
+    if (rc != 0) {
+        goto done;
+    }
+
+    /* USERNAME is not present, server must respond with 400 (Bad
+     * Request).
+     */
+    rc = run_client_test("Missing USERNAME (short term)",// title
+                         PJ_TRUE,                       // server responding
+                         PJ_STUN_AUTH_SHORT_TERM,       // server auth
+                         PJ_STUN_AUTH_NONE,             // client auth
+                         NULL,                          // realm
+                         NULL,                          // username
+                         NULL,                          // nonce
+                         NULL,                          // password
+                         PJ_TRUE,                       // dummy MI
+                         PJ_TRUE,                       // expected error
+                         PJ_STATUS_FROM_STUN_CODE(400), // expected code
+                         NULL,                          // expected realm
+                         NULL,                          // expected nonce
+                         NULL                           // more check
+                         );
+    if (rc != 0) {
+        goto done;
+    }
+
+    /* Successful short term authentication */
+    rc = run_client_test("Successful scenario (short term)", // title
+                         PJ_TRUE,                       // server responding
+                         PJ_STUN_AUTH_SHORT_TERM,       // server auth
+                         PJ_STUN_AUTH_SHORT_TERM,       // client auth
+                         NULL,                          // realm
+                         USERNAME,                      // username
+                         NULL,                          // nonce
+                         PASSWORD,                      // password
+                         PJ_FALSE,                      // dummy MI
+                         PJ_FALSE,                      // expected error
+                         PJ_SUCCESS,                    // expected code
+                         NULL,                          // expected realm
+                         NULL,                          // expected nonce
+                         NULL                           // more check
+                         );
+    if (rc != 0) {
+        goto done;
+    }
+
+    /*
+     * (our own) Extended tests for long term credential
+     */
+
+    /* When server wants to use short term credential, but request has
+     * REALM, reject with .... 401 ???
+     */
+    rc = run_client_test("Unwanted REALM (short term)",  // title
+                         PJ_TRUE,                       // server responding
+                         PJ_STUN_AUTH_SHORT_TERM,       // server auth
+                         PJ_STUN_AUTH_NONE,             // client auth
+                         REALM,                         // realm
+                         USERNAME,                      // username
+                         NULL,                          // nonce
+                         PASSWORD,                      // password
+                         PJ_TRUE,                       // dummy MI
+                         PJ_TRUE,                       // expected error
+                         PJ_STATUS_FROM_STUN_CODE(401), // expected code
+                         NULL,                          // expected realm
+                         NULL,                          // expected nonce
+                         &long_term_check2              // more check
+                         );
+    if (rc != 0) {
+        goto done;
+    }
+
+
+    /*
+     * Long term credential.
+     * draft-ietf-behave-rfc3489bis-15#section-10.2.2
+     */
+
+    /* If the message does not contain a MESSAGE-INTEGRITY attribute, the
+     * server MUST generate an error response with an error code of 401
+     * (Unauthorized). This response MUST include a REALM value. It is
+     * RECOMMENDED that the REALM value be the domain name of the
+     * provider of the STUN server. The response MUST include a NONCE,
+     * selected by the server. The response SHOULD NOT contain a
+     * USERNAME or MESSAGE-INTEGRITY attribute.
+     */
+    rc = run_client_test("Missing M-I (long term)",      // title
+                         PJ_TRUE,                       // server responding
+                         PJ_STUN_AUTH_LONG_TERM,        // server auth
+                         PJ_STUN_AUTH_NONE,             // client auth
+                         NULL,                          // client realm
+                         NULL,                          // client username
+                         NULL,                          // client nonce
+                         NULL,                          // client password
+                         PJ_FALSE,                      // client dummy MI
+                         PJ_TRUE,                       // expected error
+                         PJ_STATUS_FROM_STUN_CODE(401), // expected code
+                         REALM,                         // expected realm
+                         NONCE,                         // expected nonce
+                         &long_term_check1              // more check
+                         );
+    if (rc != 0) {
+        goto done;
+    }
+
+    /* If the message contains a MESSAGE-INTEGRITY attribute, but is
+     * missing the USERNAME, REALM or NONCE attributes, the server MUST
+     * generate an error response with an error code of 400 (Bad
+     * Request). This response SHOULD NOT include a USERNAME, NONCE,
+     * REALM or MESSAGE-INTEGRITY attribute.
+     */
+    /* Missing USERNAME */
+    rc = run_client_test("Missing USERNAME (long term)", // title
+                         PJ_TRUE,                       // server responding
+                         PJ_STUN_AUTH_LONG_TERM,        // server auth
+                         PJ_STUN_AUTH_NONE,             // client auth
+                         REALM,                         // client realm
+                         NULL,                          // client username
+                         NONCE,                         // client nonce
+                         PASSWORD,                      // client password
+                         PJ_TRUE,                       // client dummy MI
+                         PJ_TRUE,                       // expected error
+                         PJ_STATUS_FROM_STUN_CODE(400), // expected code
+                         NULL,                          // expected realm
+                         NULL,                          // expected nonce
+                         &long_term_check2              // more check
+                         );
+    if (rc != 0) {
+        goto done;
+    }
+
+    /* Missing REALM */
+    rc = run_client_test("Missing REALM (long term)",    // title
+                         PJ_TRUE,                       // server responding
+                         PJ_STUN_AUTH_LONG_TERM,        // server auth
+                         PJ_STUN_AUTH_NONE,             // client auth
+                         NULL,                          // client realm
+                         USERNAME,                      // client username
+                         NONCE,                         // client nonce
+                         PASSWORD,                      // client password
+                         PJ_TRUE,                       // client dummy MI
+                         PJ_TRUE,                       // expected error
+                         PJ_STATUS_FROM_STUN_CODE(400), // expected code
+                         NULL,                          // expected realm
+                         NULL,                          // expected nonce
+                         &long_term_check2              // more check
+                         );
+    if (rc != 0) {
+        goto done;
+    }
+
+    /* Missing NONCE */
+    rc = run_client_test("Missing NONCE (long term)",    // title
+                         PJ_TRUE,                       // server responding
+                         PJ_STUN_AUTH_LONG_TERM,        // server auth
+                         PJ_STUN_AUTH_NONE,             // client auth
+                         REALM,                         // client realm
+                         USERNAME,                      // client username
+                         NULL,                          // client nonce
+                         PASSWORD,                      // client password
+                         PJ_TRUE,                       // client dummy MI
+                         PJ_TRUE,                       // expected error
+                         PJ_STATUS_FROM_STUN_CODE(400), // expected code
+                         NULL,                          // expected realm
+                         NULL,                          // expected nonce
+                         &long_term_check2              // more check
+                         );
+    if (rc != 0) {
+        goto done;
+    }
+
+    /* If the NONCE is no longer valid, the server MUST generate an error
+     * response with an error code of 438 (Stale Nonce). This response
+     * MUST include a NONCE and REALM attribute and SHOULD NOT include the
+     * USERNAME or MESSAGE-INTEGRITY attribute. Servers can invalidate
+     * nonces in order to provide additional security. See Section 4.3
+     * of [RFC2617] for guidelines.
+     */
+    // how??
+
+    /* If the username in the USERNAME attribute is not valid, the server
+     * MUST generate an error response with an error code of 401
+     * (Unauthorized). This response MUST include a REALM value. It is
+     * RECOMMENDED that the REALM value be the domain name of the
+     * provider of the STUN server. The response MUST include a NONCE,
+     * selected by the server. The response SHOULD NOT contain a
+     * USERNAME or MESSAGE-INTEGRITY attribute.
+     */
+    rc = run_client_test("Invalid username (long term)",  // title
+                         PJ_TRUE,                       // server responding
+                         PJ_STUN_AUTH_LONG_TERM,        // server auth
+                         PJ_STUN_AUTH_LONG_TERM,        // client auth
+                         REALM,                         // client realm
+                         "anotheruser",                 // client username
+                         "a nonce",                     // client nonce
+                         "somepassword",                // client password
+                         PJ_FALSE,                      // client dummy MI
+                         PJ_TRUE,                       // expected error
+                         PJ_STATUS_FROM_STUN_CODE(401), // expected code
+                         REALM,                         // expected realm
+                         NONCE,                         // expected nonce
+                         &long_term_check1              // more check
+                         );
+    if (rc != 0) {
+        goto done;
+    }
+
+    /* Successful long term authentication */
+    rc = run_client_test("Successful scenario (long term)", // title
+                         PJ_TRUE,                       // server responding
+                         PJ_STUN_AUTH_LONG_TERM,        // server auth
+                         PJ_STUN_AUTH_LONG_TERM,        // client auth
+                         REALM,                         // client realm
+                         USERNAME,                      // client username
+                         "anothernonce",                // client nonce
+                         PASSWORD,                      // client password
+                         PJ_FALSE,                      // client dummy MI
+                         PJ_FALSE,                      // expected error
+                         0,                             // expected code
+                         NULL,                          // expected realm
+                         NULL,                          // expected nonce
+                         &long_term_check3              // more check
+                         );
+    if (rc != 0) {
+        goto done;
+    }
+
+    /*
+     * (our own) Extended tests for long term credential
+     */
+
+    /* If REALM doesn't match, server must respond with 401
+     */
+#if 0
+    // STUN session now will just use the realm sent in the
+    // response, so this test will fail because it will
+    // authenticate successfully.
+
+    rc = run_client_test("Invalid REALM (long term)",     // title
+                         PJ_TRUE,                       // server responding
+                         PJ_STUN_AUTH_LONG_TERM,        // server auth
+                         PJ_STUN_AUTH_LONG_TERM,        // client auth
+                         "anotherrealm",                // client realm
+                         USERNAME,                      // client username
+                         NONCE,                         // client nonce
+                         PASSWORD,                      // client password
+                         PJ_FALSE,                      // client dummy MI
+                         PJ_TRUE,                       // expected error
+                         PJ_STATUS_FROM_STUN_CODE(401), // expected code
+                         REALM,                         // expected realm
+                         NONCE,                         // expected nonce
+                         &long_term_check1              // more check
+                         );
+    if (rc != 0) {
+        goto done;
+    }
+#endif
+
+    /* TODO: scenarios below are not implemented yet. */
+
+    /* Invalid HMAC */
+
+    /* Valid static short term, without NONCE */
+
+    /* Valid static short term, WITH NONCE */
+
+    /* Valid static long term (with NONCE */
+
+    /* Valid dynamic short term (without NONCE) */
+
+    /* Valid dynamic short term (with NONCE) */
+
+    /* Valid dynamic long term (with NONCE) */
+
+
+done:
+    pj_timer_heap_destroy(stun_cfg.timer_heap);
+    pj_pool_release(pool);
+    return rc;
+}
diff --git a/pjnath/src/pjnath-test/stun.c b/pjnath/src/pjnath-test/stun.c
new file mode 100644
index 0000000..092f90e
--- /dev/null
+++ b/pjnath/src/pjnath-test/stun.c
@@ -0,0 +1,983 @@
+/* $Id: stun.c 3553 2011-05-05 06:14:19Z nanang $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include "test.h"
+
+#define THIS_FILE "stun.c"
+
+static pj_stun_msg* create1(pj_pool_t*);
+static int verify1(pj_stun_msg*);
+static int verify2(pj_stun_msg*);
+static int verify5(pj_stun_msg*);
+
+/* Table of decode tests. Each entry either decodes a raw PDU (pdu /
+ * pdu_len) and expects "expected_status" from the first decode, or builds
+ * a message via "create"; when decoding succeeds the message is
+ * re-encoded, decoded again, and optionally checked by "verify".
+ */
+static struct test
+{
+    const char *title;                   /* Test name printed to the log. */
+    char *pdu;                           /* Raw packet to decode, or NULL. */
+    unsigned pdu_len;                    /* Length of pdu, in bytes. */
+    pj_stun_msg* (*create)(pj_pool_t*);  /* Factory used when pdu is NULL. */
+    pj_status_t expected_status;         /* Expected first-decode result. */
+    int (*verify)(pj_stun_msg*);         /* Optional extra verification. */
+} tests[] =
+{
+    {
+        "Invalid message type",
+        "\x11\x01\x00\x00\x21\x12\xa4\x42"
+        "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
+        20,
+        NULL,
+        PJNATH_EINSTUNMSGTYPE,
+        NULL
+    },
+    {
+        "Short message (1) (partial header)",
+        "\x00\x01",
+        2,
+        NULL,
+        PJNATH_EINSTUNMSGLEN,
+        NULL
+    },
+    {
+        "Short message (2) (partial header)",
+        "\x00\x01\x00\x00\x21\x12\xa4\x42"
+        "\x00\x00\x00\x00\x00\x00\x00\x00",
+        16,
+        NULL,
+        PJNATH_EINSTUNMSGLEN,
+        NULL
+    },
+    {
+        "Short message (3), (missing attribute)",
+        "\x00\x01\x00\x08\x21\x12\xa4\x42"
+        "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
+        20,
+        NULL,
+        PJNATH_EINSTUNMSGLEN,
+        NULL
+    },
+    {
+        "Short message (4), (partial attribute header)",
+        "\x00\x01\x00\x08\x21\x12\xa4\x42"
+        "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+        "\x80\x28",
+        22,
+        NULL,
+        PJNATH_EINSTUNMSGLEN,
+        NULL
+    },
+    {
+        "Short message (5), (partial attribute header)",
+        "\x00\x01\x00\x08\x21\x12\xa4\x42"
+        "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+        "\x80\x28\x00",
+        23,
+        NULL,
+        PJNATH_EINSTUNMSGLEN,
+        NULL
+    },
+    {
+        "Short message (6), (partial attribute header)",
+        "\x00\x01\x00\x08\x21\x12\xa4\x42"
+        "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+        "\x80\x28\x00\x04",
+        24,
+        NULL,
+        PJNATH_EINSTUNMSGLEN,
+        NULL
+    },
+    {
+        "Short message (7), (partial attribute body)",
+        "\x00\x01\x00\x08\x21\x12\xa4\x42"
+        "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+        "\x80\x28\x00\x04\x00\x00\x00",
+        27,
+        NULL,
+        PJNATH_EINSTUNMSGLEN,
+        NULL
+    },
+    {
+        "Message length in header is too long",
+        "\x00\x01\xff\xff\x21\x12\xa4\x42"
+        "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+        "\x80\x28\x00\x04\x00\x00\x00",
+        27,
+        NULL,
+        PJNATH_EINSTUNMSGLEN,
+        NULL
+    },
+    {
+        "Message length in header is shorter",
+        "\x00\x01\x00\x04\x21\x12\xa4\x42"
+        "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+        "\x80\x28\x00\x04\x00\x00\x00\x00",
+        28,
+        NULL,
+        PJNATH_EINSTUNMSGLEN,
+        NULL
+    },
+    {
+        /* A wrong magic cookie is not an error per se (RFC 3489 peers). */
+        "Invalid magic",
+        "\x00\x01\x00\x08\x00\x12\xa4\x42"
+        "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+        "\x80\x28\x00\x04\x00\x00\x00\x00",
+        28,
+        NULL,
+        PJ_SUCCESS,
+        NULL
+    },
+    {
+        "Character beyond message",
+        "\x00\x01\x00\x08\x21\x12\xa4\x42"
+        "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+        "\x80\x28\x00\x04\x00\x00\x00\x00\x0a",
+        29,
+        NULL,
+        PJNATH_EINSTUNMSGLEN,
+        NULL
+    },
+    {
+        "Respond unknown mandatory attribute with 420 and "
+        "UNKNOWN-ATTRIBUTES attribute",
+        NULL,
+        0,
+        &create1,
+        0,
+        &verify1
+    },
+    {
+        "Unknown but non-mandatory should be okay",
+        "\x00\x01\x00\x08\x21\x12\xa4\x42"
+        "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+        "\x80\xff\x00\x03\x00\x00\x00\x00",
+        28,
+        NULL,
+        PJ_SUCCESS,
+        &verify2
+    },
+    {
+        "String attr length larger than message",
+        "\x00\x01\x00\x08\x00\x12\xa4\x42"
+        "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+        "\x00\x06\x00\xff\x00\x00\x00\x00",
+        28,
+        NULL,
+        PJNATH_ESTUNINATTRLEN,
+        NULL
+    },
+    {
+        "Attribute other than FINGERPRINT after MESSAGE-INTEGRITY is allowed",
+        "\x00\x01\x00\x20\x21\x12\xa4\x42"
+        "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+        "\x00\x08\x00\x14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+        "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" // M-I
+        "\x80\x24\x00\x04\x00\x00\x00\x00",        // REFRESH-INTERVAL
+        52,
+        NULL,
+        PJ_SUCCESS,
+        NULL
+    },
+    {
+        "Attribute between MESSAGE-INTEGRITY and FINGERPRINT is allowed",
+        "\x00\x01\x00\x28\x21\x12\xa4\x42"
+        "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+        "\x00\x08\x00\x14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+        "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" // M-I
+        "\x80\x24\x00\x04\x00\x00\x00\x00"         // REFRESH-INTERVAL
+        "\x80\x28\x00\x04\xc7\xde\xdd\x65",        // FINGERPRINT
+        60,
+        NULL,
+        PJ_SUCCESS,
+        &verify5
+    },
+    {
+        "Attribute past FINGERPRINT is not allowed",
+        "\x00\x01\x00\x10\x21\x12\xa4\x42"
+        "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+        "\x80\x28\x00\x04\x00\x00\x00\x00"
+        "\x80\x24\x00\x04\x00\x00\x00\x00",
+        36,
+        NULL,
+        PJNATH_ESTUNFINGERPOS,
+        NULL
+    }
+};
+
+/* Render a pj_status_t as human-readable text.
+ * NOTE: returns a pointer to a static buffer, so the result is
+ * overwritten by the next call and the function is not thread-safe
+ * (acceptable for this single-threaded test).
+ */
+static const char *err(pj_status_t status)
+{
+    static char errmsg[PJ_ERR_MSG_SIZE];
+    pj_strerror(status, errmsg, sizeof(errmsg));
+    return errmsg;
+}
+
+/* Credential used when computing the MESSAGE-INTEGRITY key in
+ * decode_test() below.
+ */
+static const pj_str_t USERNAME = {"user", 4};
+static const pj_str_t PASSWORD = {"password", 8};
+
+/* Run every entry of tests[]: decode (or create) the message, then
+ * re-encode it with a MESSAGE-INTEGRITY key, decode the result again,
+ * and finally run the entry's verify callback on the first decoded
+ * message. Returns zero on success.
+ */
+static int decode_test(void)
+{
+    unsigned i;
+    pj_pool_t *pool;
+    int rc = 0;
+
+    pool = pj_pool_create(mem, "decode_test", 1024, 1024, NULL);
+
+    PJ_LOG(3,(THIS_FILE, " STUN decode test"));
+
+    for (i=0; i<PJ_ARRAY_SIZE(tests); ++i) {
+        struct test *t = &tests[i];
+        pj_stun_msg *msg, *msg2;
+        pj_uint8_t buf[1500];
+        pj_str_t key;
+        pj_size_t len;
+        pj_status_t status;
+
+        PJ_LOG(3,(THIS_FILE, " %s", t->title));
+
+        if (t->pdu) {
+            status = pj_stun_msg_decode(pool, (pj_uint8_t*)t->pdu, t->pdu_len,
+                                        PJ_STUN_IS_DATAGRAM | PJ_STUN_CHECK_PACKET,
+                                        &msg, NULL, NULL);
+
+            /* Check expected decode result */
+            if (t->expected_status != status) {
+                PJ_LOG(1,(THIS_FILE, " expecting status %d, got %d",
+                          t->expected_status, status));
+                rc = -10;
+                goto on_return;
+            }
+
+        } else {
+            msg = t->create(pool);
+            status = PJ_SUCCESS;
+        }
+
+        /* An (expected) decode failure ends this iteration here. */
+        if (status != PJ_SUCCESS)
+            continue;
+
+        /* Try to encode message */
+        pj_stun_create_key(pool, &key, NULL, &USERNAME, PJ_STUN_PASSWD_PLAIN, &PASSWORD);
+        status = pj_stun_msg_encode(msg, buf, sizeof(buf), 0, &key, &len);
+        if (status != PJ_SUCCESS) {
+            PJ_LOG(1,(THIS_FILE, " encode error: %s", err(status)));
+            rc = -40;
+            goto on_return;
+        }
+
+        /* Try to decode it once more */
+        status = pj_stun_msg_decode(pool, buf, len,
+                                    PJ_STUN_IS_DATAGRAM | PJ_STUN_CHECK_PACKET,
+                                    &msg2, NULL, NULL);
+        if (status != PJ_SUCCESS) {
+            PJ_LOG(1,(THIS_FILE, " subsequent decoding failed: %s", err(status)));
+            rc = -50;
+            goto on_return;
+        }
+
+        /* Verify (note: verification runs on the first decoded message) */
+        if (t->verify) {
+            rc = t->verify(msg);
+            if (rc != 0) {
+                goto on_return;
+            }
+        }
+    }
+
+on_return:
+    pj_pool_release(pool);
+    if (rc == 0)
+        PJ_LOG(3,(THIS_FILE, "...success!"));
+    return rc;
+}
+
+/* Create 420 response.
+ * Feeds a request containing an unknown mandatory attribute (0x00ff) to
+ * the decoder; decoding fails and the decoder builds the 420 error
+ * response, which is returned for inspection by verify1().
+ */
+static pj_stun_msg* create1(pj_pool_t *pool)
+{
+    char *pdu = "\x00\x01\x00\x08\x21\x12\xa4\x42"
+                "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+                "\x00\xff\x00\x04\x00\x00\x00\x00";
+    unsigned pdu_len = 28;
+    pj_stun_msg *msg, *res;
+    pj_status_t status;
+
+    status = pj_stun_msg_decode(pool, (pj_uint8_t*)pdu, pdu_len,
+                                PJ_STUN_IS_DATAGRAM | PJ_STUN_CHECK_PACKET,
+                                &msg, NULL, &res);
+    /* NOTE(review): these checks compile away when pj_assert is disabled
+     * (release builds), leaving "status" effectively unused there.
+     */
+    pj_assert(status != PJ_SUCCESS);
+    pj_assert(res != NULL);
+
+    return res;
+}
+
+/* Verify the 420 response produced by create1().
+ * An error response MUST carry ERROR-CODE, and a 420 MUST carry an
+ * UNKNOWN-ATTRIBUTES attribute listing the single offending type (0xff).
+ */
+static int verify1(pj_stun_msg *msg)
+{
+    pj_stun_errcode_attr *err_attr;
+    pj_stun_unknown_attr *unk_attr;
+
+    if (!PJ_STUN_IS_ERROR_RESPONSE(msg->hdr.type)) {
+        PJ_LOG(1,(THIS_FILE, " expecting error message"));
+        return -100;
+    }
+
+    /* Look both attributes up front; pj_stun_msg_find_attr has no
+     * side effects.
+     */
+    err_attr = (pj_stun_errcode_attr*)
+               pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_ERROR_CODE, 0);
+    unk_attr = (pj_stun_unknown_attr*)
+               pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_UNKNOWN_ATTRIBUTES, 0);
+
+    if (err_attr == NULL) {
+        PJ_LOG(1,(THIS_FILE, " missing ERROR-CODE attribute"));
+        return -110;
+    }
+
+    if (err_attr->err_code != 420) {
+        PJ_LOG(1,(THIS_FILE, " expecting 420 error"));
+        return -120;
+    }
+
+    if (unk_attr == NULL) {
+        PJ_LOG(1,(THIS_FILE, " missing UNKNOWN-ATTRIBUTE attribute"));
+        return -130;
+    }
+
+    if (unk_attr->attr_count != 1) {
+        PJ_LOG(1,(THIS_FILE, " expecting one unknown attribute"));
+        return -140;
+    }
+
+    if (unk_attr->attrs[0] != 0xff) {
+        PJ_LOG(1,(THIS_FILE, " expecting 0xff as unknown attribute"));
+        return -150;
+    }
+
+    return 0;
+}
+
+/* The unknown comprehension-optional attribute must survive parsing as a
+ * single generic binary attribute: type 0x80ff with declared length 3
+ * (padded to 4 on the wire).
+ * Fix: the original heading said the attribute count "should be zero" and
+ * two log messages said "4", contradicting the checks below.
+ */
+static int verify2(pj_stun_msg *msg)
+{
+    pj_stun_binary_attr *bin_attr;
+
+    if (msg->attr_count != 1) {
+        PJ_LOG(1,(THIS_FILE, " expecting one attribute count"));
+        return -200;
+    }
+
+    bin_attr = (pj_stun_binary_attr*)msg->attr[0];
+    if (bin_attr->hdr.type != 0x80ff) {
+        PJ_LOG(1,(THIS_FILE, " expecting attribute type 0x80ff"));
+        return -210;
+    }
+    if (bin_attr->hdr.length != 3) {
+        PJ_LOG(1,(THIS_FILE, " expecting attribute length = 3"));
+        return -220;
+    }
+    if (bin_attr->magic != PJ_STUN_MAGIC) {
+        PJ_LOG(1,(THIS_FILE, " expecting PJ_STUN_MAGIC for unknown attr"));
+        return -230;
+    }
+    if (bin_attr->length != 3) {
+        PJ_LOG(1,(THIS_FILE, " expecting data length 3"));
+        return -240;
+    }
+
+    return 0;
+}
+
+
+/* Attribute between MESSAGE-INTEGRITY and FINGERPRINT is allowed */
+static int verify5(pj_stun_msg *msg)
+{
+ if (msg->attr_count != 3) {
+ PJ_LOG(1,(THIS_FILE, " expecting 3 attribute count"));
+ return -500;
+ }
+
+ if (msg->attr[0]->type != PJ_STUN_ATTR_MESSAGE_INTEGRITY) {
+ PJ_LOG(1,(THIS_FILE, " expecting MESSAGE-INTEGRITY"));
+ return -510;
+ }
+ if (msg->attr[1]->type != PJ_STUN_ATTR_REFRESH_INTERVAL) {
+ PJ_LOG(1,(THIS_FILE, " expecting REFRESH-INTERVAL"));
+ return -520;
+ }
+ if (msg->attr[2]->type != PJ_STUN_ATTR_FINGERPRINT) {
+ PJ_LOG(1,(THIS_FILE, " expecting FINGERPRINT"));
+ return -530;
+ }
+
+ return 0;
+}
+
+
+static int decode_verify(void)
+{
+ /* Decode all attribute types */
+ return 0;
+}
+
/*
 * Test vectors, from:
 *  http://tools.ietf.org/html/draft-denis-behave-rfc3489bis-test-vectors-02
 */
typedef struct test_vector test_vector;

/* Builders that reconstruct each reference PDU locally, so the encoder
 * output can be compared byte-for-byte against the draft's PDU.
 */
static pj_stun_msg* create_msgint1(pj_pool_t *pool, test_vector *v);
static pj_stun_msg* create_msgint2(pj_pool_t *pool, test_vector *v);
static pj_stun_msg* create_msgint3(pj_pool_t *pool, test_vector *v);

/* Bitmask values for test_vector.options. */
enum
{
    USE_MESSAGE_INTEGRITY = 1,  /* Vector carries MESSAGE-INTEGRITY. */
    USE_FINGERPRINT = 2         /* Vector carries FINGERPRINT.       */
};
+
/* One entry per reference PDU from the draft: the raw packet, the
 * credential used for MESSAGE-INTEGRITY, and the builder function that
 * reconstructs the same message locally.
 */
static struct test_vector
{
    unsigned msg_type;      /* STUN method + class.                      */
    char *tsx_id;           /* 12-byte transaction ID.                   */
    char *pdu;              /* Reference encoded packet from the draft.  */
    unsigned pdu_len;       /* Length of pdu, in bytes.                  */
    unsigned options;       /* USE_MESSAGE_INTEGRITY / USE_FINGERPRINT.  */
    char *username;
    char *password;
    char *realm;
    char *nonce;
    pj_stun_msg* (*create)(pj_pool_t*, test_vector*);  /* local builder */
} test_vectors[] =
{
    {
        PJ_STUN_BINDING_REQUEST,
        "\xb7\xe7\xa7\x01\xbc\x34\xd6\x86\xfa\x87\xdf\xae",
        "\x00\x01\x00\x44\x21\x12\xa4\x42\xb7\xe7"
        "\xa7\x01\xbc\x34\xd6\x86\xfa\x87\xdf\xae"
        "\x00\x24\x00\x04\x6e\x00\x01\xff\x80\x29"
        "\x00\x08\x93\x2f\xf9\xb1\x51\x26\x3b\x36"
        "\x00\x06\x00\x09\x65\x76\x74\x6a\x3a\x68"
        "\x36\x76\x59\x20\x20\x20\x00\x08\x00\x14"
        "\x62\x4e\xeb\xdc\x3c\xc9\x2d\xd8\x4b\x74"
        "\xbf\x85\xd1\xc0\xf5\xde\x36\x87\xbd\x33"
        "\x80\x28\x00\x04\xad\x8a\x85\xff",
        88,
        USE_MESSAGE_INTEGRITY | USE_FINGERPRINT,
        "evtj:h6vY",
        "VOkJxbRl1RmTxUk/WvJxBt",
        "",
        "",
        &create_msgint1
    }
    /* disabled: see http://trac.pjsip.org/repos/ticket/960
    ,
    {
        PJ_STUN_BINDING_RESPONSE,
        "\xb7\xe7\xa7\x01\xbc\x34\xd6\x86\xfa\x87\xdf\xae",
        "\x01\x01\x00\x3c"
        "\x21\x12\xa4\x42"
        "\xb7\xe7\xa7\x01\xbc\x34\xd6\x86\xfa\x87\xdf\xae"
        "\x80\x22\x00\x0b"
        "\x74\x65\x73\x74\x20\x76\x65\x63\x74\x6f\x72\x20"
        "\x00\x20\x00\x08"
        "\x00\x01\xa1\x47\xe1\x12\xa6\x43"
        "\x00\x08\x00\x14"
        "\x2b\x91\xf5\x99\xfd\x9e\x90\xc3\x8c\x74\x89\xf9"
        "\x2a\xf9\xba\x53\xf0\x6b\xe7\xd7"
        "\x80\x28\x00\x04"
        "\xc0\x7d\x4c\x96",
        80,
        USE_MESSAGE_INTEGRITY | USE_FINGERPRINT,
        "evtj:h6vY",
        "VOkJxbRl1RmTxUk/WvJxBt",
        "",
        "",
        &create_msgint2
    }
    */

    /* disabled: see http://trac.pjsip.org/repos/ticket/960
#if defined(PJ_HAS_IPV6) && PJ_HAS_IPV6!=0
    ,
    {
        PJ_STUN_BINDING_RESPONSE,
        "\xb7\xe7\xa7\x01\xbc\x34\xd6\x86\xfa\x87\xdf\xae",
        "\x01\x01\x00\x48" //  Response type and message length
        "\x21\x12\xa4\x42" //  Message cookie
        "\xb7\xe7\xa7\x01" //  }
        "\xbc\x34\xd6\x86" //  }  Transaction ID
        "\xfa\x87\xdf\xae" //  }

        "\x80\x22\x00\x0b" //     SOFTWARE, length=11
        "\x74\x65\x73\x74"
        "\x20\x76\x65\x63"
        "\x74\x6f\x72\x20"
        "\x00\x20\x00\x14" //     XOR-MAPPED-ADDRESS
        "\x00\x02\xa1\x47"
        "\x01\x13\xa9\xfa"
        "\xa5\xd3\xf1\x79"
        "\xbc\x25\xf4\xb5"
        "\xbe\xd2\xb9\xd9"
        "\x00\x08\x00\x14" //     MESSAGE-INTEGRITY attribute header
        "\xa3\x82\x95\x4e" //  }
        "\x4b\xe6\x7b\xf1" //  }
        "\x17\x84\xc9\x7c" //  }  HMAC-SHA1 fingerprint
        "\x82\x92\xc2\x75" //  }
        "\xbf\xe3\xed\x41" //  }
        "\x80\x28\x00\x04" //     FINGERPRINT attribute header
        "\xc8\xfb\x0b\x4c" //     CRC32 fingerprint
        ,
        92,
        USE_MESSAGE_INTEGRITY | USE_FINGERPRINT,
        "evtj:h6vY",
        "VOkJxbRl1RmTxUk/WvJxBt",
        "",
        "",
        &create_msgint3
    }
#endif
    */
};
+
+
+static char* print_binary(const pj_uint8_t *data, unsigned data_len)
+{
+ static char buf[1500];
+ unsigned length = sizeof(buf);
+ char *p = buf;
+ unsigned i;
+
+ for (i=0; i<data_len;) {
+ unsigned j;
+
+ pj_ansi_snprintf(p, 1500-(p-buf),
+ "%04d-%04d ",
+ i, (i+20 < data_len) ? i+20 : data_len);
+ p += 12;
+
+ for (j=0; j<20 && i<data_len && p<(buf+length-10); ++j, ++i) {
+ pj_ansi_sprintf(p, "%02x ", (*data) & 0xFF);
+ p += 3;
+ data++;
+ }
+
+ pj_ansi_sprintf(p, "\n");
+ p++;
+ }
+
+ return buf;
+}
+
+static int cmp_buf(const pj_uint8_t *s1, const pj_uint8_t *s2, unsigned len)
+{
+ unsigned i;
+ for (i=0; i<len; ++i) {
+ if (s1[i] != s2[i])
+ return i;
+ }
+
+ return -1;
+}
+
/* For each enabled test vector: decode the reference PDU, rebuild the
 * same message locally, encode it, compare the bytes against the
 * reference, and finally authenticate the request/response.
 * Returns 0 on success, negative error code otherwise.
 */
static int fingerprint_test_vector()
{
    pj_pool_t *pool;
    pj_status_t status;
    unsigned i;
    int rc = 0;

    /* To avoid function not referenced warnings */
    (void)create_msgint2;
    (void)create_msgint3;

    PJ_LOG(3,(THIS_FILE, "  draft-denis-behave-rfc3489bis-test-vectors-02"));

    pool = pj_pool_create(mem, "fingerprint", 1024, 1024, NULL);

    for (i=0; i<PJ_ARRAY_SIZE(test_vectors); ++i) {
        struct test_vector *v;
        pj_stun_msg *ref_msg, *msg;
        pj_size_t parsed_len;
        pj_size_t len;
        unsigned pos;
        pj_uint8_t buf[1500];
        char print[1500];
        pj_str_t key;

        PJ_LOG(3,(THIS_FILE, "   Running test %d/%d", i,
                  PJ_ARRAY_SIZE(test_vectors)));

        v = &test_vectors[i];

        /* Print reference message */
        PJ_LOG(4,(THIS_FILE, "Reference message PDU:\n%s",
                  print_binary((pj_uint8_t*)v->pdu, v->pdu_len)));

        /* Try to parse the reference message first */
        status = pj_stun_msg_decode(pool, (pj_uint8_t*)v->pdu, v->pdu_len,
                                    PJ_STUN_IS_DATAGRAM | PJ_STUN_CHECK_PACKET,
                                    &ref_msg, &parsed_len, NULL);
        if (status != PJ_SUCCESS) {
            PJ_LOG(1,(THIS_FILE, "    Error decoding reference message"));
            rc = -1010;
            goto on_return;
        }

        /* The whole PDU must have been consumed. */
        if (parsed_len != v->pdu_len) {
            PJ_LOG(1,(THIS_FILE, "    Parsed len error"));
            rc = -1020;
            goto on_return;
        }

        /* Print the reference message */
        pj_stun_msg_dump(ref_msg, print, sizeof(print), NULL);
        PJ_LOG(4,(THIS_FILE, "Reference message:\n%s", print));

        /* Create our message */
        msg = v->create(pool, v);
        if (msg == NULL) {
            PJ_LOG(1,(THIS_FILE, "    Error creating stun message"));
            rc = -1030;
            goto on_return;
        }

        /* Encode message.
         * NOTE(review): the return status of pj_stun_msg_encode() is not
         * checked here; if encoding fails, 'len' below is used
         * uninitialized. Consider checking it.
         */
        if (v->options & USE_MESSAGE_INTEGRITY) {
            pj_str_t s1, s2, r;

            /* Derive the long-term/short-term key for MESSAGE-INTEGRITY. */
            pj_stun_create_key(pool, &key, pj_cstr(&r, v->realm),
                               pj_cstr(&s1, v->username),
                               PJ_STUN_PASSWD_PLAIN,
                               pj_cstr(&s2, v->password));
            pj_stun_msg_encode(msg, buf, sizeof(buf), 0, &key, &len);

        } else {
            pj_stun_msg_encode(msg, buf, sizeof(buf), 0, NULL, &len);
        }

        /* Print our raw message */
        PJ_LOG(4,(THIS_FILE, "Message PDU:\n%s",
                  print_binary((pj_uint8_t*)buf, len)));

        /* Print our message */
        pj_stun_msg_dump(msg, print, sizeof(print), NULL);
        PJ_LOG(4,(THIS_FILE, "Message is:\n%s", print));

        /* Compare message length */
        if (len != v->pdu_len) {
            PJ_LOG(1,(THIS_FILE, "    Message length mismatch"));
            rc = -1050;
            goto on_return;
        }

        /* Byte-for-byte comparison against the reference PDU. */
        pos = cmp_buf(buf, (const pj_uint8_t*)v->pdu, len);
        if (pos != (unsigned)-1) {
            PJ_LOG(1,(THIS_FILE, "    Message mismatch at byte %d", pos));
            rc = -1060;
            goto on_return;
        }

        /* Authenticate the request/response */
        if (v->options & USE_MESSAGE_INTEGRITY) {
            if (PJ_STUN_IS_REQUEST(msg->hdr.type)) {
                pj_stun_auth_cred cred;
                pj_status_t status;

                pj_bzero(&cred, sizeof(cred));
                cred.type = PJ_STUN_AUTH_CRED_STATIC;
                cred.data.static_cred.realm = pj_str(v->realm);
                cred.data.static_cred.username = pj_str(v->username);
                cred.data.static_cred.data = pj_str(v->password);
                cred.data.static_cred.nonce = pj_str(v->nonce);

                status = pj_stun_authenticate_request(buf, len, msg,
                                                      &cred, pool, NULL, NULL);
                if (status != PJ_SUCCESS) {
                    char errmsg[PJ_ERR_MSG_SIZE];
                    pj_strerror(status, errmsg, sizeof(errmsg));
                    PJ_LOG(1,(THIS_FILE,
                              "    Request authentication failed: %s",
                              errmsg));
                    rc = -1070;
                    goto on_return;
                }

            } else if (PJ_STUN_IS_RESPONSE(msg->hdr.type)) {
                pj_status_t status;
                status = pj_stun_authenticate_response(buf, len, msg, &key);
                if (status != PJ_SUCCESS) {
                    char errmsg[PJ_ERR_MSG_SIZE];
                    pj_strerror(status, errmsg, sizeof(errmsg));
                    PJ_LOG(1,(THIS_FILE,
                              "    Response authentication failed: %s",
                              errmsg));
                    rc = -1080;
                    goto on_return;
                }
            }
        }
    }


on_return:
    pj_pool_release(pool);
    return rc;
}
+
+static pj_stun_msg* create_msgint1(pj_pool_t *pool, test_vector *v)
+{
+ pj_stun_msg *msg;
+ pj_timestamp u64;
+ pj_str_t s1;
+ pj_status_t status;
+
+ status = pj_stun_msg_create(pool, v->msg_type, PJ_STUN_MAGIC,
+ (pj_uint8_t*)v->tsx_id, &msg);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ status = pj_stun_msg_add_uint_attr(pool, msg, PJ_STUN_ATTR_PRIORITY,
+ 0x6e0001ff);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ u64.u32.hi = 0x932ff9b1;
+ u64.u32.lo = 0x51263b36;
+ status = pj_stun_msg_add_uint64_attr(pool, msg,
+ PJ_STUN_ATTR_ICE_CONTROLLED, &u64);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ status = pj_stun_msg_add_string_attr(pool, msg, PJ_STUN_ATTR_USERNAME,
+ pj_cstr(&s1, v->username));
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ status = pj_stun_msg_add_msgint_attr(pool, msg);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ status = pj_stun_msg_add_uint_attr(pool, msg, PJ_STUN_ATTR_FINGERPRINT, 0);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ return msg;
+
+on_error:
+ app_perror(" error: create_msgint1()", status);
+ return NULL;
+}
+
+static pj_stun_msg* create_msgint2(pj_pool_t *pool, test_vector *v)
+{
+ pj_stun_msg *msg;
+ pj_sockaddr_in mapped_addr;
+ pj_str_t s1;
+ pj_status_t status;
+
+ status = pj_stun_msg_create(pool, v->msg_type, PJ_STUN_MAGIC,
+ (pj_uint8_t*)v->tsx_id, &msg);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ status = pj_stun_msg_add_string_attr(pool, msg, PJ_STUN_ATTR_SOFTWARE,
+ pj_cstr(&s1, "test vector"));
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ status = pj_sockaddr_in_init(&mapped_addr, pj_cstr(&s1, "192.0.2.1"),
+ 32853);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ status = pj_stun_msg_add_sockaddr_attr(pool, msg,
+ PJ_STUN_ATTR_XOR_MAPPED_ADDR,
+ PJ_TRUE, &mapped_addr,
+ sizeof(pj_sockaddr_in));
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ status = pj_stun_msg_add_msgint_attr(pool, msg);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ status = pj_stun_msg_add_uint_attr(pool, msg, PJ_STUN_ATTR_FINGERPRINT, 0);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ return msg;
+
+on_error:
+ app_perror(" error: create_msgint2()", status);
+ return NULL;
+}
+
+
+static pj_stun_msg* create_msgint3(pj_pool_t *pool, test_vector *v)
+{
+ pj_stun_msg *msg;
+ pj_sockaddr mapped_addr;
+ pj_str_t s1;
+ pj_status_t status;
+
+ status = pj_stun_msg_create(pool, v->msg_type, PJ_STUN_MAGIC,
+ (pj_uint8_t*)v->tsx_id, &msg);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ status = pj_stun_msg_add_string_attr(pool, msg, PJ_STUN_ATTR_SOFTWARE,
+ pj_cstr(&s1, "test vector"));
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ status = pj_sockaddr_init(pj_AF_INET6(), &mapped_addr,
+ pj_cstr(&s1, "2001:db8:1234:5678:11:2233:4455:6677"),
+ 32853);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ status = pj_stun_msg_add_sockaddr_attr(pool, msg,
+ PJ_STUN_ATTR_XOR_MAPPED_ADDR,
+ PJ_TRUE, &mapped_addr,
+ sizeof(pj_sockaddr));
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ status = pj_stun_msg_add_msgint_attr(pool, msg);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ status = pj_stun_msg_add_uint_attr(pool, msg, PJ_STUN_ATTR_FINGERPRINT, 0);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ return msg;
+
+on_error:
+ app_perror(" error: create_msgint3()", status);
+ return NULL;
+}
+
+
+/* Compare two messages */
+static int cmp_msg(const pj_stun_msg *msg1, const pj_stun_msg *msg2)
+{
+ unsigned i;
+
+ if (msg1->hdr.type != msg2->hdr.type)
+ return -10;
+ if (msg1->hdr.length != msg2->hdr.length)
+ return -20;
+ if (msg1->hdr.magic != msg2->hdr.magic)
+ return -30;
+ if (pj_memcmp(msg1->hdr.tsx_id, msg2->hdr.tsx_id, sizeof(msg1->hdr.tsx_id)))
+ return -40;
+ if (msg1->attr_count != msg2->attr_count)
+ return -50;
+
+ for (i=0; i<msg1->attr_count; ++i) {
+ const pj_stun_attr_hdr *a1 = msg1->attr[i];
+ const pj_stun_attr_hdr *a2 = msg2->attr[i];
+
+ if (a1->type != a2->type)
+ return -60;
+ if (a1->length != a2->length)
+ return -70;
+ }
+
+ return 0;
+}
+
/* Decode and authenticate message with unknown non-mandatory attribute */
static int handle_unknown_non_mandatory(void)
{
    pj_pool_t *pool = pj_pool_create(mem, NULL, 1000, 1000, NULL);
    pj_stun_msg *msg0, *msg1, *msg2;
    pj_uint8_t data[] = { 1, 2, 3, 4, 5, 6};
    pj_uint8_t packet[500];
    pj_stun_auth_cred cred;
    pj_size_t len;
    pj_status_t rc;

    /* NOTE(review): USERNAME and PASSWORD are file-scope pj_str_t
     * fixtures defined elsewhere in this file.  Also note the idiom
     * below of accumulating pj_status_t values with '+=': any non-zero
     * result means at least one step failed, though individual failure
     * causes are not distinguished.
     */
    PJ_LOG(3,(THIS_FILE, "  handling unknown non-mandatory attr"));

    /* Build a request carrying an unknown comprehension-optional
     * attribute (0x80ff) plus USERNAME and MESSAGE-INTEGRITY.
     */
    PJ_LOG(3,(THIS_FILE, "    encoding"));
    rc = pj_stun_msg_create(pool, PJ_STUN_BINDING_REQUEST, PJ_STUN_MAGIC, NULL, &msg0);
    rc += pj_stun_msg_add_string_attr(pool, msg0, PJ_STUN_ATTR_USERNAME, &USERNAME);
    rc += pj_stun_msg_add_binary_attr(pool, msg0, 0x80ff, data, sizeof(data));
    rc += pj_stun_msg_add_msgint_attr(pool, msg0);
    rc += pj_stun_msg_encode(msg0, packet, sizeof(packet), 0, &PASSWORD, &len);

#if 0
    /* Debug helper: dump the encoded packet as a C array. */
    if (1) {
        unsigned i;
        puts("");
        printf("{ ");
        for (i=0; i<len; ++i) printf("0x%02x, ", packet[i]);
        puts(" }");
    }
#endif

    /* Decode it back; the unknown attribute must not make this fail
     * and the decoded message must match the original structurally.
     */
    PJ_LOG(3,(THIS_FILE, "    decoding"));
    rc += pj_stun_msg_decode(pool, packet, len, PJ_STUN_IS_DATAGRAM | PJ_STUN_CHECK_PACKET,
                             &msg1, NULL, NULL);

    rc += cmp_msg(msg0, msg1);

    /* Authenticate with static credential; MESSAGE-INTEGRITY must
     * still verify even with the unknown attribute present.
     */
    pj_bzero(&cred, sizeof(cred));
    cred.type = PJ_STUN_AUTH_CRED_STATIC;
    cred.data.static_cred.username = USERNAME;
    cred.data.static_cred.data_type = PJ_STUN_PASSWD_PLAIN;
    cred.data.static_cred.data = PASSWORD;

    PJ_LOG(3,(THIS_FILE, "    authenticating"));
    rc += pj_stun_authenticate_request(packet, len, msg1, &cred, pool, NULL, NULL);

    /* Cloning must preserve the unknown attribute too. */
    PJ_LOG(3,(THIS_FILE, "    clone"));
    msg2 = pj_stun_msg_clone(pool, msg1);
    rc += cmp_msg(msg0, msg2);

    pj_pool_release(pool);

    return rc==0 ? 0 : -4410;
}
+
+
/* Entry point for the STUN message tests. Forces a known padding
 * character so encoded PDUs are byte-predictable, runs each sub-test
 * until one fails, then restores the previous padding character.
 */
int stun_test(void)
{
    int old_pad;
    int rc;

    old_pad = pj_stun_set_padding_char(32);

    rc = decode_test();
    if (rc == 0)
        rc = decode_verify();
    if (rc == 0)
        rc = fingerprint_test_vector();
    if (rc == 0)
        rc = handle_unknown_non_mandatory();

    pj_stun_set_padding_char(old_pad);
    return rc;
}
+
diff --git a/pjnath/src/pjnath-test/stun_sock_test.c b/pjnath/src/pjnath-test/stun_sock_test.c
new file mode 100644
index 0000000..7a309ea
--- /dev/null
+++ b/pjnath/src/pjnath-test/stun_sock_test.c
@@ -0,0 +1,849 @@
+/* $Id: stun_sock_test.c 3553 2011-05-05 06:14:19Z nanang $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include "test.h"
+
+#define THIS_FILE "stun_sock_test.c"
+
/* Behavior flags for the test server (struct stun_srv.flag). */
enum {
    RESPOND_STUN    = 1,    /* Parse requests, send Binding responses.   */
    WITH_MAPPED     = 2,    /* Responses carry MAPPED-ADDRESS.           */
    WITH_XOR_MAPPED = 4,    /* Responses carry XOR-MAPPED-ADDRESS.       */

    ECHO            = 8     /* Echo every received packet back verbatim. */
};
+
/*
 * Simple STUN server
 */
struct stun_srv
{
    pj_activesock_t     *asock;        /* UDP active socket.                 */
    unsigned             flag;         /* Behavior flags (enum above).       */
    pj_sockaddr          addr;         /* Bound local address.               */
    unsigned             rx_cnt;       /* Number of packets received.        */
    pj_ioqueue_op_key_t  send_key;     /* Op key for async sendto.           */
    pj_str_t             ip_to_send;   /* IP put into (XOR-)MAPPED-ADDRESS.  */
    pj_uint16_t          port_to_send; /* Port put into (XOR-)MAPPED-ADDRESS.*/
};
+
/* Active socket callback: a datagram arrived at the test server.
 * Depending on srv->flag, either respond with a STUN Binding response
 * (optionally carrying MAPPED-ADDRESS or XOR-MAPPED-ADDRESS built from
 * srv->ip_to_send/port_to_send), echo the packet back, or silently
 * count and drop it. Always returns PJ_TRUE to keep reading.
 */
static pj_bool_t srv_on_data_recvfrom(pj_activesock_t *asock,
                                      void *data,
                                      pj_size_t size,
                                      const pj_sockaddr_t *src_addr,
                                      int addr_len,
                                      pj_status_t status)
{
    struct stun_srv *srv;
    pj_ssize_t sent;

    srv = (struct stun_srv*) pj_activesock_get_user_data(asock);

    /* Ignore error */
    if (status != PJ_SUCCESS)
        return PJ_TRUE;

    ++srv->rx_cnt;

    /* STUN-responder mode: parse the request and answer it. */
    if (srv->flag & RESPOND_STUN) {
        pj_pool_t *pool;
        pj_stun_msg *req_msg, *res_msg;

        /* Temporary pool for decoding/encoding this one exchange. */
        pool = pj_pool_create(mem, "stunsrv", 512, 512, NULL);

        /* Parse request */
        status = pj_stun_msg_decode(pool, (pj_uint8_t*)data, size,
                                    PJ_STUN_IS_DATAGRAM | PJ_STUN_CHECK_PACKET,
                                    &req_msg, NULL, NULL);
        if (status != PJ_SUCCESS) {
            app_perror("    pj_stun_msg_decode()", status);
            pj_pool_release(pool);
            return PJ_TRUE;
        }

        /* Create response */
        status = pj_stun_msg_create(pool, PJ_STUN_BINDING_RESPONSE, PJ_STUN_MAGIC,
                                    req_msg->hdr.tsx_id, &res_msg);
        if (status != PJ_SUCCESS) {
            app_perror("    pj_stun_msg_create()", status);
            pj_pool_release(pool);
            return PJ_TRUE;
        }

        /* Add MAPPED-ADDRESS or XOR-MAPPED-ADDRESS (or don't add) */
        if (srv->flag & WITH_MAPPED) {
            pj_sockaddr_in addr;

            pj_sockaddr_in_init(&addr, &srv->ip_to_send, srv->port_to_send);
            pj_stun_msg_add_sockaddr_attr(pool, res_msg, PJ_STUN_ATTR_MAPPED_ADDR,
                                          PJ_FALSE, &addr, sizeof(addr));
        } else if (srv->flag & WITH_XOR_MAPPED) {
            pj_sockaddr_in addr;

            pj_sockaddr_in_init(&addr, &srv->ip_to_send, srv->port_to_send);
            pj_stun_msg_add_sockaddr_attr(pool, res_msg,
                                          PJ_STUN_ATTR_XOR_MAPPED_ADDR,
                                          PJ_TRUE, &addr, sizeof(addr));
        }

        /* Encode the response back into the rx buffer.
         * NOTE(review): the buffer passed to start_recvfrom() is 512
         * bytes, but a hard-coded limit of 100 is used here — enough
         * for the small responses built above, yet fragile if more
         * attributes are added later. Confirm before extending.
         */
        status = pj_stun_msg_encode(res_msg, (pj_uint8_t*)data, 100, 0,
                                    NULL, &size);
        if (status != PJ_SUCCESS) {
            app_perror("    pj_stun_msg_encode()", status);
            pj_pool_release(pool);
            return PJ_TRUE;
        }

        /* Send back (asynchronously; result intentionally ignored). */
        sent = size;
        pj_activesock_sendto(asock, &srv->send_key, data, &sent, 0,
                             src_addr, addr_len);

        pj_pool_release(pool);

    } else if (srv->flag & ECHO) {
        /* Echo mode: send the raw packet back unchanged. */
        sent = size;
        pj_activesock_sendto(asock, &srv->send_key, data, &sent, 0,
                             src_addr, addr_len);

    }

    return PJ_TRUE;
}
+
+static pj_status_t create_server(pj_pool_t *pool,
+ pj_ioqueue_t *ioqueue,
+ unsigned flag,
+ struct stun_srv **p_srv)
+{
+ struct stun_srv *srv;
+ pj_activesock_cb activesock_cb;
+ pj_status_t status;
+
+ srv = PJ_POOL_ZALLOC_T(pool, struct stun_srv);
+ srv->flag = flag;
+ srv->ip_to_send = pj_str("1.1.1.1");
+ srv->port_to_send = 1000;
+
+ status = pj_sockaddr_in_init(&srv->addr.ipv4, NULL, 0);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ pj_bzero(&activesock_cb, sizeof(activesock_cb));
+ activesock_cb.on_data_recvfrom = &srv_on_data_recvfrom;
+ status = pj_activesock_create_udp(pool, &srv->addr, NULL, ioqueue,
+ &activesock_cb, srv, &srv->asock,
+ &srv->addr);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ pj_ioqueue_op_key_init(&srv->send_key, sizeof(srv->send_key));
+
+ status = pj_activesock_start_recvfrom(srv->asock, pool, 512, 0);
+ if (status != PJ_SUCCESS) {
+ pj_activesock_close(srv->asock);
+ return status;
+ }
+
+ *p_srv = srv;
+ return PJ_SUCCESS;
+}
+
/* Shut down the test server by closing its active socket; the server
 * struct itself is owned by (and released with) the caller's pool.
 */
static void destroy_server(struct stun_srv *srv)
{
    pj_activesock_close(srv->asock);
}
+
+
/* Client-side state for one pj_stun_sock under test. */
struct stun_client
{
    pj_pool_t           *pool;            /* Owns this struct and the sock. */
    pj_stun_sock        *sock;            /* The STUN socket under test.    */

    pj_ioqueue_op_key_t  send_key;        /* Op key for async sends.        */
    pj_bool_t            destroy_on_err;  /* Destroy sock inside on_status()
                                           * when an error is reported.     */

    unsigned             on_status_cnt;   /* Number of on_status() calls.   */
    pj_stun_sock_op      last_op;         /* Last reported operation.       */
    pj_status_t          last_status;     /* Last reported status.          */

    unsigned             on_rx_data_cnt;  /* Number of on_rx_data() calls.  */
};
+
+static pj_bool_t stun_sock_on_status(pj_stun_sock *stun_sock,
+ pj_stun_sock_op op,
+ pj_status_t status)
+{
+ struct stun_client *client;
+
+ client = (struct stun_client*) pj_stun_sock_get_user_data(stun_sock);
+ client->on_status_cnt++;
+ client->last_op = op;
+ client->last_status = status;
+
+ if (status != PJ_SUCCESS && client->destroy_on_err) {
+ pj_stun_sock_destroy(client->sock);
+ client->sock = NULL;
+ return PJ_FALSE;
+ }
+
+ return PJ_TRUE;
+}
+
+static pj_bool_t stun_sock_on_rx_data(pj_stun_sock *stun_sock,
+ void *pkt,
+ unsigned pkt_len,
+ const pj_sockaddr_t *src_addr,
+ unsigned addr_len)
+{
+ struct stun_client *client;
+
+ PJ_UNUSED_ARG(pkt);
+ PJ_UNUSED_ARG(pkt_len);
+ PJ_UNUSED_ARG(src_addr);
+ PJ_UNUSED_ARG(addr_len);
+
+ client = (struct stun_client*) pj_stun_sock_get_user_data(stun_sock);
+ client->on_rx_data_cnt++;
+
+ return PJ_TRUE;
+}
+
+static pj_status_t create_client(pj_stun_config *cfg,
+ struct stun_client **p_client,
+ pj_bool_t destroy_on_err)
+{
+ pj_pool_t *pool;
+ struct stun_client *client;
+ pj_stun_sock_cfg sock_cfg;
+ pj_stun_sock_cb cb;
+ pj_status_t status;
+
+ pool = pj_pool_create(mem, "test", 512, 512, NULL);
+ client = PJ_POOL_ZALLOC_T(pool, struct stun_client);
+ client->pool = pool;
+
+ pj_stun_sock_cfg_default(&sock_cfg);
+
+ pj_bzero(&cb, sizeof(cb));
+ cb.on_status = &stun_sock_on_status;
+ cb.on_rx_data = &stun_sock_on_rx_data;
+ status = pj_stun_sock_create(cfg, NULL, pj_AF_INET(), &cb,
+ &sock_cfg, client, &client->sock);
+ if (status != PJ_SUCCESS) {
+ app_perror(" pj_stun_sock_create()", status);
+ pj_pool_release(pool);
+ return status;
+ }
+
+ pj_stun_sock_set_user_data(client->sock, client);
+
+ pj_ioqueue_op_key_init(&client->send_key, sizeof(client->send_key));
+
+ client->destroy_on_err = destroy_on_err;
+
+ *p_client = client;
+
+ return PJ_SUCCESS;
+}
+
+
+static void destroy_client(struct stun_client *client)
+{
+ if (client->sock) {
+ pj_stun_sock_destroy(client->sock);
+ client->sock = NULL;
+ }
+ pj_pool_release(client->pool);
+}
+
+static void handle_events(pj_stun_config *cfg, unsigned msec_delay)
+{
+ pj_time_val delay;
+
+ pj_timer_heap_poll(cfg->timer_heap, NULL);
+
+ delay.sec = 0;
+ delay.msec = msec_delay;
+ pj_time_val_normalize(&delay);
+
+ pj_ioqueue_poll(cfg->ioqueue, &delay);
+}
+
/*
 * Timeout test: scenario when no response is received from server
 */
static int timeout_test(pj_stun_config *cfg, pj_bool_t destroy_on_err)
{
    struct stun_srv *srv;
    struct stun_client *client;
    pj_str_t srv_addr;
    pj_time_val timeout, t;
    int ret = 0;
    pj_status_t status;

    PJ_LOG(3,(THIS_FILE, "  timeout test [%d]", destroy_on_err));

    status = create_client(cfg, &client, destroy_on_err);
    if (status != PJ_SUCCESS)
        return -10;

    /* flag==0: the server counts packets but never answers, so the
     * client's Binding request must eventually time out.
     */
    status = create_server(client->pool, cfg->ioqueue, 0, &srv);
    if (status != PJ_SUCCESS) {
        destroy_client(client);
        return -20;
    }

    srv_addr = pj_str("127.0.0.1");
    status = pj_stun_sock_start(client->sock, &srv_addr,
                                pj_ntohs(srv->addr.ipv4.sin_port), NULL);
    if (status != PJ_SUCCESS) {
        destroy_server(srv);
        destroy_client(client);
        return -30;
    }

    /* Wait until on_status() callback is called with the failure,
     * capped at 60 seconds of wall time.
     */
    pj_gettimeofday(&timeout);
    timeout.sec += 60;
    do {
        handle_events(cfg, 100);
        pj_gettimeofday(&t);
    } while (client->on_status_cnt==0 && PJ_TIME_VAL_LT(t, timeout));

    /* Check that callback with correct operation is called */
    if (client->last_op != PJ_STUN_SOCK_BINDING_OP) {
        PJ_LOG(3,(THIS_FILE, "    error: expecting Binding operation status"));
        ret = -40;
        goto on_return;
    }
    /* .. and with the correct status */
    if (client->last_status != PJNATH_ESTUNTIMEDOUT) {
        PJ_LOG(3,(THIS_FILE, "    error: expecting PJNATH_ESTUNTIMEDOUT"));
        ret = -50;
        goto on_return;
    }
    /* Check that server received correct retransmissions */
    if (srv->rx_cnt != PJ_STUN_MAX_TRANSMIT_COUNT) {
        PJ_LOG(3,(THIS_FILE, "    error: expecting %d retransmissions, got %d",
                  PJ_STUN_MAX_TRANSMIT_COUNT, srv->rx_cnt));
        ret = -60;
        goto on_return;
    }
    /* Check that client doesn't receive anything */
    if (client->on_rx_data_cnt != 0) {
        PJ_LOG(3,(THIS_FILE, "    error: client shouldn't have received anything"));
        ret = -70;
        goto on_return;
    }

on_return:
    destroy_server(srv);
    destroy_client(client);
    return ret;
}
+
+
+/*
+ * Invalid response scenario: when server returns no MAPPED-ADDRESS or
+ * XOR-MAPPED-ADDRESS attribute.
+ */
+static int missing_attr_test(pj_stun_config *cfg, pj_bool_t destroy_on_err)
+{
+ struct stun_srv *srv;
+ struct stun_client *client;
+ pj_str_t srv_addr;
+ pj_time_val timeout, t;
+ int ret = 0;
+ pj_status_t status;
+
+ PJ_LOG(3,(THIS_FILE, " missing attribute test [%d]", destroy_on_err));
+
+ status = create_client(cfg, &client, destroy_on_err);
+ if (status != PJ_SUCCESS)
+ return -110;
+
+ status = create_server(client->pool, cfg->ioqueue, RESPOND_STUN, &srv);
+ if (status != PJ_SUCCESS) {
+ destroy_client(client);
+ return -120;
+ }
+
+ srv_addr = pj_str("127.0.0.1");
+ status = pj_stun_sock_start(client->sock, &srv_addr,
+ pj_ntohs(srv->addr.ipv4.sin_port), NULL);
+ if (status != PJ_SUCCESS) {
+ destroy_server(srv);
+ destroy_client(client);
+ return -130;
+ }
+
+ /* Wait until on_status() callback is called with the failure */
+ pj_gettimeofday(&timeout);
+ timeout.sec += 60;
+ do {
+ handle_events(cfg, 100);
+ pj_gettimeofday(&t);
+ } while (client->on_status_cnt==0 && PJ_TIME_VAL_LT(t, timeout));
+
+ /* Check that callback with correct operation is called */
+ if (client->last_op != PJ_STUN_SOCK_BINDING_OP) {
+ PJ_LOG(3,(THIS_FILE, " error: expecting Binding operation status"));
+ ret = -140;
+ goto on_return;
+ }
+ if (client->last_status != PJNATH_ESTUNNOMAPPEDADDR) {
+ PJ_LOG(3,(THIS_FILE, " error: expecting PJNATH_ESTUNNOMAPPEDADDR"));
+ ret = -150;
+ goto on_return;
+ }
+ /* Check that client doesn't receive anything */
+ if (client->on_rx_data_cnt != 0) {
+ PJ_LOG(3,(THIS_FILE, " error: client shouldn't have received anything"));
+ ret = -170;
+ goto on_return;
+ }
+
+on_return:
+ destroy_server(srv);
+ destroy_client(client);
+ return ret;
+}
+
+/*
+ * Keep-alive test.
+ */
+static int keep_alive_test(pj_stun_config *cfg)
+{
+ struct stun_srv *srv;
+ struct stun_client *client;
+ pj_sockaddr_in mapped_addr;
+ pj_stun_sock_info info;
+ pj_str_t srv_addr;
+ pj_time_val timeout, t;
+ int ret = 0;
+ pj_status_t status;
+
+ PJ_LOG(3,(THIS_FILE, " normal operation"));
+
+ status = create_client(cfg, &client, PJ_TRUE);
+ if (status != PJ_SUCCESS)
+ return -310;
+
+ status = create_server(client->pool, cfg->ioqueue, RESPOND_STUN|WITH_XOR_MAPPED, &srv);
+ if (status != PJ_SUCCESS) {
+ destroy_client(client);
+ return -320;
+ }
+
+ /*
+ * Part 1: initial Binding resolution.
+ */
+ PJ_LOG(3,(THIS_FILE, " initial Binding request"));
+ srv_addr = pj_str("127.0.0.1");
+ status = pj_stun_sock_start(client->sock, &srv_addr,
+ pj_ntohs(srv->addr.ipv4.sin_port), NULL);
+ if (status != PJ_SUCCESS) {
+ destroy_server(srv);
+ destroy_client(client);
+ return -330;
+ }
+
+ /* Wait until on_status() callback is called with success status */
+ pj_gettimeofday(&timeout);
+ timeout.sec += 60;
+ do {
+ handle_events(cfg, 100);
+ pj_gettimeofday(&t);
+ } while (client->on_status_cnt==0 && PJ_TIME_VAL_LT(t, timeout));
+
+ /* Check that callback with correct operation is called */
+ if (client->last_op != PJ_STUN_SOCK_BINDING_OP) {
+ PJ_LOG(3,(THIS_FILE, " error: expecting Binding operation status"));
+ ret = -340;
+ goto on_return;
+ }
+ if (client->last_status != PJ_SUCCESS) {
+ PJ_LOG(3,(THIS_FILE, " error: expecting PJ_SUCCESS status"));
+ ret = -350;
+ goto on_return;
+ }
+ /* Check that client doesn't receive anything */
+ if (client->on_rx_data_cnt != 0) {
+ PJ_LOG(3,(THIS_FILE, " error: client shouldn't have received anything"));
+ ret = -370;
+ goto on_return;
+ }
+
+ /* Get info */
+ pj_bzero(&info, sizeof(info));
+ pj_stun_sock_get_info(client->sock, &info);
+
+ /* Check that we have server address */
+ if (!pj_sockaddr_has_addr(&info.srv_addr)) {
+ PJ_LOG(3,(THIS_FILE, " error: missing server address"));
+ ret = -380;
+ goto on_return;
+ }
+ /* .. and bound address port must not be zero */
+ if (pj_sockaddr_get_port(&info.bound_addr)==0) {
+ PJ_LOG(3,(THIS_FILE, " error: bound address is zero"));
+ ret = -381;
+ goto on_return;
+ }
+ /* .. and mapped address */
+ if (!pj_sockaddr_has_addr(&info.mapped_addr)) {
+ PJ_LOG(3,(THIS_FILE, " error: missing mapped address"));
+ ret = -382;
+ goto on_return;
+ }
+ /* verify the mapped address */
+ pj_sockaddr_in_init(&mapped_addr, &srv->ip_to_send, srv->port_to_send);
+ if (pj_sockaddr_cmp(&info.mapped_addr, &mapped_addr) != 0) {
+ PJ_LOG(3,(THIS_FILE, " error: mapped address mismatched"));
+ ret = -383;
+ goto on_return;
+ }
+
+ /* .. and at least one alias */
+ if (info.alias_cnt == 0) {
+ PJ_LOG(3,(THIS_FILE, " error: must have at least one alias"));
+ ret = -384;
+ goto on_return;
+ }
+ if (!pj_sockaddr_has_addr(&info.aliases[0])) {
+ PJ_LOG(3,(THIS_FILE, " error: missing alias"));
+ ret = -386;
+ goto on_return;
+ }
+
+
+ /*
+ * Part 2: sending and receiving data
+ */
+ PJ_LOG(3,(THIS_FILE, " sending/receiving data"));
+
+ /* Change server operation mode to echo back data */
+ srv->flag = ECHO;
+
+ /* Reset server */
+ srv->rx_cnt = 0;
+
+ /* Client sending data to echo server */
+ {
+ char txt[100];
+ PJ_LOG(3,(THIS_FILE, " sending to %s", pj_sockaddr_print(&info.srv_addr, txt, sizeof(txt), 3)));
+ }
+ status = pj_stun_sock_sendto(client->sock, NULL, &ret, sizeof(ret),
+ 0, &info.srv_addr,
+ pj_sockaddr_get_len(&info.srv_addr));
+ if (status != PJ_SUCCESS && status != PJ_EPENDING) {
+ app_perror(" error: server sending data", status);
+ ret = -390;
+ goto on_return;
+ }
+
+ /* Wait for a short period until client receives data. We can't wait for
+ * too long otherwise the keep-alive will kick in.
+ */
+ pj_gettimeofday(&timeout);
+ timeout.sec += 1;
+ do {
+ handle_events(cfg, 100);
+ pj_gettimeofday(&t);
+ } while (client->on_rx_data_cnt==0 && PJ_TIME_VAL_LT(t, timeout));
+
+ /* Check that data is received in server */
+ if (srv->rx_cnt == 0) {
+ PJ_LOG(3,(THIS_FILE, " error: server didn't receive data"));
+ ret = -395;
+ goto on_return;
+ }
+
+ /* Check that status is still OK */
+ if (client->last_status != PJ_SUCCESS) {
+ app_perror(" error: client has failed", client->last_status);
+ ret = -400;
+ goto on_return;
+ }
+ /* Check that data has been received */
+ if (client->on_rx_data_cnt == 0) {
+ PJ_LOG(3,(THIS_FILE, " error: client doesn't receive data"));
+ ret = -410;
+ goto on_return;
+ }
+
+ /*
+ * Part 3: Successful keep-alive,
+ */
+ PJ_LOG(3,(THIS_FILE, " successful keep-alive scenario"));
+
+ /* Change server operation mode to normal mode */
+ srv->flag = RESPOND_STUN | WITH_XOR_MAPPED;
+
+ /* Reset server */
+ srv->rx_cnt = 0;
+
+ /* Reset client */
+ client->on_status_cnt = 0;
+ client->last_status = PJ_SUCCESS;
+ client->on_rx_data_cnt = 0;
+
+ /* Wait for keep-alive duration to see if client actually sends the
+ * keep-alive.
+ */
+ pj_gettimeofday(&timeout);
+ timeout.sec += (PJ_STUN_KEEP_ALIVE_SEC + 1);
+ do {
+ handle_events(cfg, 100);
+ pj_gettimeofday(&t);
+ } while (PJ_TIME_VAL_LT(t, timeout));
+
+ /* Check that server receives some packets */
+ if (srv->rx_cnt == 0) {
+ PJ_LOG(3, (THIS_FILE, " error: no keep-alive was received"));
+ ret = -420;
+ goto on_return;
+ }
+ /* Check that client status is still okay and on_status() callback is NOT
+ * called
+ */
+ /* No longer valid due to this ticket:
+ * http://trac.pjsip.org/repos/ticket/742
+
+ if (client->on_status_cnt != 0) {
+ PJ_LOG(3, (THIS_FILE, " error: on_status() must not be called on successful"
+ "keep-alive when mapped-address does not change"));
+ ret = -430;
+ goto on_return;
+ }
+ */
+ /* Check that client doesn't receive anything */
+ if (client->on_rx_data_cnt != 0) {
+ PJ_LOG(3,(THIS_FILE, " error: client shouldn't have received anything"));
+ ret = -440;
+ goto on_return;
+ }
+
+
+ /*
+ * Part 4: Successful keep-alive with IP address change
+ */
+ PJ_LOG(3,(THIS_FILE, " mapped IP address change"));
+
+ /* Change server operation mode to normal mode */
+ srv->flag = RESPOND_STUN | WITH_XOR_MAPPED;
+
+ /* Change mapped address in the response */
+ srv->ip_to_send = pj_str("2.2.2.2");
+ srv->port_to_send++;
+
+ /* Reset server */
+ srv->rx_cnt = 0;
+
+ /* Reset client */
+ client->on_status_cnt = 0;
+ client->last_status = PJ_SUCCESS;
+ client->on_rx_data_cnt = 0;
+
+ /* Wait for keep-alive duration to see if client actually sends the
+ * keep-alive.
+ */
+ pj_gettimeofday(&timeout);
+ timeout.sec += (PJ_STUN_KEEP_ALIVE_SEC + 1);
+ do {
+ handle_events(cfg, 100);
+ pj_gettimeofday(&t);
+ } while (PJ_TIME_VAL_LT(t, timeout));
+
+ /* Check that server receives some packets */
+ if (srv->rx_cnt == 0) {
+ PJ_LOG(3, (THIS_FILE, " error: no keep-alive was received"));
+ ret = -450;
+ goto on_return;
+ }
+ /* Check that on_status() callback is called (because mapped address
+ * has changed)
+ */
+ if (client->on_status_cnt != 1) {
+ PJ_LOG(3, (THIS_FILE, " error: on_status() was not called"));
+ ret = -460;
+ goto on_return;
+ }
+ /* Check that callback was called with correct operation */
+ if (client->last_op != PJ_STUN_SOCK_MAPPED_ADDR_CHANGE) {
+ PJ_LOG(3,(THIS_FILE, " error: expecting keep-alive operation status"));
+ ret = -470;
+ goto on_return;
+ }
+ /* Check that last status is still success */
+ if (client->last_status != PJ_SUCCESS) {
+ PJ_LOG(3, (THIS_FILE, " error: expecting successful status"));
+ ret = -480;
+ goto on_return;
+ }
+ /* Check that client doesn't receive anything */
+ if (client->on_rx_data_cnt != 0) {
+ PJ_LOG(3,(THIS_FILE, " error: client shouldn't have received anything"));
+ ret = -490;
+ goto on_return;
+ }
+
+ /* Get info */
+ pj_bzero(&info, sizeof(info));
+ pj_stun_sock_get_info(client->sock, &info);
+
+ /* Check that we have server address */
+ if (!pj_sockaddr_has_addr(&info.srv_addr)) {
+ PJ_LOG(3,(THIS_FILE, " error: missing server address"));
+ ret = -500;
+ goto on_return;
+ }
+ /* .. and mapped address */
+ if (!pj_sockaddr_has_addr(&info.mapped_addr)) {
+ PJ_LOG(3,(THIS_FILE, " error: missing mapped address"));
+ ret = -510;
+ goto on_return;
+ }
+ /* verify the mapped address */
+ pj_sockaddr_in_init(&mapped_addr, &srv->ip_to_send, srv->port_to_send);
+ if (pj_sockaddr_cmp(&info.mapped_addr, &mapped_addr) != 0) {
+ PJ_LOG(3,(THIS_FILE, " error: mapped address mismatched"));
+ ret = -520;
+ goto on_return;
+ }
+
+ /* .. and at least one alias */
+ if (info.alias_cnt == 0) {
+ PJ_LOG(3,(THIS_FILE, " error: must have at least one alias"));
+ ret = -530;
+ goto on_return;
+ }
+ if (!pj_sockaddr_has_addr(&info.aliases[0])) {
+ PJ_LOG(3,(THIS_FILE, " error: missing alias"));
+ ret = -540;
+ goto on_return;
+ }
+
+
+ /*
+ * Part 5: Failed keep-alive
+ */
+ PJ_LOG(3,(THIS_FILE, " failed keep-alive scenario"));
+
+ /* Change server operation mode to respond without attribute */
+ srv->flag = RESPOND_STUN;
+
+ /* Reset server */
+ srv->rx_cnt = 0;
+
+ /* Reset client */
+ client->on_status_cnt = 0;
+ client->last_status = PJ_SUCCESS;
+ client->on_rx_data_cnt = 0;
+
+ /* Wait until on_status() is called with failure. */
+ pj_gettimeofday(&timeout);
+ timeout.sec += (PJ_STUN_KEEP_ALIVE_SEC + PJ_STUN_TIMEOUT_VALUE + 5);
+ do {
+ handle_events(cfg, 100);
+ pj_gettimeofday(&t);
+ } while (client->on_status_cnt==0 && PJ_TIME_VAL_LT(t, timeout));
+
+ /* Check that callback with correct operation is called */
+ if (client->last_op != PJ_STUN_SOCK_KEEP_ALIVE_OP) {
+ PJ_LOG(3,(THIS_FILE, " error: expecting keep-alive operation status"));
+ ret = -600;
+ goto on_return;
+ }
+ if (client->last_status == PJ_SUCCESS) {
+ PJ_LOG(3,(THIS_FILE, " error: expecting failed keep-alive"));
+ ret = -610;
+ goto on_return;
+ }
+ /* Check that client doesn't receive anything */
+ if (client->on_rx_data_cnt != 0) {
+ PJ_LOG(3,(THIS_FILE, " error: client shouldn't have received anything"));
+ ret = -620;
+ goto on_return;
+ }
+
+
+on_return:
+ destroy_server(srv);
+ destroy_client(client);
+ return ret;
+}
+
+
+/* Run one sub-test expression: snapshot pjlib resource usage first,
+ * bail out to on_return on sub-test failure, then verify the sub-test
+ * leaked no timers or pools. Requires `ret`, `stun_cfg`,
+ * `pjlib_state` and an `on_return` label in the calling scope.
+ */
+#define DO_TEST(expr) \
+ capture_pjlib_state(&stun_cfg, &pjlib_state); \
+ ret = expr; \
+ if (ret != 0) goto on_return; \
+ ret = check_pjlib_state(&stun_cfg, &pjlib_state); \
+ if (ret != 0) goto on_return;
+
+
+/* Entry point of the STUN socket test suite.
+ *
+ * Creates a private ioqueue and timer heap, wraps them in a
+ * pj_stun_config, then runs each sub-test through DO_TEST (which also
+ * checks for timer/pool leaks after every sub-test).
+ *
+ * Returns zero on success, or the first failing sub-test's (negative)
+ * error code.
+ */
+int stun_sock_test(void)
+{
+ struct pjlib_state pjlib_state;
+ pj_stun_config stun_cfg;
+ pj_ioqueue_t *ioqueue = NULL;
+ pj_timer_heap_t *timer_heap = NULL;
+ pj_pool_t *pool = NULL;
+ pj_status_t status;
+ int ret = 0;
+
+ /* NOTE(review): pool creation result is not checked; presumably the
+  * default pool policy handles allocation failure -- confirm. */
+ pool = pj_pool_create(mem, NULL, 512, 512, NULL);
+
+ status = pj_ioqueue_create(pool, 12, &ioqueue);
+ if (status != PJ_SUCCESS) {
+ app_perror(" pj_ioqueue_create()", status);
+ ret = -4;
+ goto on_return;
+ }
+
+ status = pj_timer_heap_create(pool, 100, &timer_heap);
+ if (status != PJ_SUCCESS) {
+ app_perror(" pj_timer_heap_create()", status);
+ ret = -8;
+ goto on_return;
+ }
+
+ pj_stun_config_init(&stun_cfg, mem, 0, ioqueue, timer_heap);
+
+ /* Each scenario is run twice: without and with the destroy-on-callback
+  * variation (the boolean argument). */
+ DO_TEST(timeout_test(&stun_cfg, PJ_FALSE));
+ DO_TEST(timeout_test(&stun_cfg, PJ_TRUE));
+
+ DO_TEST(missing_attr_test(&stun_cfg, PJ_FALSE));
+ DO_TEST(missing_attr_test(&stun_cfg, PJ_TRUE));
+
+ DO_TEST(keep_alive_test(&stun_cfg));
+
+on_return:
+ /* Destroy in reverse order of creation; the pool must go last since
+  * it backs the ioqueue and timer heap. */
+ if (timer_heap) pj_timer_heap_destroy(timer_heap);
+ if (ioqueue) pj_ioqueue_destroy(ioqueue);
+ if (pool) pj_pool_release(pool);
+ return ret;
+}
+
+
diff --git a/pjnath/src/pjnath-test/test.c b/pjnath/src/pjnath-test/test.c
new file mode 100644
index 0000000..081df25
--- /dev/null
+++ b/pjnath/src/pjnath-test/test.c
@@ -0,0 +1,212 @@
+/* $Id: test.c 3553 2011-05-05 06:14:19Z nanang $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include "test.h"
+#include <pjlib.h>
+
+/* Log |msg| together with the textual description of pjlib status
+ * code |rc| at log level 1.
+ */
+void app_perror(const char *msg, pj_status_t rc)
+{
+ char errtxt[256];
+
+ PJ_CHECK_STACK();
+
+ /* Translate the status code into a human readable string first. */
+ pj_strerror(rc, errtxt, sizeof(errtxt));
+
+ PJ_LOG(1,("test", "%s: [pj_status_t=%d] %s", msg, rc, errtxt));
+}
+
+/* Create a pj_stun_config backed by a fresh ioqueue and timer heap
+ * allocated from |pool|. On success the caller owns the config and
+ * must release it with destroy_stun_config(). Returns PJ_SUCCESS or
+ * the failing creation status (already logged via app_perror).
+ */
+pj_status_t create_stun_config(pj_pool_t *pool, pj_stun_config *stun_cfg)
+{
+ pj_ioqueue_t *ioqueue;
+ pj_timer_heap_t *timer_heap;
+ pj_status_t status;
+
+ status = pj_ioqueue_create(pool, 64, &ioqueue);
+ if (status != PJ_SUCCESS) {
+ app_perror(" pj_ioqueue_create()", status);
+ return status;
+ }
+
+ status = pj_timer_heap_create(pool, 256, &timer_heap);
+ if (status != PJ_SUCCESS) {
+ app_perror(" pj_timer_heap_create()", status);
+ /* Undo the ioqueue creation so no resource is leaked on failure. */
+ pj_ioqueue_destroy(ioqueue);
+ return status;
+ }
+
+ pj_stun_config_init(stun_cfg, mem, 0, ioqueue, timer_heap);
+
+ return PJ_SUCCESS;
+}
+
+/* Tear down the timer heap and ioqueue owned by |stun_cfg|.
+ * Destroyed members are reset to NULL, so calling this more than once
+ * is harmless.
+ */
+void destroy_stun_config(pj_stun_config *stun_cfg)
+{
+ if (stun_cfg->timer_heap != NULL) {
+ pj_timer_heap_destroy(stun_cfg->timer_heap);
+ stun_cfg->timer_heap = NULL;
+ }
+
+ if (stun_cfg->ioqueue != NULL) {
+ pj_ioqueue_destroy(stun_cfg->ioqueue);
+ stun_cfg->ioqueue = NULL;
+ }
+}
+
+/* Poll the timer heap and ioqueue of |stun_cfg| for up to |msec|
+ * milliseconds.
+ *
+ * If |first_event_only| is PJ_TRUE, return as soon as at least one
+ * timer or ioqueue event has been processed; otherwise keep polling
+ * for the whole duration.
+ */
+void poll_events(pj_stun_config *stun_cfg, unsigned msec,
+ pj_bool_t first_event_only)
+{
+ pj_time_val stop_time;
+ int count = 0;
+
+ pj_gettimeofday(&stop_time);
+ stop_time.msec += msec;
+ pj_time_val_normalize(&stop_time);
+
+ /* Process all events for the specified duration. */
+ for (;;) {
+ pj_time_val timeout = {0, 1}, now;
+ int c;
+
+ c = pj_timer_heap_poll( stun_cfg->timer_heap, NULL );
+ if (c > 0)
+ count += c;
+
+ c = pj_ioqueue_poll( stun_cfg->ioqueue, &timeout);
+ if (c > 0)
+ count += c;
+
+ pj_gettimeofday(&now);
+ if (PJ_TIME_VAL_GTE(now, stop_time))
+ break;
+
+ /* Bug fix: this condition used to be "count >= 0", which is
+  * trivially true (count starts at zero and never decreases), so
+  * first_event_only mode returned after a single poll iteration
+  * even when no event had occurred yet.
+  */
+ if (first_event_only && count > 0)
+ break;
+ }
+}
+
+/* Snapshot pjlib resource usage (active timer entries and used pool
+ * count) into |st| so that leaks can be detected later by
+ * check_pjlib_state().
+ */
+void capture_pjlib_state(pj_stun_config *cfg, struct pjlib_state *st)
+{
+ pj_caching_pool *cp;
+
+ st->timer_cnt = pj_timer_heap_count(cfg->timer_heap);
+
+ /* mem is set to &caching_pool.factory in test_inner(); this downcast
+  * assumes `factory` is the first member of pj_caching_pool --
+  * TODO(review): confirm against pjlib headers. */
+ cp = (pj_caching_pool*)mem;
+ st->pool_used_cnt = cp->used_count;
+}
+
+/* Compare current pjlib resource usage against an earlier snapshot
+ * taken with capture_pjlib_state(). Returns 0 when nothing leaked,
+ * otherwise a bitmask of ERR_TIMER_LEAK and/or ERR_MEMORY_LEAK.
+ */
+int check_pjlib_state(pj_stun_config *cfg,
+ const struct pjlib_state *initial_st)
+{
+ struct pjlib_state now_st;
+ int leaks = 0;
+
+ capture_pjlib_state(cfg, &now_st);
+
+ /* More live timers than before means some sub-test forgot to
+  * cancel one. */
+ if (now_st.timer_cnt > initial_st->timer_cnt) {
+ PJ_LOG(3,("", " error: possibly leaking timer"));
+ leaks |= ERR_TIMER_LEAK;
+ }
+
+ /* More pools in use than before means memory was not returned to
+  * the caching pool; dump the factory to aid debugging. */
+ if (now_st.pool_used_cnt > initial_st->pool_used_cnt) {
+ PJ_LOG(3,("", " error: possibly leaking memory"));
+ PJ_LOG(3,("", " dumping memory pool:"));
+ pj_pool_factory_dump(mem, PJ_TRUE);
+ leaks |= ERR_MEMORY_LEAK;
+ }
+
+ return leaks;
+}
+
+
+/* Log, run, and evaluate one top-level test module; jump to on_return
+ * on the first failure. Requires `rc` and an `on_return` label in the
+ * calling scope.
+ */
+#define DO_TEST(test) do { \
+ PJ_LOG(3, ("test", "Running %s...", #test)); \
+ rc = test; \
+ PJ_LOG(3, ("test", \
+ "%s(%d)", \
+ (char*)(rc ? "..ERROR" : "..success"), rc)); \
+ if (rc!=0) goto on_return; \
+ } while (0)
+
+
+/* Global pool factory used by all tests; points to the caching pool's
+ * factory created in test_inner(). */
+pj_pool_factory *mem;
+
+/* Log decoration flags applied when this test harness sets up logging. */
+int param_log_decor = PJ_LOG_HAS_NEWLINE | PJ_LOG_HAS_TIME |
+ PJ_LOG_HAS_MICRO_SEC;
+
+/* Initialize pjlib/pjlib-util/pjnath and run every enabled test
+ * module (selected by the INCLUDE_xxx_TEST macros in test.h).
+ * Returns zero on success or the first failing module's code.
+ */
+static int test_inner(void)
+{
+ pj_caching_pool caching_pool;
+ int rc = 0;
+
+ /* Taking the address before pj_caching_pool_init() is fine: mem is
+  * only dereferenced after the caching pool is initialized below. */
+ mem = &caching_pool.factory;
+
+#if 1
+ pj_log_set_level(3);
+ pj_log_set_decor(param_log_decor);
+#endif
+
+ rc = pj_init();
+ if (rc != 0) {
+ app_perror("pj_init() error!!", rc);
+ return rc;
+ }
+
+ pj_dump_config();
+ pj_caching_pool_init( &caching_pool, &pj_pool_factory_default_policy, 0 );
+
+ /* NOTE(review): return values of these init calls are ignored, and
+  * the caching pool is never destroyed on exit; acceptable for a test
+  * program but worth confirming. */
+ pjlib_util_init();
+ pjnath_init();
+
+#if INCLUDE_STUN_TEST
+ DO_TEST(stun_test());
+ DO_TEST(sess_auth_test());
+#endif
+
+#if INCLUDE_ICE_TEST
+ DO_TEST(ice_test());
+#endif
+
+#if INCLUDE_STUN_SOCK_TEST
+ DO_TEST(stun_sock_test());
+#endif
+
+#if INCLUDE_TURN_SOCK_TEST
+ DO_TEST(turn_sock_test());
+#endif
+
+on_return:
+ return rc;
+}
+
+/* Public test entry point: runs test_inner() inside a pjlib exception
+ * frame so that a PJ_THROW anywhere in the tests is reported instead
+ * of crashing the process. Returns test_inner()'s result, or -1 if an
+ * unhandled exception was caught.
+ */
+int test_main(void)
+{
+ PJ_USE_EXCEPTION;
+
+ PJ_TRY {
+ return test_inner();
+ }
+ PJ_CATCH_ANY {
+ int id = PJ_GET_EXCEPTION();
+ PJ_LOG(3,("test", "FATAL: unhandled exception id %d (%s)",
+ id, pj_exception_id_name(id)));
+ }
+ PJ_END;
+
+ return -1;
+}
+
diff --git a/pjnath/src/pjnath-test/test.h b/pjnath/src/pjnath-test/test.h
new file mode 100644
index 0000000..bbba992
--- /dev/null
+++ b/pjnath/src/pjnath-test/test.h
@@ -0,0 +1,63 @@
+/* $Id: test.h 3553 2011-05-05 06:14:19Z nanang $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <pjlib.h>
+#include <pjlib-util.h>
+#include <pjnath.h>
+
+/* Compile-time switches selecting which test modules test_inner()
+ * runs; set to 0 to skip a module. */
+#define INCLUDE_STUN_TEST 1
+#define INCLUDE_ICE_TEST 1
+#define INCLUDE_STUN_SOCK_TEST 1
+#define INCLUDE_TURN_SOCK_TEST 1
+
+/* Test module entry points; each returns zero on success or a
+ * module-specific negative error code. */
+int stun_test(void);
+int sess_auth_test(void);
+int stun_sock_test(void);
+int turn_sock_test(void);
+int ice_test(void);
+int test_main(void);
+
+/* Log |title| and the error string for |rc| (defined in test.c). */
+extern void app_perror(const char *title, pj_status_t rc);
+/* Pool factory shared by all tests (set up in test_inner()). */
+extern pj_pool_factory *mem;
+
+////////////////////////////////////
+/*
+ * Utilities
+ */
+pj_status_t create_stun_config(pj_pool_t *pool, pj_stun_config *stun_cfg);
+void destroy_stun_config(pj_stun_config *stun_cfg);
+
+void poll_events(pj_stun_config *stun_cfg, unsigned msec,
+ pj_bool_t first_event_only);
+
+/* Snapshot of pjlib resource usage, used for leak detection around
+ * each sub-test. */
+typedef struct pjlib_state
+{
+ unsigned timer_cnt; /* Number of timer entries */
+ unsigned pool_used_cnt; /* Number of app pools */
+} pjlib_state;
+
+
+void capture_pjlib_state(pj_stun_config *cfg, struct pjlib_state *st);
+int check_pjlib_state(pj_stun_config *cfg,
+ const struct pjlib_state *initial_st);
+
+
+/* Bit flags returned by check_pjlib_state(). */
+#define ERR_MEMORY_LEAK 1
+#define ERR_TIMER_LEAK 2
+
diff --git a/pjnath/src/pjnath-test/turn_sock_test.c b/pjnath/src/pjnath-test/turn_sock_test.c
new file mode 100644
index 0000000..f2be81d
--- /dev/null
+++ b/pjnath/src/pjnath-test/turn_sock_test.c
@@ -0,0 +1,516 @@
+/* $Id: turn_sock_test.c 3553 2011-05-05 06:14:19Z nanang $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include "test.h"
+#include "server.h"
+
+/* Domain served by the embedded test server (used for DNS SRV and as
+ * the TURN realm). */
+#define SRV_DOMAIN "pjsip.lab.domain"
+/* TURN allocation keep-alive interval, in seconds. */
+#define KA_INTERVAL 50
+
+/* Counters accumulated by the TURN socket callbacks. */
+struct test_result
+{
+ unsigned state_called; /* bitmask: bit N set when state N was entered */
+ unsigned rx_data_cnt; /* number of on_rx_data() invocations */
+};
+
+/* One client-plus-server test fixture. */
+struct test_session
+{
+ pj_pool_t *pool; /* owns this struct; released last */
+ pj_stun_config *stun_cfg;
+ pj_turn_sock *turn_sock; /* NULL once destroyed */
+ pj_dns_resolver *resolver;
+ test_server *test_srv;
+
+ pj_bool_t destroy_called; /* guards against double destroy */
+ int destroy_on_state; /* destroy turn_sock when this state is reached */
+ struct test_result result;
+};
+
+/* Per-scenario configuration for create_test_session(). */
+struct test_session_cfg
+{
+ struct {
+ pj_bool_t enable_dns_srv; /* resolve server via DNS SRV? */
+ int destroy_on_state; /* see test_session.destroy_on_state */
+ } client;
+
+ struct {
+ pj_uint32_t flags; /* test server feature flags */
+ pj_bool_t respond_allocate;
+ pj_bool_t respond_refresh;
+ } srv;
+};
+
+static void turn_on_rx_data(pj_turn_sock *turn_sock,
+ void *pkt,
+ unsigned pkt_len,
+ const pj_sockaddr_t *peer_addr,
+ unsigned addr_len);
+static void turn_on_state(pj_turn_sock *turn_sock,
+ pj_turn_state_t old_state,
+ pj_turn_state_t new_state);
+
+/* Release everything owned by a test session: the DNS resolver, the
+ * TURN socket (unless a callback already destroyed it), the test
+ * server, and finally the session pool itself.
+ */
+static void destroy_session(struct test_session *sess)
+{
+ if (sess->resolver != NULL) {
+ pj_dns_resolver_destroy(sess->resolver, PJ_TRUE);
+ sess->resolver = NULL;
+ }
+
+ if (sess->turn_sock != NULL) {
+ /* destroy_called guards against destroying twice when the
+  * on_state() callback already did it. */
+ if (!sess->destroy_called) {
+ sess->destroy_called = PJ_TRUE;
+ pj_turn_sock_destroy(sess->turn_sock);
+ }
+ sess->turn_sock = NULL;
+ }
+
+ if (sess->test_srv != NULL) {
+ destroy_test_server(sess->test_srv);
+ sess->test_srv = NULL;
+ }
+
+ /* The pool owns |sess| itself, so releasing it must come last. */
+ if (sess->pool != NULL)
+ pj_pool_release(sess->pool);
+}
+
+
+
+/* Build a complete TURN client/server fixture according to |cfg|:
+ * a TURN socket, an embedded test server for SRV_DOMAIN, a DNS
+ * resolver pointed at the local test DNS server, and then start the
+ * TURN allocation with static credentials.
+ *
+ * On success stores the session in *p_sess and returns 0; on failure
+ * releases everything already created and returns a negative code.
+ */
+static int create_test_session(pj_stun_config *stun_cfg,
+ const struct test_session_cfg *cfg,
+ struct test_session **p_sess)
+{
+ struct test_session *sess;
+ pj_pool_t *pool;
+ pj_turn_sock_cb turn_sock_cb;
+ pj_turn_alloc_param alloc_param;
+ pj_stun_auth_cred cred;
+ pj_status_t status;
+
+ /* Create client */
+ pool = pj_pool_create(mem, "turnclient", 512, 512, NULL);
+ sess = PJ_POOL_ZALLOC_T(pool, struct test_session);
+ sess->pool = pool;
+ sess->stun_cfg = stun_cfg;
+ sess->destroy_on_state = cfg->client.destroy_on_state;
+
+ pj_bzero(&turn_sock_cb, sizeof(turn_sock_cb));
+ turn_sock_cb.on_rx_data = &turn_on_rx_data;
+ turn_sock_cb.on_state = &turn_on_state;
+ status = pj_turn_sock_create(sess->stun_cfg, pj_AF_INET(), PJ_TURN_TP_UDP,
+ &turn_sock_cb, 0, sess, &sess->turn_sock);
+ if (status != PJ_SUCCESS) {
+ destroy_session(sess);
+ return -20;
+ }
+
+ /* Create test server */
+ status = create_test_server(sess->stun_cfg, cfg->srv.flags,
+ SRV_DOMAIN, &sess->test_srv);
+ if (status != PJ_SUCCESS) {
+ destroy_session(sess);
+ return -30;
+ }
+
+ sess->test_srv->turn_respond_allocate = cfg->srv.respond_allocate;
+ sess->test_srv->turn_respond_refresh = cfg->srv.respond_refresh;
+
+ /* Create client resolver pointed at the local test DNS server */
+ status = pj_dns_resolver_create(mem, "resolver", 0, sess->stun_cfg->timer_heap,
+ sess->stun_cfg->ioqueue, &sess->resolver);
+ if (status != PJ_SUCCESS) {
+ destroy_session(sess);
+ return -40;
+
+ } else {
+ pj_str_t dns_srv = pj_str("127.0.0.1");
+ pj_uint16_t dns_srv_port = (pj_uint16_t) DNS_SERVER_PORT;
+ status = pj_dns_resolver_set_ns(sess->resolver, 1, &dns_srv, &dns_srv_port);
+
+ if (status != PJ_SUCCESS) {
+ destroy_session(sess);
+ return -50;
+ }
+ }
+
+ /* Init TURN credential (static, plain-text password) */
+ pj_bzero(&cred, sizeof(cred));
+ cred.type = PJ_STUN_AUTH_CRED_STATIC;
+ cred.data.static_cred.realm = pj_str(SRV_DOMAIN);
+ cred.data.static_cred.username = pj_str(TURN_USERNAME);
+ cred.data.static_cred.data_type = PJ_STUN_PASSWD_PLAIN;
+ cred.data.static_cred.data = pj_str(TURN_PASSWD);
+
+ /* Init TURN allocate parameter */
+ pj_turn_alloc_param_default(&alloc_param);
+ alloc_param.ka_interval = KA_INTERVAL;
+
+ /* Start the client */
+ if (cfg->client.enable_dns_srv) {
+ /* Use DNS SRV to resolve server, may fallback to DNS A */
+ pj_str_t domain = pj_str(SRV_DOMAIN);
+ status = pj_turn_sock_alloc(sess->turn_sock, &domain, TURN_SERVER_PORT,
+ sess->resolver, &cred, &alloc_param);
+
+ } else {
+ /* Explicitly specify server address */
+ pj_str_t host = pj_str("127.0.0.1");
+ status = pj_turn_sock_alloc(sess->turn_sock, &host, TURN_SERVER_PORT,
+ NULL, &cred, &alloc_param);
+
+ }
+
+ /* An alloc failure is only fatal when the caller intends the session
+  * to survive past the READY state; otherwise the failure is the very
+  * thing some scenarios want to exercise, so the session is still
+  * returned. */
+ if (status != PJ_SUCCESS) {
+ if (cfg->client.destroy_on_state >= PJ_TURN_STATE_READY) {
+ destroy_session(sess);
+ return -70;
+ }
+ }
+
+ *p_sess = sess;
+ return 0;
+}
+
+
+/* pj_turn_sock callback: data relayed from a peer has arrived.
+ * The payload is irrelevant to these tests; only the event is counted.
+ */
+static void turn_on_rx_data(pj_turn_sock *turn_sock,
+ void *pkt,
+ unsigned pkt_len,
+ const pj_sockaddr_t *peer_addr,
+ unsigned addr_len)
+{
+ struct test_session *sess =
+ (struct test_session*) pj_turn_sock_get_user_data(turn_sock);
+
+ PJ_UNUSED_ARG(pkt);
+ PJ_UNUSED_ARG(pkt_len);
+ PJ_UNUSED_ARG(peer_addr);
+ PJ_UNUSED_ARG(addr_len);
+
+ /* User data is cleared once the socket is being destroyed; ignore
+  * late packets in that case. */
+ if (sess != NULL)
+ sess->result.rx_data_cnt++;
+}
+
+
+/* pj_turn_sock callback: the TURN session changed state.
+ *
+ * Records the state in the result bitmask, asserts that states are
+ * entered exactly once and in strictly increasing order, optionally
+ * destroys the socket when the configured destroy_on_state is
+ * reached, and detaches from the socket once it starts destroying.
+ */
+static void turn_on_state(pj_turn_sock *turn_sock,
+ pj_turn_state_t old_state,
+ pj_turn_state_t new_state)
+{
+ struct test_session *sess;
+ unsigned i, mask;
+
+ /* old_state is only read inside pj_assert(), which may compile to
+  * nothing in release builds. */
+ PJ_UNUSED_ARG(old_state);
+
+ sess = (struct test_session*) pj_turn_sock_get_user_data(turn_sock);
+ if (sess == NULL)
+ return;
+
+ /* This state must not be called before */
+ pj_assert((sess->result.state_called & (1<<new_state)) == 0);
+
+ /* new_state must be greater than old_state */
+ pj_assert(new_state > old_state);
+
+ /* must not call any greater state before (build a mask of all
+  * states above new_state and check none has been recorded) */
+ mask = 0;
+ for (i=new_state+1; i<31; ++i) mask |= (1 << i);
+
+ pj_assert((sess->result.state_called & mask) == 0);
+
+ sess->result.state_called |= (1 << new_state);
+
+ /* Destroy-from-callback scenario: tear the socket down as soon as
+  * the configured state is reached. */
+ if (new_state >= sess->destroy_on_state && !sess->destroy_called) {
+ sess->destroy_called = PJ_TRUE;
+ pj_turn_sock_destroy(turn_sock);
+ }
+
+ /* Once destruction starts, detach so later callbacks and the test
+  * loops see turn_sock as gone. */
+ if (new_state >= PJ_TURN_STATE_DESTROYING) {
+ pj_turn_sock_set_user_data(sess->turn_sock, NULL);
+ sess->turn_sock = NULL;
+ }
+}
+
+
+/////////////////////////////////////////////////////////////////////
+
+/* Drive one full TURN allocation life cycle (with and without DNS SRV
+ * resolution) and verify that every expected state callback fired
+ * exactly as required, then check for resource leaks.
+ * Returns 0 on success or a negative error code.
+ */
+static int state_progression_test(pj_stun_config *stun_cfg)
+{
+ struct test_session_cfg test_cfg =
+ {
+ { /* Client cfg */
+ /* DNS SRV */ /* Destroy on state */
+ PJ_TRUE, 0xFFFF
+ },
+ { /* Server cfg */
+ 0xFFFFFFFF, /* flags */
+ PJ_TRUE, /* respond to allocate */
+ PJ_TRUE /* respond to refresh */
+ }
+ };
+ struct test_session *sess;
+ unsigned i;
+ int rc;
+
+ PJ_LOG(3,("", " state progression tests"));
+
+ /* Iteration 0: direct address; iteration 1: DNS SRV resolution. */
+ for (i=0; i<=1; ++i) {
+ enum { TIMEOUT = 60 };
+ pjlib_state pjlib_state;
+ pj_turn_session_info info;
+ struct test_result result;
+ pj_time_val tstart;
+
+ PJ_LOG(3,("", " %s DNS SRV resolution",
+ (i==0? "without" : "with")));
+
+ capture_pjlib_state(stun_cfg, &pjlib_state);
+
+ test_cfg.client.enable_dns_srv = i;
+
+ rc = create_test_session(stun_cfg, &test_cfg, &sess);
+ if (rc != 0)
+ return rc;
+
+ pj_bzero(&info, sizeof(info));
+
+ /* Wait until state is READY */
+ pj_gettimeofday(&tstart);
+ while (sess->turn_sock) {
+ pj_time_val now;
+
+ poll_events(stun_cfg, 10, PJ_FALSE);
+ rc = pj_turn_sock_get_info(sess->turn_sock, &info);
+ if (rc!=PJ_SUCCESS)
+ break;
+
+ if (info.state >= PJ_TURN_STATE_READY)
+ break;
+
+ pj_gettimeofday(&now);
+ if (now.sec - tstart.sec > TIMEOUT) {
+ PJ_LOG(3,("", " timed-out"));
+ break;
+ }
+ }
+
+ if (info.state != PJ_TURN_STATE_READY) {
+ PJ_LOG(3,("", " error: state is not READY"));
+ destroy_session(sess);
+ return -130;
+ }
+
+ /* Deallocate */
+ pj_turn_sock_destroy(sess->turn_sock);
+
+ /* Wait for couple of seconds.
+ * We can't poll the session info since the session may have
+ * been destroyed
+ */
+ poll_events(stun_cfg, 2000, PJ_FALSE);
+ sess->turn_sock = NULL;
+ /* Copy the counters out before the pool backing them is released. */
+ pj_memcpy(&result, &sess->result, sizeof(result));
+ destroy_session(sess);
+
+ /* Check that each mandatory state was visited exactly via its bit
+  * in the state_called mask. */
+ if ((result.state_called & (1<<PJ_TURN_STATE_RESOLVING)) == 0) {
+ PJ_LOG(3,("", " error: PJ_TURN_STATE_RESOLVING is not called"));
+ return -140;
+ }
+
+ if ((result.state_called & (1<<PJ_TURN_STATE_RESOLVED)) == 0) {
+ PJ_LOG(3,("", " error: PJ_TURN_STATE_RESOLVED is not called"));
+ return -150;
+ }
+
+ if ((result.state_called & (1<<PJ_TURN_STATE_ALLOCATING)) == 0) {
+ PJ_LOG(3,("", " error: PJ_TURN_STATE_ALLOCATING is not called"));
+ return -155;
+ }
+
+ if ((result.state_called & (1<<PJ_TURN_STATE_READY)) == 0) {
+ PJ_LOG(3,("", " error: PJ_TURN_STATE_READY is not called"));
+ return -160;
+ }
+
+ if ((result.state_called & (1<<PJ_TURN_STATE_DEALLOCATING)) == 0) {
+ PJ_LOG(3,("", " error: PJ_TURN_STATE_DEALLOCATING is not called"));
+ return -170;
+ }
+
+ if ((result.state_called & (1<<PJ_TURN_STATE_DEALLOCATED)) == 0) {
+ PJ_LOG(3,("", " error: PJ_TURN_STATE_DEALLOCATED is not called"));
+ return -180;
+ }
+
+ if ((result.state_called & (1<<PJ_TURN_STATE_DESTROYING)) == 0) {
+ PJ_LOG(3,("", " error: PJ_TURN_STATE_DESTROYING is not called"));
+ return -190;
+ }
+
+ /* Drain any remaining events before the leak check. */
+ poll_events(stun_cfg, 500, PJ_FALSE);
+ rc = check_pjlib_state(stun_cfg, &pjlib_state);
+ if (rc != 0) {
+ PJ_LOG(3,("", " error: memory/timer-heap leak detected"));
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+
+/////////////////////////////////////////////////////////////////////
+
+/* Verify that the TURN socket can be destroyed at every stage of its
+ * life cycle (RESOLVING through READY), either explicitly from
+ * application code or from inside the on_state() callback, without
+ * leaking memory or timer entries.
+ *
+ * |with_dns_srv| selects DNS SRV vs. direct address resolution;
+ * |in_callback| selects destroy-from-callback vs. explicit destroy.
+ * Returns 0 on success or a negative error code.
+ */
+static int destroy_test(pj_stun_config *stun_cfg,
+ pj_bool_t with_dns_srv,
+ pj_bool_t in_callback)
+{
+ struct test_session_cfg test_cfg =
+ {
+ { /* Client cfg */
+ /* DNS SRV */ /* Destroy on state */
+ PJ_TRUE, 0xFFFF
+ },
+ { /* Server cfg */
+ 0xFFFFFFFF, /* flags */
+ PJ_TRUE, /* respond to allocate */
+ PJ_TRUE /* respond to refresh */
+ }
+ };
+ struct test_session *sess;
+ int target_state;
+ int rc;
+
+ PJ_LOG(3,("", " destroy test %s %s",
+ (in_callback? "in callback" : ""),
+ (with_dns_srv? "with DNS srv" : "")
+ ));
+
+ test_cfg.client.enable_dns_srv = with_dns_srv;
+
+ for (target_state=PJ_TURN_STATE_RESOLVING; target_state<=PJ_TURN_STATE_READY; ++target_state) {
+ enum { TIMEOUT = 60 };
+ pjlib_state pjlib_state;
+ pj_turn_session_info info;
+ pj_time_val tstart;
+
+ capture_pjlib_state(stun_cfg, &pjlib_state);
+
+ PJ_LOG(3,("", " %s", pj_turn_state_name((pj_turn_state_t)target_state)));
+
+ if (in_callback)
+ test_cfg.client.destroy_on_state = target_state;
+
+ rc = create_test_session(stun_cfg, &test_cfg, &sess);
+ if (rc != 0)
+ return rc;
+
+ /* Fix: make sure info.state holds a defined value even if
+  * pj_turn_sock_get_info() below fails (it was read
+  * uninitialized in that case). */
+ pj_bzero(&info, sizeof(info));
+
+ if (in_callback) {
+ /* The session destroys itself from on_state(); just poll
+  * until the callback clears sess->turn_sock or we time out. */
+ pj_gettimeofday(&tstart);
+ rc = 0;
+ while (sess->turn_sock) {
+ pj_time_val now;
+
+ poll_events(stun_cfg, 100, PJ_FALSE);
+
+ pj_gettimeofday(&now);
+ if (now.sec - tstart.sec > TIMEOUT) {
+ rc = -7;
+ break;
+ }
+ }
+
+ } else {
+ /* Explicitly destroy once the session reaches target_state. */
+ pj_gettimeofday(&tstart);
+ rc = 0;
+ while (sess->turn_sock) {
+ pj_time_val now;
+
+ poll_events(stun_cfg, 1, PJ_FALSE);
+
+ pj_turn_sock_get_info(sess->turn_sock, &info);
+
+ if (info.state >= target_state) {
+ pj_turn_sock_destroy(sess->turn_sock);
+ break;
+ }
+
+ pj_gettimeofday(&now);
+ if (now.sec - tstart.sec > TIMEOUT) {
+ rc = -8;
+ break;
+ }
+ }
+ }
+
+
+ if (rc != 0) {
+ PJ_LOG(3,("", " error: timeout"));
+ /* Bug fix: the session (and its test server and pool) used to
+  * be leaked on this timeout path; release it before returning. */
+ destroy_session(sess);
+ return rc;
+ }
+
+ /* Let pending events (deallocation etc.) complete, then clean up
+  * and check for leaks. */
+ poll_events(stun_cfg, 1000, PJ_FALSE);
+ destroy_session(sess);
+
+ rc = check_pjlib_state(stun_cfg, &pjlib_state);
+ if (rc != 0) {
+ PJ_LOG(3,("", " error: memory/timer-heap leak detected"));
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+
+/////////////////////////////////////////////////////////////////////
+
+/* Entry point of the TURN socket test suite: runs the state
+ * progression test, then every combination of destroy_test()
+ * (with/without DNS SRV, explicit/in-callback destroy).
+ * Returns 0 on success or the first failing sub-test's code.
+ */
+int turn_sock_test(void)
+{
+ pj_pool_t *pool;
+ pj_stun_config stun_cfg;
+ int i, rc = 0;
+
+ pool = pj_pool_create(mem, "turntest", 512, 512, NULL);
+ rc = create_stun_config(pool, &stun_cfg);
+ if (rc != PJ_SUCCESS) {
+ pj_pool_release(pool);
+ return -2;
+ }
+
+ rc = state_progression_test(&stun_cfg);
+ if (rc != 0)
+ goto on_return;
+
+ /* i: with_dns_srv, j: in_callback */
+ for (i=0; i<=1; ++i) {
+ int j;
+ for (j=0; j<=1; ++j) {
+ rc = destroy_test(&stun_cfg, i, j);
+ if (rc != 0)
+ goto on_return;
+ }
+ }
+
+on_return:
+ destroy_stun_config(&stun_cfg);
+ pj_pool_release(pool);
+ return rc;
+}
+
diff --git a/pjnath/src/pjnath/errno.c b/pjnath/src/pjnath/errno.c
new file mode 100644
index 0000000..389e9ad
--- /dev/null
+++ b/pjnath/src/pjnath/errno.c
@@ -0,0 +1,216 @@
+/* $Id: errno.c 3553 2011-05-05 06:14:19Z nanang $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <pjnath/errno.h>
+#include <pjnath/stun_msg.h>
+#include <pj/assert.h>
+#include <pj/log.h>
+#include <pj/string.h>
+
+
+
+/* PJNATH's own error codes/messages
+ * MUST KEEP THIS ARRAY SORTED!!
+ * Message must be limited to 64 chars!
+ */
+#if defined(PJ_HAS_ERROR_STRING) && PJ_HAS_ERROR_STRING!=0
+/* Code-to-message table consumed by pjnath_strerror()'s binary search;
+ * entries MUST remain sorted by error code. */
+static const struct
+{
+ int code;
+ const char *msg;
+} err_str[] =
+{
+ /* STUN related error codes */
+ PJ_BUILD_ERR( PJNATH_EINSTUNMSG, "Invalid STUN message"),
+ PJ_BUILD_ERR( PJNATH_EINSTUNMSGLEN, "Invalid STUN message length"),
+ PJ_BUILD_ERR( PJNATH_EINSTUNMSGTYPE, "Invalid or unexpected STUN message type"),
+ PJ_BUILD_ERR( PJNATH_ESTUNTIMEDOUT, "STUN transaction has timed out"),
+
+ PJ_BUILD_ERR( PJNATH_ESTUNTOOMANYATTR, "Too many STUN attributes"),
+ PJ_BUILD_ERR( PJNATH_ESTUNINATTRLEN, "Invalid STUN attribute length"),
+ PJ_BUILD_ERR( PJNATH_ESTUNDUPATTR, "Found duplicate STUN attribute"),
+
+ PJ_BUILD_ERR( PJNATH_ESTUNFINGERPRINT, "STUN FINGERPRINT verification failed"),
+ PJ_BUILD_ERR( PJNATH_ESTUNMSGINTPOS, "Invalid STUN attribute after MESSAGE-INTEGRITY"),
+ PJ_BUILD_ERR( PJNATH_ESTUNFINGERPOS, "Invalid STUN attribute after FINGERPRINT"),
+
+ PJ_BUILD_ERR( PJNATH_ESTUNNOMAPPEDADDR, "STUN (XOR-)MAPPED-ADDRESS attribute not found"),
+ PJ_BUILD_ERR( PJNATH_ESTUNIPV6NOTSUPP, "STUN IPv6 attribute not supported"),
+ PJ_BUILD_ERR( PJNATH_EINVAF, "Invalid STUN address family value"),
+ PJ_BUILD_ERR( PJNATH_ESTUNINSERVER, "Invalid STUN server or server not configured"),
+
+ /* Fixed typo in user-visible message ("destoyed" -> "destroyed"). */
+ PJ_BUILD_ERR( PJNATH_ESTUNDESTROYED, "STUN object has been destroyed"),
+
+ /* ICE related errors */
+ PJ_BUILD_ERR( PJNATH_ENOICE, "ICE session not available"),
+ PJ_BUILD_ERR( PJNATH_EICEINPROGRESS, "ICE check is in progress"),
+ PJ_BUILD_ERR( PJNATH_EICEFAILED, "All ICE checklists failed"),
+ PJ_BUILD_ERR( PJNATH_EICEMISMATCH, "Default target doesn't match any ICE candidates"),
+ PJ_BUILD_ERR( PJNATH_EICEINCOMPID, "Invalid ICE component ID"),
+ PJ_BUILD_ERR( PJNATH_EICEINCANDID, "Invalid ICE candidate ID"),
+ PJ_BUILD_ERR( PJNATH_EICEINSRCADDR, "Source address mismatch"),
+ PJ_BUILD_ERR( PJNATH_EICEMISSINGSDP, "Missing ICE SDP attribute"),
+ PJ_BUILD_ERR( PJNATH_EICEINCANDSDP, "Invalid SDP \"candidate\" attribute"),
+ PJ_BUILD_ERR( PJNATH_EICENOHOSTCAND, "No host candidate associated with srflx"),
+ PJ_BUILD_ERR( PJNATH_EICENOMTIMEOUT, "Controlled agent timed out waiting for nomination"),
+
+ /* TURN related errors */
+ PJ_BUILD_ERR( PJNATH_ETURNINTP, "Invalid/unsupported transport"),
+
+};
+#endif	/* PJ_HAS_ERROR_STRING */
+
+
+/*
+ * pjnath_strerror()
+ */
+static pj_str_t pjnath_strerror(pj_status_t statcode,
+ char *buf, pj_size_t bufsize )
+{
+ pj_str_t errstr;
+
+#if defined(PJ_HAS_ERROR_STRING) && (PJ_HAS_ERROR_STRING != 0)
+
+ if (statcode >= PJNATH_ERRNO_START &&
+ statcode < PJNATH_ERRNO_START + PJ_ERRNO_SPACE_SIZE)
+ {
+ /* Find the error in the table.
+ * Use binary search!
+ */
+ int first = 0;
+ int n = PJ_ARRAY_SIZE(err_str);
+
+ while (n > 0) {
+ int half = n/2;
+ int mid = first + half;
+
+ if (err_str[mid].code < statcode) {
+ first = mid+1;
+ n -= (half+1);
+ } else if (err_str[mid].code > statcode) {
+ n = half;
+ } else {
+ first = mid;
+ break;
+ }
+ }
+
+
+ if (PJ_ARRAY_SIZE(err_str) && err_str[first].code == statcode) {
+ pj_str_t msg;
+
+ msg.ptr = (char*)err_str[first].msg;
+ msg.slen = pj_ansi_strlen(err_str[first].msg);
+
+ errstr.ptr = buf;
+ pj_strncpy_with_null(&errstr, &msg, bufsize);
+ return errstr;
+
+ }
+ }
+
+#endif /* PJ_HAS_ERROR_STRING */
+
+
+ /* Error not found. */
+ errstr.ptr = buf;
+ errstr.slen = pj_ansi_snprintf(buf, bufsize,
+ "Unknown pjnath error %d",
+ statcode);
+ if (errstr.slen < 0) errstr.slen = 0;
+ else if (errstr.slen > (int)bufsize) errstr.slen = bufsize;
+
+ return errstr;
+}
+
+
+/* Resolve a status code derived from a STUN protocol error code
+ * (PJ_STATUS_FROM_STUN_CODE) to the standard STUN reason phrase.
+ * Registered with pj_register_strerror() by pjnath_init(). Copies the
+ * reason (or a generic "Unknown STUN err-code" text) into |buf|.
+ */
+static pj_str_t pjnath_strerror2(pj_status_t statcode,
+ char *buf, pj_size_t bufsize )
+{
+ /* Recover the raw STUN error code (e.g. 401) from the pj status. */
+ int stun_code = statcode - PJ_STATUS_FROM_STUN_CODE(0);
+ const pj_str_t cmsg = pj_stun_get_err_reason(stun_code);
+ pj_str_t errstr;
+
+ buf[bufsize-1] = '\0';
+
+ if (cmsg.slen == 0) {
+ /* Not found */
+ errstr.ptr = buf;
+ errstr.slen = pj_ansi_snprintf(buf, bufsize,
+ "Unknown STUN err-code %d",
+ stun_code);
+ } else {
+ errstr.ptr = buf;
+ /* pj_strncpy() does not NUL-terminate; terminate manually. */
+ pj_strncpy(&errstr, &cmsg, bufsize);
+ if (errstr.slen < (int)bufsize)
+ buf[errstr.slen] = '\0';
+ else
+ buf[bufsize-1] = '\0';
+ }
+
+ /* Clamp slen in case snprintf failed or truncated. */
+ if (errstr.slen < 0) errstr.slen = 0;
+ else if (errstr.slen > (int)bufsize) errstr.slen = bufsize;
+
+ return errstr;
+}
+
+
+/* Register PJNATH's two error-string handlers with pjlib: one for the
+ * PJNATH error code space, one for statuses derived from STUN
+ * protocol error codes (300..699). Always returns PJ_SUCCESS.
+ *
+ * NOTE(review): `status` is only checked via pj_assert(), which
+ * compiles to nothing in release builds, leaving registration
+ * failures silent (and `status` formally unused) -- confirm intended.
+ */
+PJ_DEF(pj_status_t) pjnath_init(void)
+{
+ pj_status_t status;
+
+ status = pj_register_strerror(PJNATH_ERRNO_START, 299,
+ &pjnath_strerror);
+ pj_assert(status == PJ_SUCCESS);
+
+ status = pj_register_strerror(PJ_STATUS_FROM_STUN_CODE(300),
+ 699 - 300,
+ &pjnath_strerror2);
+ pj_assert(status == PJ_SUCCESS);
+
+ return PJ_SUCCESS;
+}
+
+
+#if PJNATH_ERROR_LEVEL <= PJ_LOG_MAX_LEVEL
+
+/* Log "<title>: <error string for status>" under |sender| at the
+ * compile-time log level selected by PJNATH_ERROR_LEVEL. The whole
+ * function is compiled out when that level exceeds PJ_LOG_MAX_LEVEL.
+ */
+PJ_DEF(void) pjnath_perror(const char *sender, const char *title,
+ pj_status_t status)
+{
+ char errmsg[PJ_ERR_MSG_SIZE];
+
+ pj_strerror(status, errmsg, sizeof(errmsg));
+
+/* PJ_LOG's level argument must be a literal, hence this dispatch. */
+#if PJNATH_ERROR_LEVEL==1
+ PJ_LOG(1,(sender, "%s: %s", title, errmsg));
+#elif PJNATH_ERROR_LEVEL==2
+ PJ_LOG(2,(sender, "%s: %s", title, errmsg));
+#elif PJNATH_ERROR_LEVEL==3
+ PJ_LOG(3,(sender, "%s: %s", title, errmsg));
+#elif PJNATH_ERROR_LEVEL==4
+ PJ_LOG(4,(sender, "%s: %s", title, errmsg));
+#elif PJNATH_ERROR_LEVEL==5
+ PJ_LOG(5,(sender, "%s: %s", title, errmsg));
+#else
+# error Invalid PJNATH_ERROR_LEVEL value
+#endif
+}
+
+#endif	/* PJNATH_ERROR_LEVEL <= PJ_LOG_MAX_LEVEL */
+
diff --git a/pjnath/src/pjnath/ice_session.c b/pjnath/src/pjnath/ice_session.c
new file mode 100644
index 0000000..05f39bc
--- /dev/null
+++ b/pjnath/src/pjnath/ice_session.c
@@ -0,0 +1,2968 @@
+/* $Id: ice_session.c 3999 2012-03-30 07:10:13Z bennylp $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <pjnath/ice_session.h>
+#include <pj/addr_resolv.h>
+#include <pj/array.h>
+#include <pj/assert.h>
+#include <pj/guid.h>
+#include <pj/hash.h>
+#include <pj/log.h>
+#include <pj/os.h>
+#include <pj/pool.h>
+#include <pj/rand.h>
+#include <pj/string.h>
+
/* String names for candidate types, indexed by pj_ice_cand_type
 * (host, srflx, prflx, relay -- see pj_ice_get_cand_type_name()).
 */
static const char *cand_type_names[] =
{
    "host",
    "srflx",
    "prflx",
    "relay"

};

/* String names for pj_ice_sess_check_state */
#if PJ_LOG_MAX_LEVEL >= 4
static const char *check_state_name[] =
{
    "Frozen",
    "Waiting",
    "In Progress",
    "Succeeded",
    "Failed"
};

/* String names for pj_ice_sess_checklist_state */
static const char *clist_state_name[] =
{
    "Idle",
    "Running",
    "Completed"
};
#endif	/* PJ_LOG_MAX_LEVEL >= 4 */

/* String names for pj_ice_sess_role, indexed by role value */
static const char *role_names[] =
{
    "Unknown",
    "Controlled",
    "Controlling"
};

/* Identifies what the single session timer (ice->timer) is currently
 * scheduled for; stored in the timer entry's id field.
 */
enum timer_type
{
    TIMER_NONE,			/**< Timer not active */
    TIMER_COMPLETION_CALLBACK,	/**< Call on_ice_complete() callback */
    TIMER_CONTROLLED_WAIT_NOM,	/**< Controlled agent is waiting for
				     controlling agent to send connectivity
				     check with nominated flag after it has
				     valid check for every components. */
    TIMER_START_NOMINATED_CHECK,/**< Controlling agent start connectivity
				     checks with USE-CANDIDATE flag. */
    TIMER_KEEP_ALIVE		/**< ICE keep-alive timer. */

};

/* Candidate type preference table, indexed by pj_ice_cand_type; used
 * by CALC_CAND_PRIO() unless overridden via pj_ice_sess_set_prefs().
 */
static pj_uint8_t cand_type_prefs[4] =
{
#if PJ_ICE_CAND_TYPE_PREF_BITS < 8
    /* Keep it to 2 bits */
    3,	    /**< PJ_ICE_HOST_PREF */
    1,	    /**< PJ_ICE_SRFLX_PREF. */
    2,	    /**< PJ_ICE_PRFLX_PREF */
    0	    /**< PJ_ICE_RELAYED_PREF */
#else
    /* Default ICE session preferences, according to draft-ice */
    126,    /**< PJ_ICE_HOST_PREF */
    100,    /**< PJ_ICE_SRFLX_PREF. */
    110,    /**< PJ_ICE_PRFLX_PREF */
    0	    /**< PJ_ICE_RELAYED_PREF */
#endif
};

#define CHECK_NAME_LEN		128
#define LOG4(expr)		PJ_LOG(4,expr)
/* NOTE(review): LOG5 expands to log level 4, not 5, so "level 5"
 * messages are emitted at the same verbosity as LOG4 — confirm this
 * is intentional before changing.
 */
#define LOG5(expr)		PJ_LOG(4,expr)
/* Index of a local candidate / check within its containing array */
#define GET_LCAND_ID(cand)	(cand - ice->lcand)
#define GET_CHECK_ID(cl, chk)	(chk - (cl)->checks)
+
+
/* The data that will be attached to the STUN session on each
 * component; lets STUN callbacks find their way back to the owning
 * ICE session and component.
 */
typedef struct stun_data
{
    pj_ice_sess		*ice;	    /* Owning ICE session */
    unsigned		 comp_id;   /* 1-based component ID */
    pj_ice_sess_comp	*comp;	    /* The component itself */
} stun_data;


/* The data that will be attached to the timer to perform
 * periodic check.
 */
typedef struct timer_data
{
    pj_ice_sess		    *ice;
    pj_ice_sess_checklist   *clist;
} timer_data;


/* This is the data that will be attached as token to outgoing
 * STUN messages.
 * (The type itself, pj_ice_msg_data, is declared in the header.)
 */


/* Forward declarations */
static void on_timer(pj_timer_heap_t *th, pj_timer_entry *te);
static void on_ice_complete(pj_ice_sess *ice, pj_status_t status);
static void ice_keep_alive(pj_ice_sess *ice, pj_bool_t send_now);
static void destroy_ice(pj_ice_sess *ice,
			pj_status_t reason);
static pj_status_t start_periodic_check(pj_timer_heap_t *th, 
					pj_timer_entry *te);
static void start_nominated_check(pj_ice_sess *ice);
static void periodic_timer(pj_timer_heap_t *th, 
			  pj_timer_entry *te);
static void handle_incoming_check(pj_ice_sess *ice,
				  const pj_ice_rx_check *rcheck);

/* These are the callbacks registered to the STUN sessions */
static pj_status_t on_stun_send_msg(pj_stun_session *sess,
				    void *token,
				    const void *pkt,
				    pj_size_t pkt_size,
				    const pj_sockaddr_t *dst_addr,
				    unsigned addr_len);
static pj_status_t on_stun_rx_request(pj_stun_session *sess,
				      const pj_uint8_t *pkt,
				      unsigned pkt_len,
				      const pj_stun_rx_data *rdata,
				      void *token,
				      const pj_sockaddr_t *src_addr,
				      unsigned src_addr_len);
static void on_stun_request_complete(pj_stun_session *stun_sess,
				     pj_status_t status,
				     void *token,
				     pj_stun_tx_data *tdata,
				     const pj_stun_msg *response,
				     const pj_sockaddr_t *src_addr,
				     unsigned src_addr_len);
static pj_status_t on_stun_rx_indication(pj_stun_session *sess,
					 const pj_uint8_t *pkt,
					 unsigned pkt_len,
					 const pj_stun_msg *msg,
					 void *token,
					 const pj_sockaddr_t *src_addr,
					 unsigned src_addr_len);

/* These are the callbacks for performing STUN authentication */
static pj_status_t stun_auth_get_auth(void *user_data,
				      pj_pool_t *pool,
				      pj_str_t *realm,
				      pj_str_t *nonce);
static pj_status_t stun_auth_get_cred(const pj_stun_msg *msg,
				      void *user_data,
				      pj_pool_t *pool,
				      pj_str_t *realm,
				      pj_str_t *username,
				      pj_str_t *nonce,
				      pj_stun_passwd_type *data_type,
				      pj_str_t *data);
static pj_status_t stun_auth_get_password(const pj_stun_msg *msg,
					  void *user_data, 
					  const pj_str_t *realm,
					  const pj_str_t *username,
					  pj_pool_t *pool,
					  pj_stun_passwd_type *data_type,
					  pj_str_t *data);
+
+
/* Return the display name for a candidate type; returns "???" (via
 * PJ_ASSERT_RETURN) when the type is out of range.
 */
PJ_DEF(const char*) pj_ice_get_cand_type_name(pj_ice_cand_type type)
{
    PJ_ASSERT_RETURN(type <= PJ_ICE_CAND_TYPE_RELAYED, "???");
    return cand_type_names[type];
}
+
+
+PJ_DEF(const char*) pj_ice_sess_role_name(pj_ice_sess_role role)
+{
+ switch (role) {
+ case PJ_ICE_SESS_ROLE_UNKNOWN:
+ return "Unknown";
+ case PJ_ICE_SESS_ROLE_CONTROLLED:
+ return "Controlled";
+ case PJ_ICE_SESS_ROLE_CONTROLLING:
+ return "Controlling";
+ default:
+ return "??";
+ }
+}
+
+
+/* Get the prefix for the foundation */
+static int get_type_prefix(pj_ice_cand_type type)
+{
+ switch (type) {
+ case PJ_ICE_CAND_TYPE_HOST: return 'H';
+ case PJ_ICE_CAND_TYPE_SRFLX: return 'S';
+ case PJ_ICE_CAND_TYPE_PRFLX: return 'P';
+ case PJ_ICE_CAND_TYPE_RELAYED: return 'R';
+ default:
+ pj_assert(!"Invalid type");
+ return 'U';
+ }
+}
+
/* Calculate foundation:
 * Two candidates have the same foundation when they are "similar" - of
 * the same type and obtained from the same host candidate and STUN
 * server using the same protocol. Otherwise, their foundation is
 * different.
 */
PJ_DEF(void) pj_ice_calc_foundation(pj_pool_t *pool,
				    pj_str_t *foundation,
				    pj_ice_cand_type type,
				    const pj_sockaddr *base_addr)
{
#if PJNATH_ICE_PRIO_STD
    char buf[64];
    pj_uint32_t val;

    /* Derive a 32-bit value from the base address: the raw IPv4
     * address, or a hash of the address bytes for other families.
     */
    if (base_addr->addr.sa_family == pj_AF_INET()) {
	val = pj_ntohl(base_addr->ipv4.sin_addr.s_addr);
    } else {
	val = pj_hash_calc(0, pj_sockaddr_get_addr(base_addr),
			   pj_sockaddr_get_addr_len(base_addr));
    }
    /* Foundation string: type prefix letter followed by the value in hex */
    pj_ansi_snprintf(buf, sizeof(buf), "%c%x",
		     get_type_prefix(type), val);
    pj_strdup2(pool, foundation, buf);
#else
    /* Much shorter version, valid for candidates added by
     * pj_ice_strans: just the single type prefix letter.
     */
    foundation->ptr = (char*) pj_pool_alloc(pool, 1);
    *foundation->ptr = (char)get_type_prefix(type);
    foundation->slen = 1;

    PJ_UNUSED_ARG(base_addr);
#endif
}
+
+
/* Init component: create the per-component STUN session, attach the
 * back-pointer data, and install the short-term credential callbacks.
 * comp_id is 1-based.
 */
static pj_status_t init_comp(pj_ice_sess *ice,
			     unsigned comp_id,
			     pj_ice_sess_comp *comp)
{
    pj_stun_session_cb sess_cb;
    pj_stun_auth_cred auth_cred;
    stun_data *sd;
    pj_status_t status;

    /* Init STUN callbacks */
    pj_bzero(&sess_cb, sizeof(sess_cb));
    sess_cb.on_request_complete = &on_stun_request_complete;
    sess_cb.on_rx_indication = &on_stun_rx_indication;
    sess_cb.on_rx_request = &on_stun_rx_request;
    sess_cb.on_send_msg = &on_stun_send_msg;

    /* Create STUN session for this candidate */
    status = pj_stun_session_create(&ice->stun_cfg, NULL, 
			            &sess_cb, PJ_TRUE,
				    &comp->stun_sess);
    if (status != PJ_SUCCESS)
	return status;

    /* Associate data with this STUN session, so the session callbacks
     * can recover the ICE session and component.
     */
    sd = PJ_POOL_ZALLOC_T(ice->pool, struct stun_data);
    sd->ice = ice;
    sd->comp_id = comp_id;
    sd->comp = comp;
    pj_stun_session_set_user_data(comp->stun_sess, sd);

    /* Init STUN authentication credential; credentials are resolved
     * dynamically per-message by the stun_auth_* callbacks above.
     */
    pj_bzero(&auth_cred, sizeof(auth_cred));
    auth_cred.type = PJ_STUN_AUTH_CRED_DYNAMIC;
    auth_cred.data.dyn_cred.get_auth = &stun_auth_get_auth;
    auth_cred.data.dyn_cred.get_cred = &stun_auth_get_cred;
    auth_cred.data.dyn_cred.get_password = &stun_auth_get_password;
    auth_cred.data.dyn_cred.user_data = comp->stun_sess;
    pj_stun_session_set_credential(comp->stun_sess, PJ_STUN_AUTH_SHORT_TERM,
				   &auth_cred);

    return PJ_SUCCESS;
}
+
+
+/* Init options with default values */
+PJ_DEF(void) pj_ice_sess_options_default(pj_ice_sess_options *opt)
+{
+ opt->aggressive = PJ_TRUE;
+ opt->nominated_check_delay = PJ_ICE_NOMINATED_CHECK_DELAY;
+ opt->controlled_agent_want_nom_timeout =
+ ICE_CONTROLLED_AGENT_WAIT_NOMINATION_TIMEOUT;
+}
+
/*
 * Create ICE session: allocate the pool and session object, create the
 * mutex and one STUN session per component, and generate (or copy) the
 * local ufrag/password. On any failure the partially built session is
 * torn down via destroy_ice() and the error is returned.
 */
PJ_DEF(pj_status_t) pj_ice_sess_create(pj_stun_config *stun_cfg,
				       const char *name,
				       pj_ice_sess_role role,
				       unsigned comp_cnt,
				       const pj_ice_sess_cb *cb,
				       const pj_str_t *local_ufrag,
				       const pj_str_t *local_passwd,
				       pj_ice_sess **p_ice)
{
    pj_pool_t *pool;
    pj_ice_sess *ice;
    unsigned i;
    pj_status_t status;

    PJ_ASSERT_RETURN(stun_cfg && cb && p_ice, PJ_EINVAL);

    /* Default object name; also used as the pool name format */
    if (name == NULL)
	name = "icess%p";

    pool = pj_pool_create(stun_cfg->pf, name, PJNATH_POOL_LEN_ICE_SESS, 
			  PJNATH_POOL_INC_ICE_SESS, NULL);
    ice = PJ_POOL_ZALLOC_T(pool, pj_ice_sess);
    ice->pool = pool;
    ice->role = role;
    /* Random 64-bit tie-breaker, per the ICE role-conflict procedure */
    ice->tie_breaker.u32.hi = pj_rand();
    ice->tie_breaker.u32.lo = pj_rand();
    ice->prefs = cand_type_prefs;
    pj_ice_sess_options_default(&ice->opt);

    pj_timer_entry_init(&ice->timer, TIMER_NONE, (void*)ice, &on_timer);

    /* NOTE: "name" may contain a %p conversion consumed here */
    pj_ansi_snprintf(ice->obj_name, sizeof(ice->obj_name),
		     name, ice);

    status = pj_mutex_create_recursive(pool, ice->obj_name, 
				       &ice->mutex);
    if (status != PJ_SUCCESS) {
	destroy_ice(ice, status);
	return status;
    }

    pj_memcpy(&ice->cb, cb, sizeof(*cb));
    pj_memcpy(&ice->stun_cfg, stun_cfg, sizeof(*stun_cfg));

    /* Create one STUN session per component (comp IDs are 1-based) */
    ice->comp_cnt = comp_cnt;
    for (i=0; i<comp_cnt; ++i) {
	pj_ice_sess_comp *comp;
	comp = &ice->comp[i];
	comp->valid_check = NULL;
	comp->nominated_check = NULL;

	status = init_comp(ice, i+1, comp);
	if (status != PJ_SUCCESS) {
	    destroy_ice(ice, status);
	    return status;
	}
    }

    /* Initialize transport datas */
    for (i=0; i<PJ_ARRAY_SIZE(ice->tp_data); ++i) {
	ice->tp_data[i].transport_id = i;
	ice->tp_data[i].has_req_data = PJ_FALSE;
    }

    /* Local (RX) ufrag: generate randomly unless the caller supplied one */
    if (local_ufrag == NULL) {
	ice->rx_ufrag.ptr = (char*) pj_pool_alloc(ice->pool, PJ_ICE_UFRAG_LEN);
	pj_create_random_string(ice->rx_ufrag.ptr, PJ_ICE_UFRAG_LEN);
	ice->rx_ufrag.slen = PJ_ICE_UFRAG_LEN;
    } else {
	pj_strdup(ice->pool, &ice->rx_ufrag, local_ufrag);
    }

    /* Local (RX) password, same treatment */
    if (local_passwd == NULL) {
	ice->rx_pass.ptr = (char*) pj_pool_alloc(ice->pool, PJ_ICE_UFRAG_LEN);
	pj_create_random_string(ice->rx_pass.ptr, PJ_ICE_UFRAG_LEN);
	ice->rx_pass.slen = PJ_ICE_UFRAG_LEN;
    } else {
	pj_strdup(ice->pool, &ice->rx_pass, local_passwd);
    }

    pj_list_init(&ice->early_check);

    /* Done */
    *p_ice = ice;

    LOG4((ice->obj_name, 
	 "ICE session created, comp_cnt=%d, role is %s agent",
	 comp_cnt, role_names[ice->role]));

    return PJ_SUCCESS;
}
+
+
+/*
+ * Get the value of various options of the ICE session.
+ */
+PJ_DEF(pj_status_t) pj_ice_sess_get_options(pj_ice_sess *ice,
+ pj_ice_sess_options *opt)
+{
+ PJ_ASSERT_RETURN(ice, PJ_EINVAL);
+ pj_memcpy(opt, &ice->opt, sizeof(*opt));
+ return PJ_SUCCESS;
+}
+
+/*
+ * Specify various options for this ICE session.
+ */
+PJ_DEF(pj_status_t) pj_ice_sess_set_options(pj_ice_sess *ice,
+ const pj_ice_sess_options *opt)
+{
+ PJ_ASSERT_RETURN(ice && opt, PJ_EINVAL);
+ pj_memcpy(&ice->opt, opt, sizeof(*opt));
+ LOG5((ice->obj_name, "ICE nomination type set to %s",
+ (ice->opt.aggressive ? "aggressive" : "regular")));
+ return PJ_SUCCESS;
+}
+
+
/*
 * Destroy the session: cancel timers, destroy per-component STUN
 * sessions, then the mutex, and finally release the pool (which owns
 * the session object itself). "reason" is PJ_SUCCESS for a normal,
 * user-requested destroy; error codes come from failed construction.
 */
static void destroy_ice(pj_ice_sess *ice,
			pj_status_t reason)
{
    unsigned i;

    if (reason == PJ_SUCCESS) {
	LOG4((ice->obj_name, "Destroying ICE session"));
    }

    /* Let other callbacks finish: briefly acquire and release the
     * mutex so no other thread is still inside a locked section.
     */
    if (ice->mutex) {
	pj_mutex_lock(ice->mutex);
	pj_mutex_unlock(ice->mutex);
    }

    /* Cancel the main session timer, if armed */
    if (ice->timer.id) {
	pj_timer_heap_cancel(ice->stun_cfg.timer_heap, 
			     &ice->timer);
	ice->timer.id = PJ_FALSE;
    }

    for (i=0; i<ice->comp_cnt; ++i) {
	if (ice->comp[i].stun_sess) {
	    pj_stun_session_destroy(ice->comp[i].stun_sess);
	    ice->comp[i].stun_sess = NULL;
	}
    }

    /* Cancel the periodic-check timer of the checklist, if armed */
    if (ice->clist.timer.id) {
	pj_timer_heap_cancel(ice->stun_cfg.timer_heap, &ice->clist.timer);
	ice->clist.timer.id = PJ_FALSE;
    }

    if (ice->mutex) {
	pj_mutex_destroy(ice->mutex);
	ice->mutex = NULL;
    }

    /* Release the pool last; it backs every allocation above,
     * including the ice session struct itself.
     */
    if (ice->pool) {
	pj_pool_t *pool = ice->pool;
	ice->pool = NULL;
	pj_pool_release(pool);
    }
}
+
+
/*
 * Destroy: public wrapper around destroy_ice() with a "normal"
 * (PJ_SUCCESS) reason code.
 */
PJ_DEF(pj_status_t) pj_ice_sess_destroy(pj_ice_sess *ice)
{
    PJ_ASSERT_RETURN(ice, PJ_EINVAL);
    destroy_ice(ice, PJ_SUCCESS);
    return PJ_SUCCESS;
}
+
+
+/*
+ * Change session role.
+ */
+PJ_DEF(pj_status_t) pj_ice_sess_change_role(pj_ice_sess *ice,
+ pj_ice_sess_role new_role)
+{
+ PJ_ASSERT_RETURN(ice, PJ_EINVAL);
+
+ if (new_role != ice->role) {
+ ice->role = new_role;
+ LOG4((ice->obj_name, "Role changed to %s", role_names[new_role]));
+ }
+
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Change type preference
+ */
+PJ_DEF(pj_status_t) pj_ice_sess_set_prefs(pj_ice_sess *ice,
+ const pj_uint8_t prefs[4])
+{
+ unsigned i;
+ PJ_ASSERT_RETURN(ice && prefs, PJ_EINVAL);
+ ice->prefs = (pj_uint8_t*) pj_pool_calloc(ice->pool, PJ_ARRAY_SIZE(prefs),
+ sizeof(pj_uint8_t));
+ for (i=0; i<4; ++i) {
+#if PJ_ICE_CAND_TYPE_PREF_BITS < 8
+ pj_assert(prefs[i] < (2 << PJ_ICE_CAND_TYPE_PREF_BITS));
+#endif
+ ice->prefs[i] = prefs[i];
+ }
+ return PJ_SUCCESS;
+}
+
+
/* Find component by ID. Component IDs are 1-based, so comp_id N maps
 * to ice->comp[N-1]; const is cast away so callers can mutate the
 * component.
 */
static pj_ice_sess_comp *find_comp(const pj_ice_sess *ice, unsigned comp_id)
{
    pj_assert(comp_id > 0 && comp_id <= ice->comp_cnt);
    return (pj_ice_sess_comp*) &ice->comp[comp_id-1];
}
+
+
+/* Callback by STUN authentication when it needs to send 401 */
+static pj_status_t stun_auth_get_auth(void *user_data,
+ pj_pool_t *pool,
+ pj_str_t *realm,
+ pj_str_t *nonce)
+{
+ PJ_UNUSED_ARG(user_data);
+ PJ_UNUSED_ARG(pool);
+
+ realm->slen = 0;
+ nonce->slen = 0;
+
+ return PJ_SUCCESS;
+}
+
+
+/* Get credential to be sent with outgoing message */
+static pj_status_t stun_auth_get_cred(const pj_stun_msg *msg,
+ void *user_data,
+ pj_pool_t *pool,
+ pj_str_t *realm,
+ pj_str_t *username,
+ pj_str_t *nonce,
+ pj_stun_passwd_type *data_type,
+ pj_str_t *data)
+{
+ pj_stun_session *sess = (pj_stun_session *)user_data;
+ stun_data *sd = (stun_data*) pj_stun_session_get_user_data(sess);
+ pj_ice_sess *ice = sd->ice;
+
+ PJ_UNUSED_ARG(pool);
+ realm->slen = nonce->slen = 0;
+
+ if (PJ_STUN_IS_RESPONSE(msg->hdr.type)) {
+ /* Outgoing responses need to have the same credential as
+ * incoming requests.
+ */
+ *username = ice->rx_uname;
+ *data_type = PJ_STUN_PASSWD_PLAIN;
+ *data = ice->rx_pass;
+ }
+ else {
+ *username = ice->tx_uname;
+ *data_type = PJ_STUN_PASSWD_PLAIN;
+ *data = ice->tx_pass;
+ }
+
+ return PJ_SUCCESS;
+}
+
/* Get password to be used to authenticate incoming message.
 * Responses are verified against the TX credential; requests against
 * the RX credential, after validating the ufrag part of the username.
 */
static pj_status_t stun_auth_get_password(const pj_stun_msg *msg,
					  void *user_data, 
					  const pj_str_t *realm,
					  const pj_str_t *username,
					  pj_pool_t *pool,
					  pj_stun_passwd_type *data_type,
					  pj_str_t *data)
{
    pj_stun_session *sess = (pj_stun_session *)user_data;
    stun_data *sd = (stun_data*) pj_stun_session_get_user_data(sess);
    pj_ice_sess *ice = sd->ice;

    PJ_UNUSED_ARG(realm);
    PJ_UNUSED_ARG(pool);

    if (PJ_STUN_IS_SUCCESS_RESPONSE(msg->hdr.type) ||
	PJ_STUN_IS_ERROR_RESPONSE(msg->hdr.type))
    {
	/* Incoming response is authenticated with TX credential */
	/* Verify username */
	if (pj_strcmp(username, &ice->tx_uname) != 0)
	    return PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_UNAUTHORIZED);
	*data_type = PJ_STUN_PASSWD_PLAIN;
	*data = ice->tx_pass;

    } else {
	/* Incoming request is authenticated with RX credential */
	/* The agent MUST accept a credential if the username consists
	 * of two values separated by a colon, where the first value is
	 * equal to the username fragment generated by the agent in an offer
	 * or answer for a session in-progress, and the MESSAGE-INTEGRITY 
	 * is the output of a hash of the password and the STUN packet's 
	 * contents. 
	 */
	const char *pos;
	pj_str_t ufrag;

	/* Split "local-ufrag:remote-ufrag" at the colon */
	pos = (const char*)pj_memchr(username->ptr, ':', username->slen);
	if (pos == NULL)
	    return PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_UNAUTHORIZED);

	ufrag.ptr = (char*)username->ptr;
	ufrag.slen = (pos - username->ptr);

	/* The first part must match our own (RX) ufrag */
	if (pj_strcmp(&ufrag, &ice->rx_ufrag) != 0)
	    return PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_UNAUTHORIZED);

	*data_type = PJ_STUN_PASSWD_PLAIN;
	*data = ice->rx_pass;

    }

    return PJ_SUCCESS;
}
+
+
/* Compute a candidate's 32-bit priority from its type preference,
 * local preference and component ID (higher comp_id => lower priority),
 * per the ICE priority formula.
 */
static pj_uint32_t CALC_CAND_PRIO(pj_ice_sess *ice,
				  pj_ice_cand_type type,
				  pj_uint32_t local_pref,
				  pj_uint32_t comp_id)
{
#if PJNATH_ICE_PRIO_STD
    /* Standard layout: (type_pref << 24) | (local_pref << 8) | (256 - comp_id) */
    return ((ice->prefs[type] & 0xFF) << 24) + 
	   ((local_pref & 0xFFFF)    << 8) +
	   (((256 - comp_id) & 0xFF) << 0);
#else
    /* Compact layout using the configured bit widths.
     * NOTE(review): "(2 << BITS) - 1" produces a mask of BITS+1 bits,
     * not BITS bits ("(1 << BITS) - 1") — matches upstream, but worth
     * confirming the intent before touching these widths.
     */
    enum {
	type_mask   = ((2 << PJ_ICE_CAND_TYPE_PREF_BITS) - 1),
	local_mask  = ((2 << PJ_ICE_LOCAL_PREF_BITS) - 1),
	comp_mask   = ((2 << PJ_ICE_COMP_BITS) - 1),

	comp_shift  = 0,
	local_shift = (PJ_ICE_COMP_BITS),
	type_shift  = (comp_shift + local_shift),

	max_comp    = (2<<PJ_ICE_COMP_BITS),
    };

    return ((ice->prefs[type] & type_mask) << type_shift) + 
	   ((local_pref & local_mask) << local_shift) +
	   (((max_comp - comp_id) & comp_mask) << comp_shift);
#endif
}
+
+
+/*
+ * Add ICE candidate
+ */
+PJ_DEF(pj_status_t) pj_ice_sess_add_cand(pj_ice_sess *ice,
+ unsigned comp_id,
+ unsigned transport_id,
+ pj_ice_cand_type type,
+ pj_uint16_t local_pref,
+ const pj_str_t *foundation,
+ const pj_sockaddr_t *addr,
+ const pj_sockaddr_t *base_addr,
+ const pj_sockaddr_t *rel_addr,
+ int addr_len,
+ unsigned *p_cand_id)
+{
+ pj_ice_sess_cand *lcand;
+ pj_status_t status = PJ_SUCCESS;
+
+ PJ_ASSERT_RETURN(ice && comp_id &&
+ foundation && addr && base_addr && addr_len,
+ PJ_EINVAL);
+ PJ_ASSERT_RETURN(comp_id <= ice->comp_cnt, PJ_EINVAL);
+
+ pj_mutex_lock(ice->mutex);
+
+ if (ice->lcand_cnt >= PJ_ARRAY_SIZE(ice->lcand)) {
+ status = PJ_ETOOMANY;
+ goto on_error;
+ }
+
+ lcand = &ice->lcand[ice->lcand_cnt];
+ lcand->comp_id = (pj_uint8_t)comp_id;
+ lcand->transport_id = (pj_uint8_t)transport_id;
+ lcand->type = type;
+ pj_strdup(ice->pool, &lcand->foundation, foundation);
+ lcand->prio = CALC_CAND_PRIO(ice, type, local_pref, lcand->comp_id);
+ pj_memcpy(&lcand->addr, addr, addr_len);
+ pj_memcpy(&lcand->base_addr, base_addr, addr_len);
+ if (rel_addr == NULL)
+ rel_addr = base_addr;
+ pj_memcpy(&lcand->rel_addr, rel_addr, addr_len);
+
+ pj_ansi_strcpy(ice->tmp.txt, pj_inet_ntoa(lcand->addr.ipv4.sin_addr));
+ LOG4((ice->obj_name,
+ "Candidate %d added: comp_id=%d, type=%s, foundation=%.*s, "
+ "addr=%s:%d, base=%s:%d, prio=0x%x (%u)",
+ ice->lcand_cnt,
+ lcand->comp_id,
+ cand_type_names[lcand->type],
+ (int)lcand->foundation.slen,
+ lcand->foundation.ptr,
+ ice->tmp.txt,
+ (int)pj_ntohs(lcand->addr.ipv4.sin_port),
+ pj_inet_ntoa(lcand->base_addr.ipv4.sin_addr),
+ (int)pj_htons(lcand->base_addr.ipv4.sin_port),
+ lcand->prio, lcand->prio));
+
+ if (p_cand_id)
+ *p_cand_id = ice->lcand_cnt;
+
+ ++ice->lcand_cnt;
+
+on_error:
+ pj_mutex_unlock(ice->mutex);
+ return status;
+}
+
+
+/* Find default candidate ID for the component */
+PJ_DEF(pj_status_t) pj_ice_sess_find_default_cand(pj_ice_sess *ice,
+ unsigned comp_id,
+ int *cand_id)
+{
+ unsigned i;
+
+ PJ_ASSERT_RETURN(ice && comp_id && cand_id, PJ_EINVAL);
+ PJ_ASSERT_RETURN(comp_id <= ice->comp_cnt, PJ_EINVAL);
+
+ *cand_id = -1;
+
+ pj_mutex_lock(ice->mutex);
+
+ /* First find in valid list if we have nominated pair */
+ for (i=0; i<ice->valid_list.count; ++i) {
+ pj_ice_sess_check *check = &ice->valid_list.checks[i];
+
+ if (check->lcand->comp_id == comp_id) {
+ *cand_id = GET_LCAND_ID(check->lcand);
+ pj_mutex_unlock(ice->mutex);
+ return PJ_SUCCESS;
+ }
+ }
+
+ /* If there's no nominated pair, find relayed candidate */
+ for (i=0; i<ice->lcand_cnt; ++i) {
+ pj_ice_sess_cand *lcand = &ice->lcand[i];
+ if (lcand->comp_id==comp_id &&
+ lcand->type == PJ_ICE_CAND_TYPE_RELAYED)
+ {
+ *cand_id = GET_LCAND_ID(lcand);
+ pj_mutex_unlock(ice->mutex);
+ return PJ_SUCCESS;
+ }
+ }
+
+ /* If there's no relayed candidate, find reflexive candidate */
+ for (i=0; i<ice->lcand_cnt; ++i) {
+ pj_ice_sess_cand *lcand = &ice->lcand[i];
+ if (lcand->comp_id==comp_id &&
+ (lcand->type == PJ_ICE_CAND_TYPE_SRFLX ||
+ lcand->type == PJ_ICE_CAND_TYPE_PRFLX))
+ {
+ *cand_id = GET_LCAND_ID(lcand);
+ pj_mutex_unlock(ice->mutex);
+ return PJ_SUCCESS;
+ }
+ }
+
+ /* Otherwise return host candidate */
+ for (i=0; i<ice->lcand_cnt; ++i) {
+ pj_ice_sess_cand *lcand = &ice->lcand[i];
+ if (lcand->comp_id==comp_id &&
+ lcand->type == PJ_ICE_CAND_TYPE_HOST)
+ {
+ *cand_id = GET_LCAND_ID(lcand);
+ pj_mutex_unlock(ice->mutex);
+ return PJ_SUCCESS;
+ }
+ }
+
+ /* Still no candidate is found! :( */
+ pj_mutex_unlock(ice->mutex);
+
+ pj_assert(!"Should have a candidate by now");
+ return PJ_EBUG;
+}
+
+
#ifndef MIN
/* Fully parenthesized so the macros are safe inside larger
 * expressions and with low-precedence argument expressions such as
 * MIN(x & 1, y) — the original unparenthesized expansion
 * (a < b ? a : b) mis-parsed those. Arguments are still evaluated
 * more than once; avoid side effects.
 */
#   define MIN(a,b) (((a) < (b)) ? (a) : (b))
#endif

#ifndef MAX
#   define MAX(a,b) (((a) > (b)) ? (a) : (b))
#endif
+
/* Compute the 64-bit pair priority for a local/remote candidate pair,
 * returned as a pj_timestamp (hi/lo 32-bit halves).
 */
static pj_timestamp CALC_CHECK_PRIO(const pj_ice_sess *ice, 
				    const pj_ice_sess_cand *lcand,
				    const pj_ice_sess_cand *rcand)
{
    pj_uint32_t O, A;
    pj_timestamp prio;

    /* Original formula:
     *   pair priority = 2^32*MIN(O,A) + 2*MAX(O,A) + (O>A?1:0)
     */

    /* O = controlling agent's candidate priority,
     * A = controlled agent's candidate priority
     */
    if (ice->role == PJ_ICE_SESS_ROLE_CONTROLLING) {
	O = lcand->prio; 
	A = rcand->prio;
    } else {
	O = rcand->prio;
	A = lcand->prio;
    }

    /*
    return ((pj_uint64_t)1 << 32) * MIN(O, A) +
	   (pj_uint64_t)2 * MAX(O, A) + (O>A ? 1 : 0);
    */

    /* NOTE(review): the lo half is MAX(O,A)*2 (+1) computed in 32 bits,
     * so the top bit of MAX(O,A) is shifted out rather than carried
     * into hi as the exact 64-bit formula above would do — confirm
     * this is acceptable for the priority ordering.
     */
    prio.u32.hi = MIN(O,A);
    prio.u32.lo = (MAX(O, A) << 1) + (O>A ? 1 : 0);

    return prio;
}
+
+
/* Compare two checks by pair priority: negative/zero/positive when
 * c1's priority is less-than/equal/greater-than c2's.
 */
PJ_INLINE(int) CMP_CHECK_PRIO(const pj_ice_sess_check *c1,
			      const pj_ice_sess_check *c2)
{
    return pj_cmp_timestamp(&c1->prio, &c2->prio);
}
+
+
+#if PJ_LOG_MAX_LEVEL >= 4
/* Format a one-line description of a check ("id: [comp] laddr->raddr")
 * into the caller's buffer, for logging. Only IPv4 pairs are rendered
 * in full; other families get a placeholder string.
 */
static const char *dump_check(char *buffer, unsigned bufsize,
			      const pj_ice_sess_checklist *clist,
			      const pj_ice_sess_check *check)
{
    const pj_ice_sess_cand *lcand = check->lcand;
    const pj_ice_sess_cand *rcand = check->rcand;
    char laddr[PJ_INET6_ADDRSTRLEN];
    int len;

    PJ_CHECK_STACK();

    /* pj_inet_ntoa() returns a shared static buffer, so the local
     * address must be copied out before formatting the remote one.
     */
    pj_ansi_strcpy(laddr, pj_inet_ntoa(lcand->addr.ipv4.sin_addr));

    if (lcand->addr.addr.sa_family == pj_AF_INET()) {
	len = pj_ansi_snprintf(buffer, bufsize,
			       "%d: [%d] %s:%d-->%s:%d",
			       (int)GET_CHECK_ID(clist, check),
			       check->lcand->comp_id,
			       laddr, (int)pj_ntohs(lcand->addr.ipv4.sin_port),
			       pj_inet_ntoa(rcand->addr.ipv4.sin_addr),
			       (int)pj_ntohs(rcand->addr.ipv4.sin_port));
    } else {
	len = pj_ansi_snprintf(buffer, bufsize, "IPv6->IPv6");
    }


    /* Clamp and terminate on truncation or snprintf error */
    if (len < 0)
	len = 0;
    else if (len >= (int)bufsize)
	len = bufsize - 1;

    buffer[len] = '\0';
    return buffer;
}
+
/* Log every check in the checklist at level 4, one line per check,
 * with its nomination flag and state.
 */
static void dump_checklist(const char *title, pj_ice_sess *ice, 
			   const pj_ice_sess_checklist *clist)
{
    unsigned i;

    LOG4((ice->obj_name, "%s", title));
    for (i=0; i<clist->count; ++i) {
	const pj_ice_sess_check *c = &clist->checks[i];
	LOG4((ice->obj_name, " %s (%s, state=%s)",
	     dump_check(ice->tmp.txt, sizeof(ice->tmp.txt), clist, c),
	     (c->nominated ? "nominated" : "not nominated"), 
	     check_state_name[c->state]));
    }
}
+
+#else
+#define dump_checklist(title, ice, clist)
+#endif
+
/* Transition a check to a new state, recording err_code and logging
 * the transition. The assertion enforces that a check which already
 * reached a terminal state (Succeeded/Failed) is never changed again.
 */
static void check_set_state(pj_ice_sess *ice, pj_ice_sess_check *check,
			    pj_ice_sess_check_state st, 
			    pj_status_t err_code)
{
    pj_assert(check->state < PJ_ICE_SESS_CHECK_STATE_SUCCEEDED);

    LOG5((ice->obj_name, "Check %s: state changed from %s to %s",
	 dump_check(ice->tmp.txt, sizeof(ice->tmp.txt), &ice->clist, check),
	 check_state_name[check->state],
	 check_state_name[st]));
    check->state = st;
    check->err_code = err_code;
}
+
+static void clist_set_state(pj_ice_sess *ice, pj_ice_sess_checklist *clist,
+ pj_ice_sess_checklist_state st)
+{
+ if (clist->state != st) {
+ LOG5((ice->obj_name, "Checklist: state changed from %s to %s",
+ clist_state_name[clist->state],
+ clist_state_name[st]));
+ clist->state = st;
+ }
+}
+
+/* Sort checklist based on priority */
+static void sort_checklist(pj_ice_sess *ice, pj_ice_sess_checklist *clist)
+{
+ unsigned i;
+ pj_ice_sess_check **check_ptr[PJ_ICE_MAX_COMP*2];
+ unsigned check_ptr_cnt = 0;
+
+ for (i=0; i<ice->comp_cnt; ++i) {
+ if (ice->comp[i].valid_check) {
+ check_ptr[check_ptr_cnt++] = &ice->comp[i].valid_check;
+ }
+ if (ice->comp[i].nominated_check) {
+ check_ptr[check_ptr_cnt++] = &ice->comp[i].nominated_check;
+ }
+ }
+
+ for (i=0; i<clist->count-1; ++i) {
+ unsigned j, highest = i;
+
+ for (j=i+1; j<clist->count; ++j) {
+ if (CMP_CHECK_PRIO(&clist->checks[j], &clist->checks[highest]) > 0) {
+ highest = j;
+ }
+ }
+
+ if (highest != i) {
+ pj_ice_sess_check tmp;
+ unsigned k;
+
+ pj_memcpy(&tmp, &clist->checks[i], sizeof(pj_ice_sess_check));
+ pj_memcpy(&clist->checks[i], &clist->checks[highest],
+ sizeof(pj_ice_sess_check));
+ pj_memcpy(&clist->checks[highest], &tmp,
+ sizeof(pj_ice_sess_check));
+
+ /* Update valid and nominated check pointers, since we're moving
+ * around checks
+ */
+ for (k=0; k<check_ptr_cnt; ++k) {
+ if (*check_ptr[k] == &clist->checks[highest])
+ *check_ptr[k] = &clist->checks[i];
+ else if (*check_ptr[k] == &clist->checks[i])
+ *check_ptr[k] = &clist->checks[highest];
+ }
+ }
+ }
+}
+
enum 
{ 
    SOCKADDR_EQUAL = 0, 
    SOCKADDR_NOT_EQUAL = 1 
};

/* Utility: compare sockaddr (family, address, and port).
 * Returns 0 if equal. Note the IPv6 branch returns pj_memcmp()'s
 * result directly, so "not equal" may be any non-zero value, not
 * necessarily SOCKADDR_NOT_EQUAL.
 */
static int sockaddr_cmp(const pj_sockaddr *a1, const pj_sockaddr *a2)
{
    if (a1->addr.sa_family != a2->addr.sa_family)
	return SOCKADDR_NOT_EQUAL;

    if (a1->addr.sa_family == pj_AF_INET()) {
	return !(a1->ipv4.sin_addr.s_addr == a2->ipv4.sin_addr.s_addr &&
		 a1->ipv4.sin_port == a2->ipv4.sin_port);
    } else if (a1->addr.sa_family == pj_AF_INET6()) {
	return pj_memcmp(&a1->ipv6, &a2->ipv6, sizeof(a1->ipv6));
    } else {
	pj_assert(!"Invalid address family!");
	return SOCKADDR_NOT_EQUAL;
    }
}
+
+
/* Prune checklist, this must have been done after the checklist
 * is sorted. Replaces srflx local candidates with their host base,
 * then removes redundant pairs.
 */
static pj_status_t prune_checklist(pj_ice_sess *ice, 
				   pj_ice_sess_checklist *clist)
{
    unsigned i;

    /* Since an agent cannot send requests directly from a reflexive
     * candidate, but only from its base, the agent next goes through the
     * sorted list of candidate pairs. For each pair where the local
     * candidate is server reflexive, the server reflexive candidate MUST be
     * replaced by its base. Once this has been done, the agent MUST prune
     * the list. This is done by removing a pair if its local and remote
     * candidates are identical to the local and remote candidates of a pair
     * higher up on the priority list. The result is a sequence of ordered
     * candidate pairs, called the check list for that media stream.
     */
    /* First replace SRFLX candidates with their base */
    for (i=0; i<clist->count; ++i) {
	pj_ice_sess_cand *srflx = clist->checks[i].lcand;

	if (clist->checks[i].lcand->type == PJ_ICE_CAND_TYPE_SRFLX) {
	    /* Find the base for this candidate */
	    unsigned j;
	    for (j=0; j<ice->lcand_cnt; ++j) {
		pj_ice_sess_cand *host = &ice->lcand[j];

		if (host->type != PJ_ICE_CAND_TYPE_HOST)
		    continue;

		if (sockaddr_cmp(&srflx->base_addr, &host->addr) == 0) {
		    /* Replace this SRFLX with its BASE */
		    clist->checks[i].lcand = host;
		    break;
		}
	    }

	    if (j==ice->lcand_cnt) {
		/* Host candidate not found for this srflx! */
		LOG4((ice->obj_name, 
		      "Base candidate %s:%d not found for srflx candidate %d",
		      pj_inet_ntoa(srflx->base_addr.ipv4.sin_addr),
		      pj_ntohs(srflx->base_addr.ipv4.sin_port),
		      GET_LCAND_ID(clist->checks[i].lcand)));
		return PJNATH_EICENOHOSTCAND;
	    }
	}
    }

    /* Next remove a pair if its local and remote candidates are identical
     * to the local and remote candidates of a pair higher up on the priority
     * list
     */
    /*
     * Not in ICE!
     * Remove host candidates if their base are the same!
     */
    for (i=0; i<clist->count; ++i) {
	pj_ice_sess_cand *licand = clist->checks[i].lcand;
	pj_ice_sess_cand *ricand = clist->checks[i].rcand;
	unsigned j;

	/* Compare pair [i] against every lower-priority pair [j];
	 * j advances only when nothing was erased at position j.
	 */
	for (j=i+1; j<clist->count;) {
	    pj_ice_sess_cand *ljcand = clist->checks[j].lcand;
	    pj_ice_sess_cand *rjcand = clist->checks[j].rcand;
	    const char *reason = NULL;

	    if ((licand == ljcand) && (ricand == rjcand)) {
		reason = "duplicate found";
	    } else if ((rjcand == ricand) && 
		       (sockaddr_cmp(&ljcand->base_addr, 
				     &licand->base_addr)==0)) 
	    {
		reason = "equal base";
	    }

	    if (reason != NULL) {
		/* Found duplicate, remove it */
		LOG5((ice->obj_name, "Check %s pruned (%s)",
		      dump_check(ice->tmp.txt, sizeof(ice->tmp.txt), 
				 &ice->clist, &clist->checks[j]),
		      reason));

		pj_array_erase(clist->checks, sizeof(clist->checks[0]),
			       clist->count, j);
		--clist->count;

	    } else {
		++j;
	    }
	}
    }

    return PJ_SUCCESS;
}
+
/* Timer callback: dispatches the single session timer (ice->timer)
 * according to the timer_type stored in its id field. The timer id is
 * cleared before dispatching so handlers may re-arm it.
 */
static void on_timer(pj_timer_heap_t *th, pj_timer_entry *te)
{
    pj_ice_sess *ice = (pj_ice_sess*) te->user_data;
    enum timer_type type = (enum timer_type)te->id;
    pj_bool_t has_mutex = PJ_TRUE;

    PJ_UNUSED_ARG(th);

    pj_mutex_lock(ice->mutex);

    te->id = TIMER_NONE;

    switch (type) {
    case TIMER_CONTROLLED_WAIT_NOM:
	LOG4((ice->obj_name, 
	      "Controlled agent timed-out in waiting for the controlling "
	      "agent to send nominated check. Setting state to fail now.."));
	on_ice_complete(ice, PJNATH_EICENOMTIMEOUT);
	break;
    case TIMER_COMPLETION_CALLBACK:
	{
	    void (*on_ice_complete)(pj_ice_sess *ice, pj_status_t status);
	    pj_status_t ice_status;

	    /* Start keep-alive timer but don't send any packets yet.
	     * Need to do it here just in case app destroy the session
	     * in the callback.
	     */
	    if (ice->ice_status == PJ_SUCCESS)
		ice_keep_alive(ice, PJ_FALSE);

	    /* Release mutex in case app destroy us in the callback;
	     * the callback pointer and status are copied to locals
	     * first since "ice" may be gone afterwards.
	     */
	    ice_status = ice->ice_status;
	    on_ice_complete = ice->cb.on_ice_complete;
	    has_mutex = PJ_FALSE;
	    pj_mutex_unlock(ice->mutex);

	    /* Notify app about ICE completion*/
	    if (on_ice_complete)
		(*on_ice_complete)(ice, ice_status);
	}
	break;
    case TIMER_START_NOMINATED_CHECK:
	start_nominated_check(ice);
	break;
    case TIMER_KEEP_ALIVE:
	ice_keep_alive(ice, PJ_TRUE);
	break;
    case TIMER_NONE:
	/* Nothing to do, just to get rid of gcc warning */
	break;
    }

    if (has_mutex)
	pj_mutex_unlock(ice->mutex);
}
+
/* Send keep-alive: optionally send a Binding Indication on one
 * component (rotating through components on successive calls), then
 * re-arm the session timer with a randomized keep-alive interval.
 */
static void ice_keep_alive(pj_ice_sess *ice, pj_bool_t send_now)
{
    if (send_now) {
	/* Send Binding Indication for the component */
	pj_ice_sess_comp *comp = &ice->comp[ice->comp_ka];
	pj_stun_tx_data *tdata;
	pj_ice_sess_check *the_check;
	pj_ice_msg_data *msg_data;
	int addr_len;
	pj_bool_t saved;
	pj_status_t status;

	/* Must have nominated check by now */
	pj_assert(comp->nominated_check != NULL);
	the_check = comp->nominated_check;

	/* Create the Binding Indication */
	status = pj_stun_session_create_ind(comp->stun_sess, 
					    PJ_STUN_BINDING_INDICATION,
					    &tdata);
	if (status != PJ_SUCCESS)
	    goto done;

	/* Need the transport_id so the send callback routes the packet
	 * through the right transport.
	 */
	msg_data = PJ_POOL_ZALLOC_T(tdata->pool, pj_ice_msg_data);
	msg_data->transport_id = the_check->lcand->transport_id;

	/* Temporarily disable FINGERPRINT. The Binding Indication 
	 * SHOULD NOT contain any attributes.
	 */
	saved = pj_stun_session_use_fingerprint(comp->stun_sess, PJ_FALSE);

	/* Send to session */
	addr_len = pj_sockaddr_get_len(&the_check->rcand->addr);
	status = pj_stun_session_send_msg(comp->stun_sess, msg_data,
					  PJ_FALSE, PJ_FALSE, 
					  &the_check->rcand->addr, 
					  addr_len, tdata);

	/* Restore FINGERPRINT usage */
	pj_stun_session_use_fingerprint(comp->stun_sess, saved);

done:
	/* Advance to the next component for the following keep-alive */
	ice->comp_ka = (ice->comp_ka + 1) % ice->comp_cnt;
    }

    if (ice->timer.id == TIMER_NONE) {
	pj_time_val delay = { 0, 0 };

	/* Randomized interval, divided by comp_cnt since components
	 * are serviced round-robin, one per timer tick.
	 */
	delay.msec = (PJ_ICE_SESS_KEEP_ALIVE_MIN + 
		      (pj_rand() % PJ_ICE_SESS_KEEP_ALIVE_MAX_RAND)) * 1000 /
		     ice->comp_cnt;
	pj_time_val_normalize(&delay);

	ice->timer.id = TIMER_KEEP_ALIVE;
	pj_timer_heap_schedule(ice->stun_cfg.timer_heap, &ice->timer, &delay);

    } else {
	pj_assert(!"Not expected any timer active");
    }
}
+
+/* This function is called when ICE processing completes */
+static void on_ice_complete(pj_ice_sess *ice, pj_status_t status)
+{
+ if (!ice->is_complete) {
+ ice->is_complete = PJ_TRUE;
+ ice->ice_status = status;
+
+ if (ice->timer.id != TIMER_NONE) {
+ pj_timer_heap_cancel(ice->stun_cfg.timer_heap, &ice->timer);
+ ice->timer.id = TIMER_NONE;
+ }
+
+ /* Log message */
+ LOG4((ice->obj_name, "ICE process complete, status=%s",
+ pj_strerror(status, ice->tmp.errmsg,
+ sizeof(ice->tmp.errmsg)).ptr));
+
+ dump_checklist("Valid list", ice, &ice->valid_list);
+
+ /* Call callback */
+ if (ice->cb.on_ice_complete) {
+ pj_time_val delay = {0, 0};
+
+ ice->timer.id = TIMER_COMPLETION_CALLBACK;
+ pj_timer_heap_schedule(ice->stun_cfg.timer_heap,
+ &ice->timer, &delay);
+ }
+ }
+}
+
+/* Update valid check and nominated check for the candidate */
+static void update_comp_check(pj_ice_sess *ice, unsigned comp_id,
+ pj_ice_sess_check *check)
+{
+ pj_ice_sess_comp *comp;
+
+ comp = find_comp(ice, comp_id);
+ if (comp->valid_check == NULL) {
+ comp->valid_check = check;
+ } else {
+ if (CMP_CHECK_PRIO(comp->valid_check, check) < 0)
+ comp->valid_check = check;
+ }
+
+ if (check->nominated) {
+ /* Update the nominated check for the component */
+ if (comp->nominated_check == NULL) {
+ comp->nominated_check = check;
+ } else {
+ if (CMP_CHECK_PRIO(comp->nominated_check, check) < 0)
+ comp->nominated_check = check;
+ }
+ }
+}
+
/* This function is called when one connectivity check completes (either
 * successfully or with failure). It updates the states of related pairs
 * per RFC 5245 sections 7.1.2.2.2, 7.1.2.3 and 8.2, and decides whether
 * the whole ICE session has concluded.
 *
 * Returns PJ_TRUE if ICE processing has finished (on_ice_complete() has
 * been invoked, successfully or not), PJ_FALSE if checks are still in
 * progress. Caller must hold ice->mutex.
 */
static pj_bool_t on_check_complete(pj_ice_sess *ice,
				   pj_ice_sess_check *check)
{
    pj_ice_sess_comp *comp;
    unsigned i;

    pj_assert(check->state >= PJ_ICE_SESS_CHECK_STATE_SUCCEEDED);

    /* Component of the local candidate that generated this check */
    comp = find_comp(ice, check->lcand->comp_id);

    /* 7.1.2.2.2. Updating Pair States
     *
     * The agent sets the state of the pair that generated the check to
     * Succeeded. The success of this check might also cause the state of
     * other checks to change as well. The agent MUST perform the following
     * two steps:
     *
     * 1. The agent changes the states for all other Frozen pairs for the
     *    same media stream and same foundation to Waiting. Typically
     *    these other pairs will have different component IDs but not
     *    always.
     */
    if (check->err_code==PJ_SUCCESS) {

	for (i=0; i<ice->clist.count; ++i) {
	    pj_ice_sess_check *c = &ice->clist.checks[i];
	    if (pj_strcmp(&c->lcand->foundation, &check->lcand->foundation)==0
		 && c->state == PJ_ICE_SESS_CHECK_STATE_FROZEN)
	    {
		check_set_state(ice, c, PJ_ICE_SESS_CHECK_STATE_WAITING, 0);
	    }
	}

	LOG5((ice->obj_name, "Check %d is successful%s",
	     GET_CHECK_ID(&ice->clist, check),
	     (check->nominated ? "  and nominated" : "")));

    }

    /* 8.2.  Updating States
     *
     * For both controlling and controlled agents, the state of ICE
     * processing depends on the presence of nominated candidate pairs in
     * the valid list and on the state of the check list:
     *
     * o  If there are no nominated pairs in the valid list for a media
     *    stream and the state of the check list is Running, ICE processing
     *    continues.
     *
     * o  If there is at least one nominated pair in the valid list:
     *
     *    - The agent MUST remove all Waiting and Frozen pairs in the check
     *      list for the same component as the nominated pairs for that
     *      media stream
     *
     *    - If an In-Progress pair in the check list is for the same
     *      component as a nominated pair, the agent SHOULD cease
     *      retransmissions for its check if its pair priority is lower
     *      than the lowest priority nominated pair for that component
     */
    if (check->err_code==PJ_SUCCESS && check->nominated) {

	for (i=0; i<ice->clist.count; ++i) {

	    pj_ice_sess_check *c = &ice->clist.checks[i];

	    if (c->lcand->comp_id == check->lcand->comp_id) {

		if (c->state < PJ_ICE_SESS_CHECK_STATE_IN_PROGRESS) {

		    /* Just fail Frozen/Waiting check */
		    LOG5((ice->obj_name,
			 "Check %s to be failed because state is %s",
			 dump_check(ice->tmp.txt, sizeof(ice->tmp.txt),
				    &ice->clist, c),
			 check_state_name[c->state]));
		    check_set_state(ice, c, PJ_ICE_SESS_CHECK_STATE_FAILED,
				    PJ_ECANCELLED);

		} else if (c->state == PJ_ICE_SESS_CHECK_STATE_IN_PROGRESS
			   && (PJ_ICE_CANCEL_ALL ||
			        CMP_CHECK_PRIO(c, check) < 0)) {

		    /* State is IN_PROGRESS, cancel transaction */
		    if (c->tdata) {
			LOG5((ice->obj_name,
			     "Cancelling check %s (In Progress)",
			     dump_check(ice->tmp.txt, sizeof(ice->tmp.txt),
					&ice->clist, c)));
			pj_stun_session_cancel_req(comp->stun_sess,
						   c->tdata, PJ_FALSE, 0);
			c->tdata = NULL;
			check_set_state(ice, c, PJ_ICE_SESS_CHECK_STATE_FAILED,
					PJ_ECANCELLED);
		    }
		}
	    }
	}
    }


    /* Still in 8.2.  Updating States
     *
     * o  Once there is at least one nominated pair in the valid list for
     *    every component of at least one media stream and the state of the
     *    check list is Running:
     *
     *    *  The agent MUST change the state of processing for its check
     *       list for that media stream to Completed.
     *
     *    *  The agent MUST continue to respond to any checks it may still
     *       receive for that media stream, and MUST perform triggered
     *       checks if required by the processing of Section 7.2.
     *
     *    *  The agent MAY begin transmitting media for this media stream as
     *       described in Section 11.1
     */

    /* See if all components have nominated pair. If they do, then mark
     * ICE processing as success, otherwise wait.
     */
    for (i=0; i<ice->comp_cnt; ++i) {
	if (ice->comp[i].nominated_check == NULL)
	    break;
    }
    if (i == ice->comp_cnt) {
	/* All components have nominated pair */
	on_ice_complete(ice, PJ_SUCCESS);
	return PJ_TRUE;
    }

    /* Note: this is the stuffs that we don't do in 7.1.2.2.2, since our
     *       ICE session only supports one media stream for now:
     *
     * 7.1.2.2.2.  Updating Pair States
     *
     * 2.  If there is a pair in the valid list for every component of this
     *     media stream (where this is the actual number of components being
     *     used, in cases where the number of components signaled in the SDP
     *     differs from offerer to answerer), the success of this check may
     *     unfreeze checks for other media streams.
     */

    /* 7.1.2.3.  Check List and Timer State Updates
     * Regardless of whether the check was successful or failed, the
     * completion of the transaction may require updating of check list and
     * timer states.
     *
     * If all of the pairs in the check list are now either in the Failed or
     * Succeeded state, and there is not a pair in the valid list for each
     * component of the media stream, the state of the check list is set to
     * Failed.
     */

    /*
     * See if all checks in the checklist have completed. If we do,
     * then mark ICE processing as failed.
     */
    for (i=0; i<ice->clist.count; ++i) {
	pj_ice_sess_check *c = &ice->clist.checks[i];
	if (c->state < PJ_ICE_SESS_CHECK_STATE_SUCCEEDED) {
	    break;
	}
    }

    if (i == ice->clist.count) {
	/* All checks have completed, but we don't have nominated pair.
	 * If agent's role is controlled, check if all components have
	 * valid pair. If it does, this means the controlled agent has
	 * finished the check list and it's waiting for controlling
	 * agent to send checks with USE-CANDIDATE flag set.
	 */
	if (ice->role == PJ_ICE_SESS_ROLE_CONTROLLED) {
	    for (i=0; i < ice->comp_cnt; ++i) {
		if (ice->comp[i].valid_check == NULL)
		    break;
	    }

	    if (i < ice->comp_cnt) {
		/* This component ID doesn't have valid pair.
		 * Mark ICE as failed.
		 */
		on_ice_complete(ice, PJNATH_EICEFAILED);
		return PJ_TRUE;
	    } else {
		/* All components have a valid pair.
		 * We should wait until we receive nominated checks.
		 */
		if (ice->timer.id == TIMER_NONE &&
		    ice->opt.controlled_agent_want_nom_timeout >= 0)
		{
		    pj_time_val delay;

		    delay.sec = 0;
		    delay.msec = ice->opt.controlled_agent_want_nom_timeout;
		    pj_time_val_normalize(&delay);

		    ice->timer.id = TIMER_CONTROLLED_WAIT_NOM;
		    pj_timer_heap_schedule(ice->stun_cfg.timer_heap,
					   &ice->timer,
					   &delay);

		    LOG5((ice->obj_name,
			  "All checks have completed. Controlled agent now "
			  "waits for nomination from controlling agent "
			  "(timeout=%d msec)",
			  ice->opt.controlled_agent_want_nom_timeout));
		}
		return PJ_FALSE;
	    }

	    /* Unreached */

	} else if (ice->is_nominating) {
	    /* We are controlling agent and all checks have completed but
	     * there's at least one component without nominated pair (or
	     * more likely we don't have any nominated pairs at all).
	     */
	    on_ice_complete(ice, PJNATH_EICEFAILED);
	    return PJ_TRUE;

	} else {
	    /* We are controlling agent and all checks have completed. If
	     * we have valid list for every component, then move on to
	     * sending nominated check, otherwise we have failed.
	     */
	    for (i=0; i<ice->comp_cnt; ++i) {
		if (ice->comp[i].valid_check == NULL)
		    break;
	    }

	    if (i < ice->comp_cnt) {
		/* At least one component doesn't have a valid check. Mark
		 * ICE as failed.
		 */
		on_ice_complete(ice, PJNATH_EICEFAILED);
		return PJ_TRUE;
	    }

	    /* Now it's time to send connectivity check with nomination
	     * flag set.
	     */
	    LOG4((ice->obj_name,
		  "All checks have completed, starting nominated checks now"));
	    start_nominated_check(ice);
	    return PJ_FALSE;
	}
    }

    /* If this connectivity check has been successful, scan all components
     * and see if they have a valid pair, if we are controlling and we haven't
     * started our nominated check yet.
     */
    if (check->err_code == PJ_SUCCESS &&
	ice->role==PJ_ICE_SESS_ROLE_CONTROLLING &&
	!ice->is_nominating &&
	ice->timer.id == TIMER_NONE)
    {
	pj_time_val delay;

	for (i=0; i<ice->comp_cnt; ++i) {
	    if (ice->comp[i].valid_check == NULL)
		break;
	}

	if (i < ice->comp_cnt) {
	    /* Some components still don't have valid pair, continue
	     * processing.
	     */
	    return PJ_FALSE;
	}

	LOG4((ice->obj_name,
	      "Scheduling nominated check in %d ms",
	      ice->opt.nominated_check_delay));

	/* NOTE(review): this cancel looks unreachable — the enclosing
	 * condition above already requires ice->timer.id == TIMER_NONE,
	 * so the timer can never be active here. Confirm before removing.
	 */
	if (ice->timer.id != TIMER_NONE) {
	    pj_timer_heap_cancel(ice->stun_cfg.timer_heap, &ice->timer);
	    ice->timer.id = TIMER_NONE;
	}

	/* All components have valid pair. Let connectivity checks run for
	 * a little bit more time, then start our nominated check.
	 */
	delay.sec = 0;
	delay.msec = ice->opt.nominated_check_delay;
	pj_time_val_normalize(&delay);

	ice->timer.id = TIMER_START_NOMINATED_CHECK;
	pj_timer_heap_schedule(ice->stun_cfg.timer_heap, &ice->timer, &delay);
	return PJ_FALSE;
    }

    /* We still have checks to perform */
    return PJ_FALSE;
}
+
+
/* Create checklist by pairing local candidates with remote candidates.
 *
 * Saves the remote credentials (building the "remote:local" and
 * "local:remote" usernames used for outgoing/incoming STUN requests),
 * copies the remote candidates, pairs every compatible local/remote
 * candidate (same component ID and same address family), sorts and
 * prunes the resulting list, disables components that have no remote
 * counterpart, and initializes the periodic-check timer entry.
 *
 * Returns PJ_SUCCESS, PJ_ETOOMANY if the pairing overflows
 * PJ_ICE_MAX_CHECKS, or an error from prune_checklist().
 */
PJ_DEF(pj_status_t) pj_ice_sess_create_check_list(
			      pj_ice_sess *ice,
			      const pj_str_t *rem_ufrag,
			      const pj_str_t *rem_passwd,
			      unsigned rcand_cnt,
			      const pj_ice_sess_cand rcand[])
{
    pj_ice_sess_checklist *clist;
    /* Scratch buffer for building "ufrag:ufrag" usernames.
     * NOTE(review): assumes the two ufrags plus ':' fit in 128 chars —
     * confirm against the ufrag length limits used by callers.
     */
    char buf[128];
    pj_str_t username;
    timer_data *td;
    unsigned i, j;
    unsigned highest_comp = 0;
    pj_status_t status;

    PJ_ASSERT_RETURN(ice && rem_ufrag && rem_passwd && rcand_cnt && rcand,
		     PJ_EINVAL);
    PJ_ASSERT_RETURN(rcand_cnt + ice->rcand_cnt <= PJ_ICE_MAX_CAND,
		     PJ_ETOOMANY);

    pj_mutex_lock(ice->mutex);

    /* Save credentials. TX username is "remote-ufrag:local-ufrag" (used
     * in requests we send), RX username is the reverse (expected in
     * requests we receive).
     */
    username.ptr = buf;

    pj_strcpy(&username, rem_ufrag);
    pj_strcat2(&username, ":");
    pj_strcat(&username, &ice->rx_ufrag);

    pj_strdup(ice->pool, &ice->tx_uname, &username);
    pj_strdup(ice->pool, &ice->tx_ufrag, rem_ufrag);
    pj_strdup(ice->pool, &ice->tx_pass, rem_passwd);

    pj_strcpy(&username, &ice->rx_ufrag);
    pj_strcat2(&username, ":");
    pj_strcat(&username, rem_ufrag);

    pj_strdup(ice->pool, &ice->rx_uname, &username);


    /* Save remote candidates */
    ice->rcand_cnt = 0;
    for (i=0; i<rcand_cnt; ++i) {
	pj_ice_sess_cand *cn = &ice->rcand[ice->rcand_cnt];

	/* Ignore candidate which has no matching component ID */
	if (rcand[i].comp_id==0 || rcand[i].comp_id > ice->comp_cnt) {
	    continue;
	}

	/* Track the highest component ID seen in the remote candidates;
	 * components above it will be disabled below.
	 */
	if (rcand[i].comp_id > highest_comp)
	    highest_comp = rcand[i].comp_id;

	pj_memcpy(cn, &rcand[i], sizeof(pj_ice_sess_cand));
	pj_strdup(ice->pool, &cn->foundation, &rcand[i].foundation);
	ice->rcand_cnt++;
    }

    /* Generate checklist */
    clist = &ice->clist;
    for (i=0; i<ice->lcand_cnt; ++i) {
	for (j=0; j<ice->rcand_cnt; ++j) {

	    pj_ice_sess_cand *lcand = &ice->lcand[i];
	    /* Note: this local 'rcand' intentionally shadows the
	     * function parameter of the same name.
	     */
	    pj_ice_sess_cand *rcand = &ice->rcand[j];
	    pj_ice_sess_check *chk = &clist->checks[clist->count];

	    if (clist->count >= PJ_ICE_MAX_CHECKS) {
		pj_mutex_unlock(ice->mutex);
		return PJ_ETOOMANY;
	    }

	    /* A local candidate is paired with a remote candidate if
	     * and only if the two candidates have the same component ID
	     * and have the same IP address version.
	     */
	    if ((lcand->comp_id != rcand->comp_id) ||
		(lcand->addr.addr.sa_family != rcand->addr.addr.sa_family))
	    {
		continue;
	    }


	    chk->lcand = lcand;
	    chk->rcand = rcand;
	    chk->state = PJ_ICE_SESS_CHECK_STATE_FROZEN;

	    chk->prio = CALC_CHECK_PRIO(ice, lcand, rcand);

	    clist->count++;
	}
    }

    /* Sort checklist based on priority */
    sort_checklist(ice, clist);

    /* Prune the checklist */
    status = prune_checklist(ice, clist);
    if (status != PJ_SUCCESS) {
	pj_mutex_unlock(ice->mutex);
	return status;
    }

    /* Disable our components which don't have matching component */
    for (i=highest_comp; i<ice->comp_cnt; ++i) {
	if (ice->comp[i].stun_sess) {
	    pj_stun_session_destroy(ice->comp[i].stun_sess);
	    pj_bzero(&ice->comp[i], sizeof(ice->comp[i]));
	}
    }
    ice->comp_cnt = highest_comp;

    /* Init timer entry in the checklist. Initially the timer ID is FALSE
     * because timer is not running.
     */
    clist->timer.id = PJ_FALSE;
    td = PJ_POOL_ZALLOC_T(ice->pool, timer_data);
    td->ice = ice;
    td->clist = clist;
    clist->timer.user_data = (void*)td;
    clist->timer.cb = &periodic_timer;


    /* Log checklist */
    dump_checklist("Checklist created:", ice, clist);

    pj_mutex_unlock(ice->mutex);

    return PJ_SUCCESS;
}
+
+/* Perform check on the specified candidate pair. */
+static pj_status_t perform_check(pj_ice_sess *ice,
+ pj_ice_sess_checklist *clist,
+ unsigned check_id,
+ pj_bool_t nominate)
+{
+ pj_ice_sess_comp *comp;
+ pj_ice_msg_data *msg_data;
+ pj_ice_sess_check *check;
+ const pj_ice_sess_cand *lcand;
+ const pj_ice_sess_cand *rcand;
+ pj_uint32_t prio;
+ pj_status_t status;
+
+ check = &clist->checks[check_id];
+ lcand = check->lcand;
+ rcand = check->rcand;
+ comp = find_comp(ice, lcand->comp_id);
+
+ LOG5((ice->obj_name,
+ "Sending connectivity check for check %s",
+ dump_check(ice->tmp.txt, sizeof(ice->tmp.txt), clist, check)));
+ pj_log_push_indent();
+
+ /* Create request */
+ status = pj_stun_session_create_req(comp->stun_sess,
+ PJ_STUN_BINDING_REQUEST, PJ_STUN_MAGIC,
+ NULL, &check->tdata);
+ if (status != PJ_SUCCESS) {
+ pjnath_perror(ice->obj_name, "Error creating STUN request", status);
+ pj_log_pop_indent();
+ return status;
+ }
+
+ /* Attach data to be retrieved later when STUN request transaction
+ * completes and on_stun_request_complete() callback is called.
+ */
+ msg_data = PJ_POOL_ZALLOC_T(check->tdata->pool, pj_ice_msg_data);
+ msg_data->transport_id = lcand->transport_id;
+ msg_data->has_req_data = PJ_TRUE;
+ msg_data->data.req.ice = ice;
+ msg_data->data.req.clist = clist;
+ msg_data->data.req.ckid = check_id;
+
+ /* Add PRIORITY */
+#if PJNATH_ICE_PRIO_STD
+ prio = CALC_CAND_PRIO(ice, PJ_ICE_CAND_TYPE_PRFLX, 65535,
+ lcand->comp_id);
+#else
+ prio = CALC_CAND_PRIO(ice, PJ_ICE_CAND_TYPE_PRFLX, 0,
+ lcand->comp_id);
+#endif
+ pj_stun_msg_add_uint_attr(check->tdata->pool, check->tdata->msg,
+ PJ_STUN_ATTR_PRIORITY, prio);
+
+ /* Add USE-CANDIDATE and set this check to nominated.
+ * Also add ICE-CONTROLLING or ICE-CONTROLLED
+ */
+ if (ice->role == PJ_ICE_SESS_ROLE_CONTROLLING) {
+ if (nominate) {
+ pj_stun_msg_add_empty_attr(check->tdata->pool, check->tdata->msg,
+ PJ_STUN_ATTR_USE_CANDIDATE);
+ check->nominated = PJ_TRUE;
+ }
+
+ pj_stun_msg_add_uint64_attr(check->tdata->pool, check->tdata->msg,
+ PJ_STUN_ATTR_ICE_CONTROLLING,
+ &ice->tie_breaker);
+
+ } else {
+ pj_stun_msg_add_uint64_attr(check->tdata->pool, check->tdata->msg,
+ PJ_STUN_ATTR_ICE_CONTROLLED,
+ &ice->tie_breaker);
+ }
+
+
+ /* Note that USERNAME and MESSAGE-INTEGRITY will be added by the
+ * STUN session.
+ */
+
+ /* Initiate STUN transaction to send the request */
+ status = pj_stun_session_send_msg(comp->stun_sess, msg_data, PJ_FALSE,
+ PJ_TRUE, &rcand->addr,
+ sizeof(pj_sockaddr_in), check->tdata);
+ if (status != PJ_SUCCESS) {
+ check->tdata = NULL;
+ pjnath_perror(ice->obj_name, "Error sending STUN request", status);
+ pj_log_pop_indent();
+ return status;
+ }
+
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_IN_PROGRESS,
+ PJ_SUCCESS);
+ pj_log_pop_indent();
+ return PJ_SUCCESS;
+}
+
+
+/* Start periodic check for the specified checklist.
+ * This callback is called by timer on every Ta (20msec by default)
+ */
+static pj_status_t start_periodic_check(pj_timer_heap_t *th,
+ pj_timer_entry *te)
+{
+ timer_data *td;
+ pj_ice_sess *ice;
+ pj_ice_sess_checklist *clist;
+ unsigned i, start_count=0;
+ pj_status_t status;
+
+ td = (struct timer_data*) te->user_data;
+ ice = td->ice;
+ clist = td->clist;
+
+ pj_mutex_lock(ice->mutex);
+
+ /* Set timer ID to FALSE first */
+ te->id = PJ_FALSE;
+
+ /* Set checklist state to Running */
+ clist_set_state(ice, clist, PJ_ICE_SESS_CHECKLIST_ST_RUNNING);
+
+ LOG5((ice->obj_name, "Starting checklist periodic check"));
+ pj_log_push_indent();
+
+ /* Send STUN Binding request for check with highest priority on
+ * Waiting state.
+ */
+ for (i=0; i<clist->count; ++i) {
+ pj_ice_sess_check *check = &clist->checks[i];
+
+ if (check->state == PJ_ICE_SESS_CHECK_STATE_WAITING) {
+ status = perform_check(ice, clist, i, ice->is_nominating);
+ if (status != PJ_SUCCESS) {
+ pj_mutex_unlock(ice->mutex);
+ pj_log_pop_indent();
+ return status;
+ }
+
+ ++start_count;
+ break;
+ }
+ }
+
+ /* If we don't have anything in Waiting state, perform check to
+ * highest priority pair that is in Frozen state.
+ */
+ if (start_count==0) {
+ for (i=0; i<clist->count; ++i) {
+ pj_ice_sess_check *check = &clist->checks[i];
+
+ if (check->state == PJ_ICE_SESS_CHECK_STATE_FROZEN) {
+ status = perform_check(ice, clist, i, ice->is_nominating);
+ if (status != PJ_SUCCESS) {
+ pj_mutex_unlock(ice->mutex);
+ pj_log_pop_indent();
+ return status;
+ }
+
+ ++start_count;
+ break;
+ }
+ }
+ }
+
+ /* Cannot start check because there's no suitable candidate pair.
+ */
+ if (start_count!=0) {
+ /* Schedule for next timer */
+ pj_time_val timeout = {0, PJ_ICE_TA_VAL};
+
+ te->id = PJ_TRUE;
+ pj_time_val_normalize(&timeout);
+ pj_timer_heap_schedule(th, te, &timeout);
+ }
+
+ pj_mutex_unlock(ice->mutex);
+ pj_log_pop_indent();
+ return PJ_SUCCESS;
+}
+
+
+/* Start sending connectivity check with USE-CANDIDATE */
+static void start_nominated_check(pj_ice_sess *ice)
+{
+ pj_time_val delay;
+ unsigned i;
+ pj_status_t status;
+
+ LOG4((ice->obj_name, "Starting nominated check.."));
+ pj_log_push_indent();
+
+ pj_assert(ice->is_nominating == PJ_FALSE);
+
+ /* Stop our timer if it's active */
+ if (ice->timer.id == TIMER_START_NOMINATED_CHECK) {
+ pj_timer_heap_cancel(ice->stun_cfg.timer_heap, &ice->timer);
+ ice->timer.id = TIMER_NONE;
+ }
+
+ /* For each component, set the check state of valid check with
+ * highest priority to Waiting (it should have Success state now).
+ */
+ for (i=0; i<ice->comp_cnt; ++i) {
+ unsigned j;
+ const pj_ice_sess_check *vc = ice->comp[i].valid_check;
+
+ pj_assert(ice->comp[i].nominated_check == NULL);
+ pj_assert(vc->err_code == PJ_SUCCESS);
+
+ for (j=0; j<ice->clist.count; ++j) {
+ pj_ice_sess_check *c = &ice->clist.checks[j];
+ if (c->lcand->transport_id == vc->lcand->transport_id &&
+ c->rcand == vc->rcand)
+ {
+ pj_assert(c->err_code == PJ_SUCCESS);
+ c->state = PJ_ICE_SESS_CHECK_STATE_FROZEN;
+ check_set_state(ice, c, PJ_ICE_SESS_CHECK_STATE_WAITING,
+ PJ_SUCCESS);
+ break;
+ }
+ }
+ }
+
+ /* And (re)start the periodic check */
+ if (ice->clist.timer.id) {
+ pj_timer_heap_cancel(ice->stun_cfg.timer_heap, &ice->clist.timer);
+ ice->clist.timer.id = PJ_FALSE;
+ }
+
+ ice->clist.timer.id = PJ_TRUE;
+ delay.sec = delay.msec = 0;
+ status = pj_timer_heap_schedule(ice->stun_cfg.timer_heap,
+ &ice->clist.timer, &delay);
+ if (status != PJ_SUCCESS) {
+ ice->clist.timer.id = PJ_FALSE;
+ } else {
+ LOG5((ice->obj_name, "Periodic timer rescheduled.."));
+ }
+
+ ice->is_nominating = PJ_TRUE;
+ pj_log_pop_indent();
+}
+
+/* Timer callback to perform periodic check */
+static void periodic_timer(pj_timer_heap_t *th,
+ pj_timer_entry *te)
+{
+ start_periodic_check(th, te);
+}
+
+
+/* Utility: find string in string array */
+const pj_str_t *find_str(const pj_str_t *strlist[], unsigned count,
+ const pj_str_t *str)
+{
+ unsigned i;
+ for (i=0; i<count; ++i) {
+ if (pj_strcmp(strlist[i], str)==0)
+ return strlist[i];
+ }
+ return NULL;
+}
+
+
/*
 * Start ICE periodic check. This function will return immediately, and
 * application will be notified about the connectivity check status in
 * #pj_ice_sess_cb callback.
 *
 * Unfreezes the first group of pairs per RFC 5245 5.7.4 (one pair per
 * foundation for component 1), replays any early triggered checks that
 * arrived before the checklist was started, then schedules the periodic
 * check timer with zero delay.
 *
 * Returns PJ_SUCCESS, PJNATH_EICEINCOMPID if no pair exists for
 * component 1, or the timer scheduling error.
 */
PJ_DEF(pj_status_t) pj_ice_sess_start_check(pj_ice_sess *ice)
{
    pj_ice_sess_checklist *clist;
    const pj_ice_sess_cand *cand0;
    /* One slot per distinct foundation; bounded by the candidate count */
    const pj_str_t *flist[PJ_ICE_MAX_CAND];
    pj_ice_rx_check *rcheck;
    unsigned i, flist_cnt = 0;
    pj_time_val delay;
    pj_status_t status;

    PJ_ASSERT_RETURN(ice, PJ_EINVAL);

    /* Checklist must have been created */
    PJ_ASSERT_RETURN(ice->clist.count > 0, PJ_EINVALIDOP);

    /* Lock session */
    pj_mutex_lock(ice->mutex);

    LOG4((ice->obj_name, "Starting ICE check.."));
    pj_log_push_indent();

    /* If we are using aggressive nomination, set the is_nominating state */
    if (ice->opt.aggressive)
	ice->is_nominating = PJ_TRUE;

    /* The agent examines the check list for the first media stream (a
     * media stream is the first media stream when it is described by
     * the first m-line in the SDP offer and answer).  For that media
     * stream, it:
     *
     * -  Groups together all of the pairs with the same foundation,
     *
     * -  For each group, sets the state of the pair with the lowest
     *    component ID to Waiting.  If there is more than one such pair,
     *    the one with the highest priority is used.
     */

    clist = &ice->clist;

    /* Pickup the first pair for component 1. */
    for (i=0; i<clist->count; ++i) {
	if (clist->checks[i].lcand->comp_id == 1)
	    break;
    }
    if (i == clist->count) {
	pj_assert(!"Unable to find checklist for component 1");
	pj_mutex_unlock(ice->mutex);
	pj_log_pop_indent();
	return PJNATH_EICEINCOMPID;
    }

    /* Set this check to WAITING only if state is frozen. It may be possible
     * that this check has already been started by a trigger check
     */
    if (clist->checks[i].state == PJ_ICE_SESS_CHECK_STATE_FROZEN) {
	check_set_state(ice, &clist->checks[i],
			PJ_ICE_SESS_CHECK_STATE_WAITING, PJ_SUCCESS);
    }

    cand0 = clist->checks[i].lcand;
    flist[flist_cnt++] = &clist->checks[i].lcand->foundation;

    /* Find all of the other pairs in that check list with the same
     * component ID, but different foundations, and sets all of their
     * states to Waiting as well.
     */
    for (++i; i<clist->count; ++i) {
	const pj_ice_sess_cand *cand1;

	cand1 = clist->checks[i].lcand;

	if (cand1->comp_id==cand0->comp_id &&
	    find_str(flist, flist_cnt, &cand1->foundation)==NULL)
	{
	    if (clist->checks[i].state == PJ_ICE_SESS_CHECK_STATE_FROZEN) {
		check_set_state(ice, &clist->checks[i],
				PJ_ICE_SESS_CHECK_STATE_WAITING, PJ_SUCCESS);
	    }
	    flist[flist_cnt++] = &cand1->foundation;
	}
    }

    /* First, perform all pending triggered checks, simultaneously.
     * NOTE(review): rcheck->next is read after handle_incoming_check();
     * presumably the handler never unlinks rcheck from the list (the
     * whole list is reset below) — confirm.
     */
    rcheck = ice->early_check.next;
    while (rcheck != &ice->early_check) {
	LOG4((ice->obj_name,
	      "Performing delayed triggerred check for component %d",
	      rcheck->comp_id));
	pj_log_push_indent();
	handle_incoming_check(ice, rcheck);
	rcheck = rcheck->next;
	pj_log_pop_indent();
    }
    pj_list_init(&ice->early_check);

    /* Start periodic check */
    /* We could start it immediately like below, but lets schedule timer
     * instead to reduce stack usage:
     * return start_periodic_check(ice->stun_cfg.timer_heap, &clist->timer);
     */
    clist->timer.id = PJ_TRUE;
    delay.sec = delay.msec = 0;
    status = pj_timer_heap_schedule(ice->stun_cfg.timer_heap,
				    &clist->timer, &delay);
    if (status != PJ_SUCCESS) {
	clist->timer.id = PJ_FALSE;
    }

    pj_mutex_unlock(ice->mutex);
    pj_log_pop_indent();
    return status;
}
+
+
+//////////////////////////////////////////////////////////////////////////////
+
+/* Callback called by STUN session to send the STUN message.
+ * STUN session also doesn't have a transport, remember?!
+ */
+static pj_status_t on_stun_send_msg(pj_stun_session *sess,
+ void *token,
+ const void *pkt,
+ pj_size_t pkt_size,
+ const pj_sockaddr_t *dst_addr,
+ unsigned addr_len)
+{
+ stun_data *sd = (stun_data*) pj_stun_session_get_user_data(sess);
+ pj_ice_sess *ice = sd->ice;
+ pj_ice_msg_data *msg_data = (pj_ice_msg_data*) token;
+
+ return (*ice->cb.on_tx_pkt)(ice, sd->comp_id, msg_data->transport_id,
+ pkt, pkt_size, dst_addr, addr_len);
+}
+
+
/* This callback is called when an outgoing STUN Binding request (a
 * connectivity check sent by perform_check()) completes.
 *
 * On failure it handles role-conflict retry (RFC 5245 7.1.2.1) or marks
 * the check Failed. On success it validates the response source address,
 * discovers peer reflexive candidates from XOR-MAPPED-ADDRESS
 * (RFC 5245 7.1.2.2.1), constructs/updates the valid pair
 * (RFC 5245 7.1.2.2.3) and runs on_check_complete(), which may conclude
 * the ICE session.
 */
static void on_stun_request_complete(pj_stun_session *stun_sess,
				     pj_status_t status,
				     void *token,
				     pj_stun_tx_data *tdata,
				     const pj_stun_msg *response,
				     const pj_sockaddr_t *src_addr,
				     unsigned src_addr_len)
{
    pj_ice_msg_data *msg_data = (pj_ice_msg_data*) token;
    pj_ice_sess *ice;
    pj_ice_sess_check *check, *new_check;
    pj_ice_sess_cand *lcand;
    pj_ice_sess_checklist *clist;
    pj_stun_xor_mapped_addr_attr *xaddr;
    unsigned i;

    PJ_UNUSED_ARG(stun_sess);
    PJ_UNUSED_ARG(src_addr_len);

    pj_assert(msg_data->has_req_data);

    ice = msg_data->data.req.ice;
    clist = msg_data->data.req.clist;
    check = &clist->checks[msg_data->data.req.ckid];


    /* Mark STUN transaction as complete.
     * NOTE(review): check->tdata is cleared before ice->mutex is taken —
     * presumably safe because this callback is serialized with the other
     * users of the transaction; confirm.
     */
    pj_assert(tdata == check->tdata);
    check->tdata = NULL;

    pj_mutex_lock(ice->mutex);

    /* Init lcand to NULL. lcand will be found from the mapped address
     * found in the response.
     */
    lcand = NULL;

    if (status != PJ_SUCCESS) {
	char errmsg[PJ_ERR_MSG_SIZE];

	if (status==PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_ROLE_CONFLICT)) {

	    /* Role conclict response.
	     *
	     * 7.1.2.1.  Failure Cases:
	     *
	     * If the request had contained the ICE-CONTROLLED attribute,
	     * the agent MUST switch to the controlling role if it has not
	     * already done so.  If the request had contained the
	     * ICE-CONTROLLING attribute, the agent MUST switch to the
	     * controlled role if it has not already done so.  Once it has
	     * switched, the agent MUST immediately retry the request with
	     * the ICE-CONTROLLING or ICE-CONTROLLED attribute reflecting
	     * its new role.
	     */
	    pj_ice_sess_role new_role = PJ_ICE_SESS_ROLE_UNKNOWN;
	    pj_stun_msg *req = tdata->msg;

	    if (pj_stun_msg_find_attr(req, PJ_STUN_ATTR_ICE_CONTROLLING, 0)) {
		new_role = PJ_ICE_SESS_ROLE_CONTROLLED;
	    } else if (pj_stun_msg_find_attr(req, PJ_STUN_ATTR_ICE_CONTROLLED,
					     0)) {
		new_role = PJ_ICE_SESS_ROLE_CONTROLLING;
	    } else {
		pj_assert(!"We should have put CONTROLLING/CONTROLLED attr!");
		new_role = PJ_ICE_SESS_ROLE_CONTROLLED;
	    }

	    if (new_role != ice->role) {
		LOG4((ice->obj_name,
		      "Changing role because of role conflict response"));
		pj_ice_sess_change_role(ice, new_role);
	    }

	    /* Resend request */
	    LOG4((ice->obj_name, "Resending check because of role conflict"));
	    pj_log_push_indent();
	    check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_WAITING, 0);
	    perform_check(ice, clist, msg_data->data.req.ckid,
			  check->nominated || ice->is_nominating);
	    pj_log_pop_indent();
	    pj_mutex_unlock(ice->mutex);
	    return;
	}

	pj_strerror(status, errmsg, sizeof(errmsg));
	LOG4((ice->obj_name,
	     "Check %s%s: connectivity check FAILED: %s",
	     dump_check(ice->tmp.txt, sizeof(ice->tmp.txt),
			&ice->clist, check),
	     (check->nominated ? " (nominated)" : " (not nominated)"),
	     errmsg));
	pj_log_push_indent();
	check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_FAILED, status);
	on_check_complete(ice, check);
	pj_log_pop_indent();
	pj_mutex_unlock(ice->mutex);
	return;
    }


    /* 7.1.2.1.  Failure Cases
     *
     * The agent MUST check that the source IP address and port of the
     * response equals the destination IP address and port that the Binding
     * Request was sent to, and that the destination IP address and port of
     * the response match the source IP address and port that the Binding
     * Request was sent from.
     */
    if (sockaddr_cmp(&check->rcand->addr, (const pj_sockaddr*)src_addr) != 0) {
	status = PJNATH_EICEINSRCADDR;
	LOG4((ice->obj_name,
	     "Check %s%s: connectivity check FAILED: source address mismatch",
	     dump_check(ice->tmp.txt, sizeof(ice->tmp.txt),
			&ice->clist, check),
	     (check->nominated ? " (nominated)" : " (not nominated)")));
	pj_log_push_indent();
	check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_FAILED, status);
	on_check_complete(ice, check);
	pj_log_pop_indent();
	pj_mutex_unlock(ice->mutex);
	return;
    }

    /* 7.1.2.2.  Success Cases
     *
     * A check is considered to be a success if all of the following are
     * true:
     *
     * o  the STUN transaction generated a success response
     *
     * o  the source IP address and port of the response equals the
     *    destination IP address and port that the Binding Request was sent
     *    to
     *
     * o  the destination IP address and port of the response match the
     *    source IP address and port that the Binding Request was sent from
     */


    LOG4((ice->obj_name,
	 "Check %s%s: connectivity check SUCCESS",
	 dump_check(ice->tmp.txt, sizeof(ice->tmp.txt),
		    &ice->clist, check),
	 (check->nominated ? " (nominated)" : " (not nominated)")));

    /* Get the STUN XOR-MAPPED-ADDRESS attribute. */
    xaddr = (pj_stun_xor_mapped_addr_attr*)
	    pj_stun_msg_find_attr(response, PJ_STUN_ATTR_XOR_MAPPED_ADDR,0);
    if (!xaddr) {
	check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_FAILED,
			PJNATH_ESTUNNOMAPPEDADDR);
	on_check_complete(ice, check);
	pj_mutex_unlock(ice->mutex);
	return;
    }

    /* Find local candidate that matches the XOR-MAPPED-ADDRESS */
    pj_assert(lcand == NULL);
    for (i=0; i<ice->lcand_cnt; ++i) {
	if (sockaddr_cmp(&xaddr->sockaddr, &ice->lcand[i].addr) == 0) {
	    /* Match */
	    lcand = &ice->lcand[i];
	    break;
	}
    }

    /* 7.1.2.2.1.  Discovering Peer Reflexive Candidates
     * If the transport address returned in XOR-MAPPED-ADDRESS does not match
     * any of the local candidates that the agent knows about, the mapped
     * address represents a new candidate - a peer reflexive candidate.
     */
    if (lcand == NULL) {
	unsigned cand_id;
	pj_str_t foundation;

	pj_ice_calc_foundation(ice->pool, &foundation, PJ_ICE_CAND_TYPE_PRFLX,
			       &check->lcand->base_addr);

	/* Still in 7.1.2.2.1.  Discovering Peer Reflexive Candidates
	 * Its priority is set equal to the value of the PRIORITY attribute
	 * in the Binding Request.
	 *
	 * I think the priority calculated by add_cand() should be the same
	 * as the one calculated in perform_check(), so there's no need to
	 * get the priority from the PRIORITY attribute.
	 */

	/* Add new peer reflexive candidate. Local preference 65535 matches
	 * the PRIORITY attribute built in perform_check().
	 * NOTE(review): address length is hard-coded to
	 * sizeof(pj_sockaddr_in) — presumably an IPv4-only assumption;
	 * confirm for IPv6 support.
	 */
	status = pj_ice_sess_add_cand(ice, check->lcand->comp_id,
				      msg_data->transport_id,
				      PJ_ICE_CAND_TYPE_PRFLX,
				      65535, &foundation,
				      &xaddr->sockaddr,
				      &check->lcand->base_addr,
				      &check->lcand->base_addr,
				      sizeof(pj_sockaddr_in), &cand_id);
	if (status != PJ_SUCCESS) {
	    check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_FAILED,
			    status);
	    on_check_complete(ice, check);
	    pj_mutex_unlock(ice->mutex);
	    return;
	}

	/* Update local candidate */
	lcand = &ice->lcand[cand_id];

    }

    /* 7.1.2.2.3.  Constructing a Valid Pair
     * Next, the agent constructs a candidate pair whose local candidate
     * equals the mapped address of the response, and whose remote candidate
     * equals the destination address to which the request was sent.
     */

    /* Add pair to valid list, if it's not there, otherwise just update
     * nominated flag
     */
    for (i=0; i<ice->valid_list.count; ++i) {
	if (ice->valid_list.checks[i].lcand == lcand &&
	    ice->valid_list.checks[i].rcand == check->rcand)
	    break;
    }

    if (i==ice->valid_list.count) {
	pj_assert(ice->valid_list.count < PJ_ICE_MAX_CHECKS);
	new_check = &ice->valid_list.checks[ice->valid_list.count++];
	new_check->lcand = lcand;
	new_check->rcand = check->rcand;
	new_check->prio = CALC_CHECK_PRIO(ice, lcand, check->rcand);
	new_check->state = PJ_ICE_SESS_CHECK_STATE_SUCCEEDED;
	new_check->nominated = check->nominated;
	new_check->err_code = PJ_SUCCESS;
    } else {
	new_check = &ice->valid_list.checks[i];
	ice->valid_list.checks[i].nominated = check->nominated;
    }

    /* Update valid check and nominated check for the component */
    update_comp_check(ice, new_check->lcand->comp_id, new_check);

    /* Sort valid_list (must do so after update_comp_check(), otherwise
     * new_check will point to something else (#953)
     */
    sort_checklist(ice, &ice->valid_list);

    /* 7.1.2.2.2.  Updating Pair States
     *
     * The agent sets the state of the pair that generated the check to
     * Succeeded.  The success of this check might also cause the state of
     * other checks to change as well.
     */
    check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_SUCCEEDED,
		    PJ_SUCCESS);

    /* Perform 7.1.2.2.2.  Updating Pair States.
     * This may terminate ICE processing.
     */
    if (on_check_complete(ice, check)) {
	/* ICE complete! */
	pj_mutex_unlock(ice->mutex);
	return;
    }

    pj_mutex_unlock(ice->mutex);
}
+
+
+/* STUN session callback: an incoming STUN request has arrived on the
+ * socket of one of our candidates. Only Binding requests are accepted;
+ * anything else is answered with 400 (Bad Request).
+ *
+ * Processing order (all under ice->mutex):
+ * 1. Extract PRIORITY / USE-CANDIDATE / ICE-CONTROLLING|CONTROLLED.
+ * 2. Detect and repair role conflicts (7.2.1.1), possibly answering 487.
+ * 3. Send the Binding response carrying XOR-MAPPED-ADDRESS.
+ * 4. Either queue the check as an "early check" (no remote candidates
+ * yet, i.e. SDP answer not received) or run the triggered-check logic
+ * via handle_incoming_check().
+ */
+static pj_status_t on_stun_rx_request(pj_stun_session *sess,
+ const pj_uint8_t *pkt,
+ unsigned pkt_len,
+ const pj_stun_rx_data *rdata,
+ void *token,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len)
+{
+ stun_data *sd;
+ const pj_stun_msg *msg = rdata->msg;
+ pj_ice_msg_data *msg_data;
+ pj_ice_sess *ice;
+ pj_stun_priority_attr *prio_attr;
+ pj_stun_use_candidate_attr *uc_attr;
+ pj_stun_uint64_attr *role_attr;
+ pj_stun_tx_data *tdata;
+ pj_ice_rx_check *rcheck, tmp_rcheck;
+ pj_status_t status;
+
+ PJ_UNUSED_ARG(pkt);
+ PJ_UNUSED_ARG(pkt_len);
+
+ /* Reject any requests except Binding request */
+ if (msg->hdr.type != PJ_STUN_BINDING_REQUEST) {
+ pj_stun_session_respond(sess, rdata, PJ_STUN_SC_BAD_REQUEST,
+ NULL, token, PJ_TRUE,
+ src_addr, src_addr_len);
+ return PJ_SUCCESS;
+ }
+
+
+ sd = (stun_data*) pj_stun_session_get_user_data(sess);
+ ice = sd->ice;
+
+ pj_mutex_lock(ice->mutex);
+
+ /*
+ * Note:
+ * Be aware that when STUN request is received, we might not get
+ * SDP answer yet, so we might not have remote candidates and
+ * checklist yet. This case will be handled after we send
+ * a response.
+ */
+
+ /* Get PRIORITY attribute; mandatory for an ICE connectivity check,
+ * so a request without it is silently dropped (response already not
+ * sent at this point).
+ */
+ prio_attr = (pj_stun_priority_attr*)
+ pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_PRIORITY, 0);
+ if (prio_attr == NULL) {
+ LOG5((ice->obj_name, "Received Binding request with no PRIORITY"));
+ pj_mutex_unlock(ice->mutex);
+ return PJ_SUCCESS;
+ }
+
+ /* Get USE-CANDIDATE attribute (optional; presence nominates the pair) */
+ uc_attr = (pj_stun_use_candidate_attr*)
+ pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_USE_CANDIDATE, 0);
+
+
+ /* Get ICE-CONTROLLING or ICE-CONTROLLED (whichever is present) */
+ role_attr = (pj_stun_uint64_attr*)
+ pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_ICE_CONTROLLING, 0);
+ if (role_attr == NULL) {
+ role_attr = (pj_stun_uint64_attr*)
+ pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_ICE_CONTROLLED, 0);
+ }
+
+ /* Handle the case when request comes before answer is received.
+ * We need to put credential in the response, and since we haven't
+ * got the response, copy the username from the request.
+ */
+ if (ice->rcand_cnt == 0) {
+ pj_stun_string_attr *uname_attr;
+
+ uname_attr = (pj_stun_string_attr*)
+ pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_USERNAME, 0);
+ pj_assert(uname_attr != NULL);
+ pj_strdup(ice->pool, &ice->rx_uname, &uname_attr->value);
+ }
+
+ /* 7.2.1.1. Detecting and Repairing Role Conflicts
+ * Tie-breaker comparison decides which side switches role and which
+ * side gets the 487 (Role Conflict) error response.
+ */
+ if (ice->role == PJ_ICE_SESS_ROLE_CONTROLLING &&
+ role_attr && role_attr->hdr.type == PJ_STUN_ATTR_ICE_CONTROLLING)
+ {
+ if (pj_cmp_timestamp(&ice->tie_breaker, &role_attr->value) < 0) {
+ /* Switch role to controlled */
+ LOG4((ice->obj_name,
+ "Changing role because of ICE-CONTROLLING attribute"));
+ pj_ice_sess_change_role(ice, PJ_ICE_SESS_ROLE_CONTROLLED);
+ } else {
+ /* Generate 487 response */
+ pj_stun_session_respond(sess, rdata, PJ_STUN_SC_ROLE_CONFLICT,
+ NULL, token, PJ_TRUE,
+ src_addr, src_addr_len);
+ pj_mutex_unlock(ice->mutex);
+ return PJ_SUCCESS;
+ }
+
+ } else if (ice->role == PJ_ICE_SESS_ROLE_CONTROLLED &&
+ role_attr && role_attr->hdr.type == PJ_STUN_ATTR_ICE_CONTROLLED)
+ {
+ if (pj_cmp_timestamp(&ice->tie_breaker, &role_attr->value) < 0) {
+ /* Generate 487 response */
+ pj_stun_session_respond(sess, rdata, PJ_STUN_SC_ROLE_CONFLICT,
+ NULL, token, PJ_TRUE,
+ src_addr, src_addr_len);
+ pj_mutex_unlock(ice->mutex);
+ return PJ_SUCCESS;
+ } else {
+ /* Switch role to controlled */
+ LOG4((ice->obj_name,
+ "Changing role because of ICE-CONTROLLED attribute"));
+ pj_ice_sess_change_role(ice, PJ_ICE_SESS_ROLE_CONTROLLING);
+ }
+ }
+
+ /*
+ * First send response to this request
+ */
+ status = pj_stun_session_create_res(sess, rdata, 0, NULL, &tdata);
+ if (status != PJ_SUCCESS) {
+ pj_mutex_unlock(ice->mutex);
+ return status;
+ }
+
+ /* Add XOR-MAPPED-ADDRESS attribute.
+ * NOTE(review): the return status of this call is not checked; on
+ * failure the response would be sent without the attribute.
+ */
+ status = pj_stun_msg_add_sockaddr_attr(tdata->pool, tdata->msg,
+ PJ_STUN_ATTR_XOR_MAPPED_ADDR,
+ PJ_TRUE, src_addr, src_addr_len);
+
+ /* Create a msg_data to be associated with this response */
+ msg_data = PJ_POOL_ZALLOC_T(tdata->pool, pj_ice_msg_data);
+ msg_data->transport_id = ((pj_ice_msg_data*)token)->transport_id;
+ msg_data->has_req_data = PJ_FALSE;
+
+ /* Send the response */
+ status = pj_stun_session_send_msg(sess, msg_data, PJ_TRUE, PJ_TRUE,
+ src_addr, src_addr_len, tdata);
+
+
+ /*
+ * Handling early check.
+ *
+ * It's possible that we receive this request before we receive SDP
+ * answer. In this case, we can't perform trigger check since we
+ * don't have checklist yet, so just save this check in a pending
+ * triggered check array to be acted upon later.
+ */
+ if (ice->rcand_cnt == 0) {
+ /* Early check: allocate from the pool so it survives this call */
+ rcheck = PJ_POOL_ZALLOC_T(ice->pool, pj_ice_rx_check);
+ } else {
+ /* Normal case: stack storage is enough, processed synchronously */
+ rcheck = &tmp_rcheck;
+ }
+
+ /* Init rcheck */
+ rcheck->comp_id = sd->comp_id;
+ rcheck->transport_id = ((pj_ice_msg_data*)token)->transport_id;
+ rcheck->src_addr_len = src_addr_len;
+ pj_memcpy(&rcheck->src_addr, src_addr, src_addr_len);
+ rcheck->use_candidate = (uc_attr != NULL);
+ rcheck->priority = prio_attr->value;
+ /* NOTE(review): role_attr points into the parsed request's storage;
+ * for the early-check path (queued below) confirm it stays valid
+ * until the queued check is processed.
+ */
+ rcheck->role_attr = role_attr;
+
+ if (ice->rcand_cnt == 0) {
+ /* We don't have answer yet, so keep this request for later */
+ LOG4((ice->obj_name, "Received an early check for comp %d",
+ rcheck->comp_id));
+ pj_list_push_back(&ice->early_check, rcheck);
+ } else {
+ /* Handle this check */
+ handle_incoming_check(ice, rcheck);
+ }
+
+ pj_mutex_unlock(ice->mutex);
+ return PJ_SUCCESS;
+}
+
+
+/* Handle incoming Binding request and perform triggered check
+ * (RFC-style "7.2.1 Triggered Checks" processing).
+ * This function may be called by on_stun_rx_request(), or when
+ * SDP answer is received and we have received early checks.
+ *
+ * Caller is expected to hold ice->mutex. The function may mutate the
+ * remote candidate list (adding a peer-reflexive candidate), the
+ * checklist (adding a triggered check) and the valid list (nomination).
+ */
+static void handle_incoming_check(pj_ice_sess *ice,
+ const pj_ice_rx_check *rcheck)
+{
+ pj_ice_sess_comp *comp;
+ pj_ice_sess_cand *lcand = NULL;
+ pj_ice_sess_cand *rcand;
+ unsigned i;
+
+ comp = find_comp(ice, rcheck->comp_id);
+
+ /* Find remote candidate based on the source transport address of
+ * the request.
+ */
+ for (i=0; i<ice->rcand_cnt; ++i) {
+ if (sockaddr_cmp(&rcheck->src_addr, &ice->rcand[i].addr)==0)
+ break;
+ }
+
+ /* 7.2.1.3. Learning Peer Reflexive Candidates
+ * If the source transport address of the request does not match any
+ * existing remote candidates, it represents a new peer reflexive remote
+ * candidate.
+ */
+ if (i == ice->rcand_cnt) {
+ if (ice->rcand_cnt >= PJ_ICE_MAX_CAND) {
+ LOG4((ice->obj_name,
+ "Unable to add new peer reflexive candidate: too many "
+ "candidates already (%d)", PJ_ICE_MAX_CAND));
+ return;
+ }
+
+ rcand = &ice->rcand[ice->rcand_cnt++];
+ rcand->comp_id = (pj_uint8_t)rcheck->comp_id;
+ rcand->type = PJ_ICE_CAND_TYPE_PRFLX;
+ rcand->prio = rcheck->priority;
+ pj_memcpy(&rcand->addr, &rcheck->src_addr, rcheck->src_addr_len);
+
+ /* Foundation is random, unique from other foundation */
+ rcand->foundation.ptr = (char*) pj_pool_alloc(ice->pool, 36);
+ rcand->foundation.slen = pj_ansi_snprintf(rcand->foundation.ptr, 36,
+ "f%p",
+ rcand->foundation.ptr);
+
+ LOG4((ice->obj_name,
+ "Added new remote candidate from the request: %s:%d",
+ pj_inet_ntoa(rcand->addr.ipv4.sin_addr),
+ (int)pj_ntohs(rcand->addr.ipv4.sin_port)));
+
+ } else {
+ /* Remote candidate found */
+ rcand = &ice->rcand[i];
+ }
+
+#if 0
+ /* Find again the local candidate by matching the base address
+ * with the local candidates in the checklist. Checks may have
+ * been pruned before, so it's possible that if we use the lcand
+ * as it is, we wouldn't be able to find the check in the checklist
+ * and we will end up creating a new check unnecessarily.
+ */
+ for (i=0; i<ice->clist.count; ++i) {
+ pj_ice_sess_check *c = &ice->clist.checks[i];
+ if (/*c->lcand == lcand ||*/
+ sockaddr_cmp(&c->lcand->base_addr, &lcand->base_addr)==0)
+ {
+ lcand = c->lcand;
+ break;
+ }
+ }
+#else
+ /* Just get candidate with the highest priority and same transport ID
+ * for the specified component ID in the checklist.
+ * (Checklist is kept sorted by priority, so the first match wins.)
+ */
+ for (i=0; i<ice->clist.count; ++i) {
+ pj_ice_sess_check *c = &ice->clist.checks[i];
+ if (c->lcand->comp_id == rcheck->comp_id &&
+ c->lcand->transport_id == rcheck->transport_id)
+ {
+ lcand = c->lcand;
+ break;
+ }
+ }
+ if (lcand == NULL) {
+ /* Should not happen, but just in case remote is sending a
+ * Binding request for a component which it doesn't have.
+ */
+ LOG4((ice->obj_name,
+ "Received Binding request but no local candidate is found!"));
+ return;
+ }
+#endif
+
+ /*
+ * Create candidate pair for this request.
+ */
+
+ /*
+ * 7.2.1.4. Triggered Checks
+ *
+ * Now that we have local and remote candidate, check if we already
+ * have this pair in our checklist.
+ */
+ for (i=0; i<ice->clist.count; ++i) {
+ pj_ice_sess_check *c = &ice->clist.checks[i];
+ if (c->lcand == lcand && c->rcand == rcand)
+ break;
+ }
+
+ /* If the pair is already on the check list:
+ * - If the state of that pair is Waiting or Frozen, its state is
+ * changed to In-Progress and a check for that pair is performed
+ * immediately. This is called a triggered check.
+ *
+ * - If the state of that pair is In-Progress, the agent SHOULD
+ * generate an immediate retransmit of the Binding Request for the
+ * check in progress. This is to facilitate rapid completion of
+ * ICE when both agents are behind NAT.
+ *
+ * - If the state of that pair is Failed or Succeeded, no triggered
+ * check is sent.
+ */
+ if (i != ice->clist.count) {
+ pj_ice_sess_check *c = &ice->clist.checks[i];
+
+ /* If USE-CANDIDATE is present, set nominated flag
+ * Note: DO NOT overwrite nominated flag if one is already set.
+ */
+ c->nominated = ((rcheck->use_candidate) || c->nominated);
+
+ if (c->state == PJ_ICE_SESS_CHECK_STATE_FROZEN ||
+ c->state == PJ_ICE_SESS_CHECK_STATE_WAITING)
+ {
+ /* See if we shall nominate this check */
+ pj_bool_t nominate = (c->nominated || ice->is_nominating);
+
+ LOG5((ice->obj_name, "Performing triggered check for check %d",i));
+ pj_log_push_indent();
+ perform_check(ice, &ice->clist, i, nominate);
+ pj_log_pop_indent();
+
+ } else if (c->state == PJ_ICE_SESS_CHECK_STATE_IN_PROGRESS) {
+ /* Should retransmit immediately
+ */
+ LOG5((ice->obj_name, "Triggered check for check %d not performed "
+ "because it's in progress. Retransmitting", i));
+ pj_log_push_indent();
+ pj_stun_session_retransmit_req(comp->stun_sess, c->tdata);
+ pj_log_pop_indent();
+
+ } else if (c->state == PJ_ICE_SESS_CHECK_STATE_SUCCEEDED) {
+ /* Check complete for this component.
+ * Note this may end ICE process.
+ */
+ pj_bool_t complete;
+ unsigned j;
+
+ /* If this check is nominated, scan the valid_list for the
+ * same check and update the nominated flag. A controlled
+ * agent might have finished the check earlier.
+ */
+ if (rcheck->use_candidate) {
+ for (j=0; j<ice->valid_list.count; ++j) {
+ pj_ice_sess_check *vc = &ice->valid_list.checks[j];
+ if (vc->lcand->transport_id == c->lcand->transport_id &&
+ vc->rcand == c->rcand)
+ {
+ /* Set nominated flag */
+ vc->nominated = PJ_TRUE;
+
+ /* Update valid check and nominated check for the component */
+ update_comp_check(ice, vc->lcand->comp_id, vc);
+
+ LOG5((ice->obj_name, "Valid check %s is nominated",
+ dump_check(ice->tmp.txt, sizeof(ice->tmp.txt),
+ &ice->valid_list, vc)));
+ }
+ }
+ }
+
+ LOG5((ice->obj_name, "Triggered check for check %d not performed "
+ "because it's completed", i));
+ pj_log_push_indent();
+ complete = on_check_complete(ice, c);
+ pj_log_pop_indent();
+ if (complete) {
+ /* ICE negotiation has finished; stop processing here */
+ return;
+ }
+ }
+
+ }
+ /* If the pair is not already on the check list:
+ * - The pair is inserted into the check list based on its priority.
+ * - Its state is set to In-Progress
+ * - A triggered check for that pair is performed immediately.
+ */
+ /* Note: only do this if we don't have too many checks in checklist */
+ else if (ice->clist.count < PJ_ICE_MAX_CHECKS) {
+
+ pj_ice_sess_check *c = &ice->clist.checks[ice->clist.count];
+ pj_bool_t nominate;
+
+ c->lcand = lcand;
+ c->rcand = rcand;
+ c->prio = CALC_CHECK_PRIO(ice, lcand, rcand);
+ c->state = PJ_ICE_SESS_CHECK_STATE_WAITING;
+ c->nominated = rcheck->use_candidate;
+ c->err_code = PJ_SUCCESS;
+
+ nominate = (c->nominated || ice->is_nominating);
+
+ LOG4((ice->obj_name, "New triggered check added: %d",
+ ice->clist.count));
+ pj_log_push_indent();
+ perform_check(ice, &ice->clist, ice->clist.count++, nominate);
+ pj_log_pop_indent();
+
+ } else {
+ LOG4((ice->obj_name, "Error: unable to perform triggered check: "
+ "TOO MANY CHECKS IN CHECKLIST!"));
+ }
+}
+
+
+/* STUN session callback: an incoming STUN indication (not a request,
+ * not a response) was received on a component's socket. Only Binding
+ * Indications are expected; they act as keep-alives and need nothing
+ * beyond logging. Any other indication is logged as unexpected.
+ *
+ * Always returns PJ_SUCCESS; indications are never answered.
+ */
+static pj_status_t on_stun_rx_indication(pj_stun_session *sess,
+ const pj_uint8_t *pkt,
+ unsigned pkt_len,
+ const pj_stun_msg *msg,
+ void *token,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len)
+{
+ struct stun_data *sd;
+
+ /* Only 'sess' and 'msg' are used below; mark the genuinely unused
+ * parameters (the original code also marked 'sess' and 'msg' as
+ * unused even though both are dereferenced, which was misleading).
+ */
+ PJ_UNUSED_ARG(pkt);
+ PJ_UNUSED_ARG(pkt_len);
+ PJ_UNUSED_ARG(token);
+ PJ_UNUSED_ARG(src_addr);
+ PJ_UNUSED_ARG(src_addr_len);
+
+ sd = (struct stun_data*) pj_stun_session_get_user_data(sess);
+
+ pj_log_push_indent();
+
+ if (msg->hdr.type == PJ_STUN_BINDING_INDICATION) {
+ LOG5((sd->ice->obj_name, "Received Binding Indication keep-alive "
+ "for component %d", sd->comp_id));
+ } else {
+ LOG4((sd->ice->obj_name, "Received unexpected %s indication "
+ "for component %d", pj_stun_get_method_name(msg->hdr.type),
+ sd->comp_id));
+ }
+
+ pj_log_pop_indent();
+
+ return PJ_SUCCESS;
+}
+
+
+/* Send application data through the nominated/valid pair of the given
+ * component.
+ *
+ * Returns PJNATH_EICEINCOMPID if comp_id is out of range or unknown,
+ * PJNATH_EICEINPROGRESS if the component has no valid check yet, or
+ * whatever the application's on_tx_pkt callback returns.
+ */
+PJ_DEF(pj_status_t) pj_ice_sess_send_data(pj_ice_sess *ice,
+ unsigned comp_id,
+ const void *data,
+ pj_size_t data_len)
+{
+ pj_status_t status = PJ_SUCCESS;
+ pj_ice_sess_comp *comp;
+ pj_ice_sess_cand *cand;
+ pj_uint8_t transport_id;
+ pj_sockaddr addr;
+
+ PJ_ASSERT_RETURN(ice && comp_id, PJ_EINVAL);
+
+ /* It is possible that comp_cnt is less than comp_id, when remote
+ * doesn't support all the components that we have.
+ */
+ if (comp_id > ice->comp_cnt) {
+ return PJNATH_EICEINCOMPID;
+ }
+
+ pj_mutex_lock(ice->mutex);
+
+ comp = find_comp(ice, comp_id);
+ if (comp == NULL) {
+ status = PJNATH_EICEINCOMPID;
+ pj_mutex_unlock(ice->mutex);
+ goto on_return;
+ }
+
+ if (comp->valid_check == NULL) {
+ status = PJNATH_EICEINPROGRESS;
+ pj_mutex_unlock(ice->mutex);
+ goto on_return;
+ }
+
+ /* Copy out everything needed from the valid check while holding the
+ * lock, so the callback below can run unlocked.
+ */
+ cand = comp->valid_check->lcand;
+ transport_id = cand->transport_id;
+ pj_sockaddr_cp(&addr, &comp->valid_check->rcand->addr);
+
+ /* Release the mutex now to avoid deadlock (see ticket #1451). */
+ pj_mutex_unlock(ice->mutex);
+
+ /* NOTE(review): address length is hard-coded to sizeof(pj_sockaddr_in),
+ * i.e. IPv4 only — confirm before relying on this with IPv6 addresses.
+ */
+ status = (*ice->cb.on_tx_pkt)(ice, comp_id, transport_id,
+ data, data_len,
+ &addr,
+ sizeof(pj_sockaddr_in));
+
+on_return:
+ return status;
+}
+
+
+/* Demultiplex an incoming packet for a component: STUN packets are fed
+ * to the component's STUN session, everything else is handed to the
+ * application through the on_rx_data callback.
+ *
+ * The mutex is released before invoking the application callback to
+ * avoid deadlock.
+ */
+PJ_DEF(pj_status_t) pj_ice_sess_on_rx_pkt(pj_ice_sess *ice,
+ unsigned comp_id,
+ unsigned transport_id,
+ void *pkt,
+ pj_size_t pkt_size,
+ const pj_sockaddr_t *src_addr,
+ int src_addr_len)
+{
+ pj_status_t status = PJ_SUCCESS;
+ pj_ice_sess_comp *comp;
+ pj_ice_msg_data *msg_data = NULL;
+ unsigned i;
+
+ PJ_ASSERT_RETURN(ice, PJ_EINVAL);
+
+ pj_mutex_lock(ice->mutex);
+
+ comp = find_comp(ice, comp_id);
+ if (comp == NULL) {
+ pj_mutex_unlock(ice->mutex);
+ return PJNATH_EICEINCOMPID;
+ }
+
+ /* Find transport (matching entry in the per-session tp_data table) */
+ for (i=0; i<PJ_ARRAY_SIZE(ice->tp_data); ++i) {
+ if (ice->tp_data[i].transport_id == transport_id) {
+ msg_data = &ice->tp_data[i];
+ break;
+ }
+ }
+ if (msg_data == NULL) {
+ /* Caller bug: transport_id was never registered */
+ pj_assert(!"Invalid transport ID");
+ pj_mutex_unlock(ice->mutex);
+ return PJ_EINVAL;
+ }
+
+ /* Don't check fingerprint. We only need to distinguish STUN and non-STUN
+ * packets. We don't need to verify the STUN packet too rigorously, that
+ * will be done by the user.
+ */
+ status = pj_stun_msg_check((const pj_uint8_t*)pkt, pkt_size,
+ PJ_STUN_IS_DATAGRAM |
+ PJ_STUN_NO_FINGERPRINT_CHECK);
+ if (status == PJ_SUCCESS) {
+ status = pj_stun_session_on_rx_pkt(comp->stun_sess, pkt, pkt_size,
+ PJ_STUN_IS_DATAGRAM, msg_data,
+ NULL, src_addr, src_addr_len);
+ if (status != PJ_SUCCESS) {
+ pj_strerror(status, ice->tmp.errmsg, sizeof(ice->tmp.errmsg));
+ LOG4((ice->obj_name, "Error processing incoming message: %s",
+ ice->tmp.errmsg));
+ }
+ pj_mutex_unlock(ice->mutex);
+ } else {
+ /* Not a STUN packet. Call application's callback instead, but release
+ * the mutex now or otherwise we may get deadlock.
+ */
+ pj_mutex_unlock(ice->mutex);
+
+ (*ice->cb.on_rx_data)(ice, comp_id, transport_id, pkt, pkt_size,
+ src_addr, src_addr_len);
+ status = PJ_SUCCESS;
+ }
+
+ return status;
+}
+
+
diff --git a/pjnath/src/pjnath/ice_strans.c b/pjnath/src/pjnath/ice_strans.c
new file mode 100644
index 0000000..8ae2a90
--- /dev/null
+++ b/pjnath/src/pjnath/ice_strans.c
@@ -0,0 +1,1757 @@
+/* $Id: ice_strans.c 4133 2012-05-21 14:00:17Z bennylp $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <pjnath/ice_strans.h>
+#include <pjnath/errno.h>
+#include <pj/addr_resolv.h>
+#include <pj/array.h>
+#include <pj/assert.h>
+#include <pj/ip_helper.h>
+#include <pj/lock.h>
+#include <pj/log.h>
+#include <pj/os.h>
+#include <pj/pool.h>
+#include <pj/rand.h>
+#include <pj/string.h>
+#include <pj/compat/socket.h>
+
+
+/* Verbose per-packet tracing; change "#if 0" to "#if 1" to enable. */
+#if 0
+# define TRACE_PKT(expr) PJ_LOG(5,expr)
+#else
+# define TRACE_PKT(expr)
+#endif
+
+
+/* Transport IDs. Each candidate is tagged with one of these (in its
+ * transport_id field) so outgoing traffic is routed through the right
+ * underlying socket.
+ */
+enum tp_type
+{
+ TP_NONE, /**< No transport. */
+ TP_STUN, /**< Candidate uses the STUN (UDP) socket. */
+ TP_TURN /**< Candidate uses the TURN relay socket. */
+};
+
+/* Candidate's local preference values, assigned to cand->local_pref for
+ * each candidate created below. This is mostly used to specify
+ * preference among candidates with the same type. Since we don't have
+ * the facility to specify that, we'll just set them all to the same
+ * value.
+ */
+#if PJNATH_ICE_PRIO_STD
+# define SRFLX_PREF 65535
+# define HOST_PREF 65535
+# define RELAY_PREF 65535
+#else
+# define SRFLX_PREF 0
+# define HOST_PREF 0
+# define RELAY_PREF 0
+#endif
+
+
+/* The candidate type preference when STUN candidate is used.
+ * NOTE(review): presumably indexed by pj_ice_cand_type (host, srflx,
+ * prflx, relayed) — confirm against the enum declaration.
+ */
+static pj_uint8_t srflx_pref_table[4] =
+{
+#if PJNATH_ICE_PRIO_STD
+ 100, /**< PJ_ICE_HOST_PREF */
+ 110, /**< PJ_ICE_SRFLX_PREF */
+ 126, /**< PJ_ICE_PRFLX_PREF */
+ 0 /**< PJ_ICE_RELAYED_PREF */
+#else
+ /* Keep it to 2 bits */
+ 1, /**< PJ_ICE_HOST_PREF */
+ 2, /**< PJ_ICE_SRFLX_PREF */
+ 3, /**< PJ_ICE_PRFLX_PREF */
+ 0 /**< PJ_ICE_RELAYED_PREF */
+#endif
+};
+
+
+/* ICE callbacks */
+static void on_ice_complete(pj_ice_sess *ice, pj_status_t status);
+static pj_status_t ice_tx_pkt(pj_ice_sess *ice,
+ unsigned comp_id,
+ unsigned transport_id,
+ const void *pkt, pj_size_t size,
+ const pj_sockaddr_t *dst_addr,
+ unsigned dst_addr_len);
+static void ice_rx_data(pj_ice_sess *ice,
+ unsigned comp_id,
+ unsigned transport_id,
+ void *pkt, pj_size_t size,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len);
+
+
+/* STUN socket callbacks */
+/* Notification when incoming packet has been received. */
+static pj_bool_t stun_on_rx_data(pj_stun_sock *stun_sock,
+ void *pkt,
+ unsigned pkt_len,
+ const pj_sockaddr_t *src_addr,
+ unsigned addr_len);
+/* Notifification when asynchronous send operation has completed. */
+static pj_bool_t stun_on_data_sent(pj_stun_sock *stun_sock,
+ pj_ioqueue_op_key_t *send_key,
+ pj_ssize_t sent);
+/* Notification when the status of the STUN transport has changed. */
+static pj_bool_t stun_on_status(pj_stun_sock *stun_sock,
+ pj_stun_sock_op op,
+ pj_status_t status);
+
+
+/* TURN callbacks */
+static void turn_on_rx_data(pj_turn_sock *turn_sock,
+ void *pkt,
+ unsigned pkt_len,
+ const pj_sockaddr_t *peer_addr,
+ unsigned addr_len);
+static void turn_on_state(pj_turn_sock *turn_sock, pj_turn_state_t old_state,
+ pj_turn_state_t new_state);
+
+
+
+/* Forward decls */
+static void destroy_ice_st(pj_ice_strans *ice_st);
+#define ice_st_perror(ice_st,msg,rc) pjnath_perror(ice_st->obj_name,msg,rc)
+static void sess_init_update(pj_ice_strans *ice_st);
+
+static void sess_add_ref(pj_ice_strans *ice_st);
+static pj_bool_t sess_dec_ref(pj_ice_strans *ice_st);
+
+/**
+ * This structure describes an ICE stream transport component. A component
+ * in ICE stream transport typically corresponds to a single socket created
+ * for this component, and bound to a specific transport address. This
+ * component may have multiple alias addresses, for example one alias
+ * address for each interface in a multi-homed host, another for the server
+ * reflexive alias, and another for the relayed alias. For each transport
+ * address alias, an ICE stream transport candidate (#pj_ice_sess_cand)
+ * will be created, and these candidates will eventually be registered to
+ * the ICE session.
+ */
+typedef struct pj_ice_strans_comp
+{
+ pj_ice_strans *ice_st; /**< ICE stream transport. */
+ unsigned comp_id; /**< Component ID. */
+
+ pj_stun_sock *stun_sock; /**< STUN transport. */
+ pj_turn_sock *turn_sock; /**< TURN relay transport. */
+ pj_bool_t turn_log_off; /**< TURN logging off? */
+ unsigned turn_err_cnt; /**< TURN disconnected count. */
+
+ unsigned cand_cnt; /**< # of candidates/aliases. */
+ pj_ice_sess_cand cand_list[PJ_ICE_ST_MAX_CAND]; /**< Cand array */
+
+ unsigned default_cand; /**< Index of default candidate
+ in cand_list. */
+
+} pj_ice_strans_comp;
+
+
+/**
+ * This structure represents the ICE stream transport: the top-level
+ * object tying together the configuration, the per-component sockets
+ * and the ICE session driving connectivity checks.
+ */
+struct pj_ice_strans
+{
+ char *obj_name; /**< Log ID. */
+ pj_pool_t *pool; /**< Pool used by this object. */
+ void *user_data; /**< Application data. */
+ pj_ice_strans_cfg cfg; /**< Configuration (deep-copied). */
+ pj_ice_strans_cb cb; /**< Application callback. */
+ pj_lock_t *init_lock; /**< Initialization mutex. */
+
+ pj_ice_strans_state state; /**< Session state. */
+ pj_ice_sess *ice; /**< ICE session. */
+ pj_time_val start_time;/**< Time when ICE was started */
+
+ unsigned comp_cnt; /**< Number of components. */
+ pj_ice_strans_comp **comp; /**< Components array. */
+
+ pj_timer_entry ka_timer; /**< STUN keep-alive timer. */
+
+ pj_atomic_t *busy_cnt; /**< To prevent destroy */
+ pj_bool_t destroy_req;/**< Destroy has been called? */
+ pj_bool_t cb_called; /**< Init error callback called?*/
+};
+
+
+/* Validate configuration.
+ *
+ * Returns PJ_SUCCESS when the configuration is usable, or PJ_EINVAL
+ * when the embedded STUN config fails pj_stun_config_check_valid().
+ */
+static pj_status_t pj_ice_strans_cfg_check_valid(const pj_ice_strans_cfg *cfg)
+{
+ pj_bool_t valid;
+
+ /* pj_stun_config_check_valid() returns a pj_bool_t (PJ_TRUE when
+ * valid), not a pj_status_t. The previous code did
+ * "if (!status) return status;", which returned 0 == PJ_SUCCESS
+ * exactly when validation FAILED, so an invalid config was silently
+ * accepted by pj_ice_strans_create(). Map failure to a real error.
+ */
+ valid = pj_stun_config_check_valid(&cfg->stun_cfg);
+ if (!valid)
+ return PJ_EINVAL;
+
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Initialize ICE transport configuration with default values:
+ * zero everything, install library defaults for the nested STUN/TURN
+ * configs, then set IPv4, the standard STUN port, UDP TURN transport,
+ * and up to 64 host candidates with STUN errors treated as fatal.
+ */
+PJ_DEF(void) pj_ice_strans_cfg_default(pj_ice_strans_cfg *cfg)
+{
+ pj_bzero(cfg, sizeof(*cfg));
+
+ pj_stun_config_init(&cfg->stun_cfg, NULL, 0, NULL, NULL);
+ pj_stun_sock_cfg_default(&cfg->stun.cfg);
+ pj_turn_alloc_param_default(&cfg->turn.alloc_param);
+ pj_turn_sock_cfg_default(&cfg->turn.cfg);
+
+ pj_ice_sess_options_default(&cfg->opt);
+
+ cfg->af = pj_AF_INET();
+ cfg->stun.port = PJ_STUN_PORT;
+ cfg->turn.conn_type = PJ_TURN_TP_UDP;
+
+ cfg->stun.max_host_cands = 64;
+ cfg->stun.ignore_stun_error = PJ_FALSE;
+}
+
+
+/*
+ * Copy configuration. Performs a shallow struct copy, then deep-copies
+ * only the pool-allocated members visible here: the STUN and TURN server
+ * name strings and the TURN auth credential. Other pointer members (if
+ * any) remain shared with 'src'.
+ */
+PJ_DEF(void) pj_ice_strans_cfg_copy( pj_pool_t *pool,
+ pj_ice_strans_cfg *dst,
+ const pj_ice_strans_cfg *src)
+{
+ pj_memcpy(dst, src, sizeof(*src));
+
+ if (src->stun.server.slen)
+ pj_strdup(pool, &dst->stun.server, &src->stun.server);
+ if (src->turn.server.slen)
+ pj_strdup(pool, &dst->turn.server, &src->turn.server);
+ pj_stun_auth_cred_dup(pool, &dst->turn.auth_cred,
+ &src->turn.auth_cred);
+}
+
+
+/*
+ * Add or update TURN candidate for a component: create a TURN socket,
+ * start the (asynchronous) allocation, and add a relayed candidate in
+ * pending state — or, if a relayed candidate already exists (re-alloc
+ * case), invalidate it and reuse its slot.
+ *
+ * Returns the status of TURN socket creation/allocation start; the
+ * allocation itself completes later via turn_on_state().
+ */
+static pj_status_t add_update_turn(pj_ice_strans *ice_st,
+ pj_ice_strans_comp *comp)
+{
+ pj_turn_sock_cb turn_sock_cb;
+ pj_ice_sess_cand *cand = NULL;
+ unsigned i;
+ pj_status_t status;
+
+ /* Find relayed candidate in the component */
+ for (i=0; i<comp->cand_cnt; ++i) {
+ if (comp->cand_list[i].type == PJ_ICE_CAND_TYPE_RELAYED) {
+ cand = &comp->cand_list[i];
+ break;
+ }
+ }
+
+ /* If candidate is found, invalidate it first */
+ if (cand) {
+ cand->status = PJ_EPENDING;
+
+ /* Also if this component's default candidate is set to relay,
+ * move it temporarily to something else.
+ */
+ if ((int)comp->default_cand == cand - comp->cand_list) {
+ /* Init to something */
+ comp->default_cand = 0;
+ /* Use srflx candidate as the default, if any */
+ for (i=0; i<comp->cand_cnt; ++i) {
+ if (comp->cand_list[i].type == PJ_ICE_CAND_TYPE_SRFLX) {
+ comp->default_cand = i;
+ break;
+ }
+ }
+ }
+ }
+
+ /* Init TURN socket */
+ pj_bzero(&turn_sock_cb, sizeof(turn_sock_cb));
+ turn_sock_cb.on_rx_data = &turn_on_rx_data;
+ turn_sock_cb.on_state = &turn_on_state;
+
+ /* Override with component specific QoS settings, if any */
+ if (ice_st->cfg.comp[comp->comp_id-1].qos_type) {
+ ice_st->cfg.turn.cfg.qos_type =
+ ice_st->cfg.comp[comp->comp_id-1].qos_type;
+ }
+ if (ice_st->cfg.comp[comp->comp_id-1].qos_params.flags) {
+ pj_memcpy(&ice_st->cfg.turn.cfg.qos_params,
+ &ice_st->cfg.comp[comp->comp_id-1].qos_params,
+ sizeof(ice_st->cfg.turn.cfg.qos_params));
+ }
+
+ /* Create the TURN transport */
+ status = pj_turn_sock_create(&ice_st->cfg.stun_cfg, ice_st->cfg.af,
+ ice_st->cfg.turn.conn_type,
+ &turn_sock_cb, &ice_st->cfg.turn.cfg,
+ comp, &comp->turn_sock);
+ if (status != PJ_SUCCESS) {
+ return status;
+ }
+
+ /* Add pending job */
+ ///sess_add_ref(ice_st);
+
+ /* Start allocation (asynchronous; result arrives in turn_on_state) */
+ status=pj_turn_sock_alloc(comp->turn_sock,
+ &ice_st->cfg.turn.server,
+ ice_st->cfg.turn.port,
+ ice_st->cfg.resolver,
+ &ice_st->cfg.turn.auth_cred,
+ &ice_st->cfg.turn.alloc_param);
+ if (status != PJ_SUCCESS) {
+ ///sess_dec_ref(ice_st);
+ return status;
+ }
+
+ /* Add relayed candidate with pending status if there's no existing one.
+ * NOTE(review): cand_cnt is not checked against PJ_ICE_ST_MAX_CAND
+ * here; create_comp() leaves one slot free for relay, but confirm no
+ * other path can fill the list before this append.
+ */
+ if (cand == NULL) {
+ cand = &comp->cand_list[comp->cand_cnt++];
+ cand->type = PJ_ICE_CAND_TYPE_RELAYED;
+ cand->status = PJ_EPENDING;
+ cand->local_pref = RELAY_PREF;
+ cand->transport_id = TP_TURN;
+ cand->comp_id = (pj_uint8_t) comp->comp_id;
+ }
+
+ PJ_LOG(4,(ice_st->obj_name,
+ "Comp %d: TURN relay candidate waiting for allocation",
+ comp->comp_id));
+
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Create the component with ID comp_id (1-based) and gather its initial
+ * candidates: a pending srflx candidate when a STUN server is configured,
+ * host candidates for local interfaces (up to max_host_cands), and a
+ * pending relayed candidate when a TURN server is configured.
+ *
+ * Returns PJ_SUCCESS, or the first fatal error from STUN socket
+ * creation/startup.
+ */
+static pj_status_t create_comp(pj_ice_strans *ice_st, unsigned comp_id)
+{
+ pj_ice_strans_comp *comp = NULL;
+ pj_status_t status;
+
+ /* Verify arguments */
+ PJ_ASSERT_RETURN(ice_st && comp_id, PJ_EINVAL);
+
+ /* Check that component ID present */
+ PJ_ASSERT_RETURN(comp_id <= ice_st->comp_cnt, PJNATH_EICEINCOMPID);
+
+ /* Create component */
+ comp = PJ_POOL_ZALLOC_T(ice_st->pool, pj_ice_strans_comp);
+ comp->ice_st = ice_st;
+ comp->comp_id = comp_id;
+
+ ice_st->comp[comp_id-1] = comp;
+
+ /* Initialize default candidate */
+ comp->default_cand = 0;
+
+ /* Create STUN transport if configured */
+ if (ice_st->cfg.stun.server.slen || ice_st->cfg.stun.max_host_cands) {
+ pj_stun_sock_cb stun_sock_cb;
+ pj_ice_sess_cand *cand;
+
+ pj_bzero(&stun_sock_cb, sizeof(stun_sock_cb));
+ stun_sock_cb.on_rx_data = &stun_on_rx_data;
+ stun_sock_cb.on_status = &stun_on_status;
+ stun_sock_cb.on_data_sent = &stun_on_data_sent;
+
+ /* Override component specific QoS settings, if any */
+ if (ice_st->cfg.comp[comp_id-1].qos_type) {
+ ice_st->cfg.stun.cfg.qos_type =
+ ice_st->cfg.comp[comp_id-1].qos_type;
+ }
+ if (ice_st->cfg.comp[comp_id-1].qos_params.flags) {
+ pj_memcpy(&ice_st->cfg.stun.cfg.qos_params,
+ &ice_st->cfg.comp[comp_id-1].qos_params,
+ sizeof(ice_st->cfg.stun.cfg.qos_params));
+ }
+
+ /* Create the STUN transport */
+ status = pj_stun_sock_create(&ice_st->cfg.stun_cfg, NULL,
+ ice_st->cfg.af, &stun_sock_cb,
+ &ice_st->cfg.stun.cfg,
+ comp, &comp->stun_sock);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ /* Start STUN Binding resolution and add srflx candidate
+ * only if server is set
+ */
+ if (ice_st->cfg.stun.server.slen) {
+ pj_stun_sock_info stun_sock_info;
+
+ /* Add pending job */
+ ///sess_add_ref(ice_st);
+
+ PJ_LOG(4,(ice_st->obj_name,
+ "Comp %d: srflx candidate starts Binding discovery",
+ comp_id));
+
+ pj_log_push_indent();
+
+ /* Start Binding resolution (asynchronous; completion is
+ * reported via stun_on_status)
+ */
+ status = pj_stun_sock_start(comp->stun_sock,
+ &ice_st->cfg.stun.server,
+ ice_st->cfg.stun.port,
+ ice_st->cfg.resolver);
+ if (status != PJ_SUCCESS) {
+ ///sess_dec_ref(ice_st);
+ pj_log_pop_indent();
+ return status;
+ }
+
+ /* Enumerate addresses */
+ status = pj_stun_sock_get_info(comp->stun_sock, &stun_sock_info);
+ if (status != PJ_SUCCESS) {
+ ///sess_dec_ref(ice_st);
+ pj_log_pop_indent();
+ return status;
+ }
+
+ /* Add srflx candidate with pending status. */
+ cand = &comp->cand_list[comp->cand_cnt++];
+ cand->type = PJ_ICE_CAND_TYPE_SRFLX;
+ cand->status = PJ_EPENDING;
+ cand->local_pref = SRFLX_PREF;
+ cand->transport_id = TP_STUN;
+ cand->comp_id = (pj_uint8_t) comp_id;
+ pj_sockaddr_cp(&cand->base_addr, &stun_sock_info.aliases[0]);
+ pj_sockaddr_cp(&cand->rel_addr, &cand->base_addr);
+ pj_ice_calc_foundation(ice_st->pool, &cand->foundation,
+ cand->type, &cand->base_addr);
+
+ /* Set default candidate to srflx */
+ comp->default_cand = cand - comp->cand_list;
+
+ pj_log_pop_indent();
+ }
+
+ /* Add local addresses to host candidates, unless max_host_cands
+ * is set to zero.
+ */
+ if (ice_st->cfg.stun.max_host_cands) {
+ pj_stun_sock_info stun_sock_info;
+ unsigned i;
+
+ /* Enumerate addresses */
+ status = pj_stun_sock_get_info(comp->stun_sock, &stun_sock_info);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ for (i=0; i<stun_sock_info.alias_cnt &&
+ i<ice_st->cfg.stun.max_host_cands; ++i)
+ {
+ char addrinfo[PJ_INET6_ADDRSTRLEN+10];
+ const pj_sockaddr *addr = &stun_sock_info.aliases[i];
+
+ /* Leave one candidate for relay */
+ if (comp->cand_cnt >= PJ_ICE_ST_MAX_CAND-1) {
+ PJ_LOG(4,(ice_st->obj_name, "Too many host candidates"));
+ break;
+ }
+
+ /* Ignore loopback addresses unless cfg->stun.loop_addr
+ * is set (127.x.x.x test checks the top address byte)
+ */
+ if ((pj_ntohl(addr->ipv4.sin_addr.s_addr)>>24)==127) {
+ if (ice_st->cfg.stun.loop_addr==PJ_FALSE)
+ continue;
+ }
+
+ cand = &comp->cand_list[comp->cand_cnt++];
+
+ cand->type = PJ_ICE_CAND_TYPE_HOST;
+ cand->status = PJ_SUCCESS;
+ cand->local_pref = HOST_PREF;
+ cand->transport_id = TP_STUN;
+ cand->comp_id = (pj_uint8_t) comp_id;
+ pj_sockaddr_cp(&cand->addr, addr);
+ pj_sockaddr_cp(&cand->base_addr, addr);
+ pj_bzero(&cand->rel_addr, sizeof(cand->rel_addr));
+ pj_ice_calc_foundation(ice_st->pool, &cand->foundation,
+ cand->type, &cand->base_addr);
+
+ PJ_LOG(4,(ice_st->obj_name,
+ "Comp %d: host candidate %s added",
+ comp_id, pj_sockaddr_print(&cand->addr, addrinfo,
+ sizeof(addrinfo), 3)));
+ }
+ }
+ }
+
+ /* Create TURN relay if configured.
+ * NOTE(review): the return status of add_update_turn() is ignored
+ * here, so a TURN setup failure does not fail component creation —
+ * confirm this is intentional (best-effort relay).
+ */
+ if (ice_st->cfg.turn.server.slen) {
+ add_update_turn(ice_st, comp);
+ }
+
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Create ICE stream transport
+ */
+PJ_DEF(pj_status_t) pj_ice_strans_create( const char *name,
+ const pj_ice_strans_cfg *cfg,
+ unsigned comp_cnt,
+ void *user_data,
+ const pj_ice_strans_cb *cb,
+ pj_ice_strans **p_ice_st)
+{
+ pj_pool_t *pool;
+ pj_ice_strans *ice_st;
+ unsigned i;
+ pj_status_t status;
+
+ status = pj_ice_strans_cfg_check_valid(cfg);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ PJ_ASSERT_RETURN(comp_cnt && cb && p_ice_st &&
+ comp_cnt <= PJ_ICE_MAX_COMP , PJ_EINVAL);
+
+ if (name == NULL)
+ name = "ice%p";
+
+ pool = pj_pool_create(cfg->stun_cfg.pf, name, PJNATH_POOL_LEN_ICE_STRANS,
+ PJNATH_POOL_INC_ICE_STRANS, NULL);
+ ice_st = PJ_POOL_ZALLOC_T(pool, pj_ice_strans);
+ ice_st->pool = pool;
+ ice_st->obj_name = pool->obj_name;
+ ice_st->user_data = user_data;
+
+ PJ_LOG(4,(ice_st->obj_name,
+ "Creating ICE stream transport with %d component(s)",
+ comp_cnt));
+ pj_log_push_indent();
+
+ pj_ice_strans_cfg_copy(pool, &ice_st->cfg, cfg);
+ pj_memcpy(&ice_st->cb, cb, sizeof(*cb));
+
+ status = pj_atomic_create(pool, 0, &ice_st->busy_cnt);
+ if (status != PJ_SUCCESS) {
+ destroy_ice_st(ice_st);
+ return status;
+ }
+
+ status = pj_lock_create_recursive_mutex(pool, ice_st->obj_name,
+ &ice_st->init_lock);
+ if (status != PJ_SUCCESS) {
+ destroy_ice_st(ice_st);
+ pj_log_pop_indent();
+ return status;
+ }
+
+ ice_st->comp_cnt = comp_cnt;
+ ice_st->comp = (pj_ice_strans_comp**)
+ pj_pool_calloc(pool, comp_cnt, sizeof(pj_ice_strans_comp*));
+
+ /* Move state to candidate gathering */
+ ice_st->state = PJ_ICE_STRANS_STATE_INIT;
+
+ /* Acquire initialization mutex to prevent callback to be
+ * called before we finish initialization.
+ */
+ pj_lock_acquire(ice_st->init_lock);
+
+ for (i=0; i<comp_cnt; ++i) {
+ status = create_comp(ice_st, i+1);
+ if (status != PJ_SUCCESS) {
+ pj_lock_release(ice_st->init_lock);
+ destroy_ice_st(ice_st);
+ pj_log_pop_indent();
+ return status;
+ }
+ }
+
+ /* Done with initialization */
+ pj_lock_release(ice_st->init_lock);
+
+ PJ_LOG(4,(ice_st->obj_name, "ICE stream transport created"));
+
+ *p_ice_st = ice_st;
+
+ /* Check if all candidates are ready (this may call callback) */
+ sess_init_update(ice_st);
+
+ pj_log_pop_indent();
+
+ return PJ_SUCCESS;
+}
+
+/* Destroy ICE */
+static void destroy_ice_st(pj_ice_strans *ice_st)
+{
+ unsigned i;
+
+ PJ_LOG(5,(ice_st->obj_name, "ICE stream transport destroying.."));
+ pj_log_push_indent();
+
+ /* Destroy ICE if we have ICE */
+ if (ice_st->ice) {
+ pj_ice_sess_destroy(ice_st->ice);
+ ice_st->ice = NULL;
+ }
+
+ /* Destroy all components */
+ for (i=0; i<ice_st->comp_cnt; ++i) {
+ if (ice_st->comp[i]) {
+ if (ice_st->comp[i]->stun_sock) {
+ pj_stun_sock_set_user_data(ice_st->comp[i]->stun_sock, NULL);
+ pj_stun_sock_destroy(ice_st->comp[i]->stun_sock);
+ ice_st->comp[i]->stun_sock = NULL;
+ }
+ if (ice_st->comp[i]->turn_sock) {
+ pj_turn_sock_set_user_data(ice_st->comp[i]->turn_sock, NULL);
+ pj_turn_sock_destroy(ice_st->comp[i]->turn_sock);
+ ice_st->comp[i]->turn_sock = NULL;
+ }
+ }
+ }
+ ice_st->comp_cnt = 0;
+
+ /* Destroy mutex */
+ if (ice_st->init_lock) {
+ pj_lock_acquire(ice_st->init_lock);
+ pj_lock_release(ice_st->init_lock);
+ pj_lock_destroy(ice_st->init_lock);
+ ice_st->init_lock = NULL;
+ }
+
+ /* Destroy reference counter */
+ if (ice_st->busy_cnt) {
+ pj_assert(pj_atomic_get(ice_st->busy_cnt)==0);
+ pj_atomic_destroy(ice_st->busy_cnt);
+ ice_st->busy_cnt = NULL;
+ }
+
+ PJ_LOG(4,(ice_st->obj_name, "ICE stream transport destroyed"));
+
+ /* Done */
+ pj_pool_release(ice_st->pool);
+ pj_log_pop_indent();
+}
+
+/* Get ICE session state. */
+PJ_DEF(pj_ice_strans_state) pj_ice_strans_get_state(pj_ice_strans *ice_st)
+{
+ return ice_st->state;
+}
+
+/* State string */
+PJ_DEF(const char*) pj_ice_strans_state_name(pj_ice_strans_state state)
+{
+ const char *names[] = {
+ "Null",
+ "Candidate Gathering",
+ "Candidate Gathering Complete",
+ "Session Initialized",
+ "Negotiation In Progress",
+ "Negotiation Success",
+ "Negotiation Failed"
+ };
+
+ PJ_ASSERT_RETURN(state <= PJ_ICE_STRANS_STATE_FAILED, "???");
+ return names[state];
+}
+
+/* Notification about failure */
+static void sess_fail(pj_ice_strans *ice_st, pj_ice_strans_op op,
+ const char *title, pj_status_t status)
+{
+ char errmsg[PJ_ERR_MSG_SIZE];
+
+ pj_strerror(status, errmsg, sizeof(errmsg));
+ PJ_LOG(4,(ice_st->obj_name, "%s: %s", title, errmsg));
+ pj_log_push_indent();
+
+ if (op==PJ_ICE_STRANS_OP_INIT && ice_st->cb_called) {
+ pj_log_pop_indent();
+ return;
+ }
+
+ ice_st->cb_called = PJ_TRUE;
+
+ if (ice_st->cb.on_ice_complete)
+ (*ice_st->cb.on_ice_complete)(ice_st, op, status);
+
+ pj_log_pop_indent();
+}
+
+/* Update initialization status */
+static void sess_init_update(pj_ice_strans *ice_st)
+{
+ unsigned i;
+
+ /* Ignore if init callback has been called */
+ if (ice_st->cb_called)
+ return;
+
+ /* Notify application when all candidates have been gathered */
+ for (i=0; i<ice_st->comp_cnt; ++i) {
+ unsigned j;
+ pj_ice_strans_comp *comp = ice_st->comp[i];
+
+ for (j=0; j<comp->cand_cnt; ++j) {
+ pj_ice_sess_cand *cand = &comp->cand_list[j];
+
+ if (cand->status == PJ_EPENDING)
+ return;
+ }
+ }
+
+ /* All candidates have been gathered */
+ ice_st->cb_called = PJ_TRUE;
+ ice_st->state = PJ_ICE_STRANS_STATE_READY;
+ if (ice_st->cb.on_ice_complete)
+ (*ice_st->cb.on_ice_complete)(ice_st, PJ_ICE_STRANS_OP_INIT,
+ PJ_SUCCESS);
+}
+
+/*
+ * Destroy ICE stream transport.
+ */
+PJ_DEF(pj_status_t) pj_ice_strans_destroy(pj_ice_strans *ice_st)
+{
+ PJ_ASSERT_RETURN(ice_st, PJ_EINVAL);
+
+ ice_st->destroy_req = PJ_TRUE;
+ if (pj_atomic_get(ice_st->busy_cnt) > 0) {
+ PJ_LOG(5,(ice_st->obj_name,
+ "ICE strans object is busy, will destroy later"));
+ return PJ_EPENDING;
+ }
+
+ destroy_ice_st(ice_st);
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Increment busy counter.
+ */
+static void sess_add_ref(pj_ice_strans *ice_st)
+{
+ pj_atomic_inc(ice_st->busy_cnt);
+}
+
+/*
+ * Decrement busy counter. If the counter has reached zero and destroy
+ * has been requested, destroy the object and return FALSE.
+ */
+static pj_bool_t sess_dec_ref(pj_ice_strans *ice_st)
+{
+ int count = pj_atomic_dec_and_get(ice_st->busy_cnt);
+ pj_assert(count >= 0);
+ if (count==0 && ice_st->destroy_req) {
+ pj_ice_strans_destroy(ice_st);
+ return PJ_FALSE;
+ } else {
+ return PJ_TRUE;
+ }
+}
+
+/*
+ * Get user data
+ */
+PJ_DEF(void*) pj_ice_strans_get_user_data(pj_ice_strans *ice_st)
+{
+ PJ_ASSERT_RETURN(ice_st, NULL);
+ return ice_st->user_data;
+}
+
+
+/*
+ * Get the value of various options of the ICE stream transport.
+ */
+PJ_DEF(pj_status_t) pj_ice_strans_get_options( pj_ice_strans *ice_st,
+ pj_ice_sess_options *opt)
+{
+ PJ_ASSERT_RETURN(ice_st && opt, PJ_EINVAL);
+ pj_memcpy(opt, &ice_st->cfg.opt, sizeof(*opt));
+ return PJ_SUCCESS;
+}
+
+/*
+ * Specify various options for this ICE stream transport.
+ */
+PJ_DEF(pj_status_t) pj_ice_strans_set_options(pj_ice_strans *ice_st,
+ const pj_ice_sess_options *opt)
+{
+ PJ_ASSERT_RETURN(ice_st && opt, PJ_EINVAL);
+ pj_memcpy(&ice_st->cfg.opt, opt, sizeof(*opt));
+ if (ice_st->ice)
+ pj_ice_sess_set_options(ice_st->ice, &ice_st->cfg.opt);
+ return PJ_SUCCESS;
+}
+
+/*
+ * Create ICE!
+ */
+PJ_DEF(pj_status_t) pj_ice_strans_init_ice(pj_ice_strans *ice_st,
+ pj_ice_sess_role role,
+ const pj_str_t *local_ufrag,
+ const pj_str_t *local_passwd)
+{
+ pj_status_t status;
+ unsigned i;
+ pj_ice_sess_cb ice_cb;
+ //const pj_uint8_t srflx_prio[4] = { 100, 126, 110, 0 };
+
+ /* Check arguments */
+ PJ_ASSERT_RETURN(ice_st, PJ_EINVAL);
+ /* Must not have ICE */
+ PJ_ASSERT_RETURN(ice_st->ice == NULL, PJ_EINVALIDOP);
+ /* Components must have been created */
+ PJ_ASSERT_RETURN(ice_st->comp[0] != NULL, PJ_EINVALIDOP);
+
+ /* Init callback */
+ pj_bzero(&ice_cb, sizeof(ice_cb));
+ ice_cb.on_ice_complete = &on_ice_complete;
+ ice_cb.on_rx_data = &ice_rx_data;
+ ice_cb.on_tx_pkt = &ice_tx_pkt;
+
+ /* Create! */
+ status = pj_ice_sess_create(&ice_st->cfg.stun_cfg, ice_st->obj_name, role,
+ ice_st->comp_cnt, &ice_cb,
+ local_ufrag, local_passwd, &ice_st->ice);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ /* Associate user data */
+ ice_st->ice->user_data = (void*)ice_st;
+
+ /* Set options */
+ pj_ice_sess_set_options(ice_st->ice, &ice_st->cfg.opt);
+
+ /* If default candidate for components are SRFLX one, upload a custom
+ * type priority to ICE session so that SRFLX candidates will get
+ * checked first.
+ */
+ if (ice_st->comp[0]->default_cand >= 0 &&
+ ice_st->comp[0]->cand_list[ice_st->comp[0]->default_cand].type
+ == PJ_ICE_CAND_TYPE_SRFLX)
+ {
+ pj_ice_sess_set_prefs(ice_st->ice, srflx_pref_table);
+ }
+
+ /* Add components/candidates */
+ for (i=0; i<ice_st->comp_cnt; ++i) {
+ unsigned j;
+ pj_ice_strans_comp *comp = ice_st->comp[i];
+
+ /* Re-enable logging for Send/Data indications */
+ if (comp->turn_sock) {
+ PJ_LOG(5,(ice_st->obj_name,
+ "Disabling STUN Indication logging for "
+ "component %d", i+1));
+ pj_turn_sock_set_log(comp->turn_sock, 0xFFFF);
+ comp->turn_log_off = PJ_FALSE;
+ }
+
+ for (j=0; j<comp->cand_cnt; ++j) {
+ pj_ice_sess_cand *cand = &comp->cand_list[j];
+ unsigned ice_cand_id;
+
+ /* Skip if candidate is not ready */
+ if (cand->status != PJ_SUCCESS) {
+ PJ_LOG(5,(ice_st->obj_name,
+ "Candidate %d of comp %d is not added (pending)",
+ j, i));
+ continue;
+ }
+
+ /* Must have address */
+ pj_assert(pj_sockaddr_has_addr(&cand->addr));
+
+ /* Add the candidate */
+ status = pj_ice_sess_add_cand(ice_st->ice, comp->comp_id,
+ cand->transport_id, cand->type,
+ cand->local_pref,
+ &cand->foundation, &cand->addr,
+ &cand->base_addr, &cand->rel_addr,
+ pj_sockaddr_get_len(&cand->addr),
+ (unsigned*)&ice_cand_id);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+ }
+ }
+
+ /* ICE session is ready for negotiation */
+ ice_st->state = PJ_ICE_STRANS_STATE_SESS_READY;
+
+ return PJ_SUCCESS;
+
+on_error:
+ pj_ice_strans_stop_ice(ice_st);
+ return status;
+}
+
+/*
+ * Check if the ICE stream transport has the ICE session created.
+ */
+PJ_DEF(pj_bool_t) pj_ice_strans_has_sess(pj_ice_strans *ice_st)
+{
+ PJ_ASSERT_RETURN(ice_st, PJ_FALSE);
+ return ice_st->ice != NULL;
+}
+
+/*
+ * Check if ICE negotiation is still running.
+ */
+PJ_DEF(pj_bool_t) pj_ice_strans_sess_is_running(pj_ice_strans *ice_st)
+{
+ return ice_st && ice_st->ice && ice_st->ice->rcand_cnt &&
+ !pj_ice_strans_sess_is_complete(ice_st);
+}
+
+
+/*
+ * Check if ICE negotiation has completed.
+ */
+PJ_DEF(pj_bool_t) pj_ice_strans_sess_is_complete(pj_ice_strans *ice_st)
+{
+ return ice_st && ice_st->ice && ice_st->ice->is_complete;
+}
+
+
+/*
+ * Get the current/running component count.
+ */
+PJ_DEF(unsigned) pj_ice_strans_get_running_comp_cnt(pj_ice_strans *ice_st)
+{
+ PJ_ASSERT_RETURN(ice_st, PJ_EINVAL);
+
+ if (ice_st->ice && ice_st->ice->rcand_cnt) {
+ return ice_st->ice->comp_cnt;
+ } else {
+ return ice_st->comp_cnt;
+ }
+}
+
+
+/*
+ * Get the ICE username fragment and password of the ICE session.
+ */
+PJ_DEF(pj_status_t) pj_ice_strans_get_ufrag_pwd( pj_ice_strans *ice_st,
+ pj_str_t *loc_ufrag,
+ pj_str_t *loc_pwd,
+ pj_str_t *rem_ufrag,
+ pj_str_t *rem_pwd)
+{
+ PJ_ASSERT_RETURN(ice_st && ice_st->ice, PJ_EINVALIDOP);
+
+ if (loc_ufrag) *loc_ufrag = ice_st->ice->rx_ufrag;
+ if (loc_pwd) *loc_pwd = ice_st->ice->rx_pass;
+
+ if (rem_ufrag || rem_pwd) {
+ PJ_ASSERT_RETURN(ice_st->ice->rcand_cnt != 0, PJ_EINVALIDOP);
+ if (rem_ufrag) *rem_ufrag = ice_st->ice->tx_ufrag;
+ if (rem_pwd) *rem_pwd = ice_st->ice->tx_pass;
+ }
+
+ return PJ_SUCCESS;
+}
+
+/*
+ * Get number of candidates
+ */
+PJ_DEF(unsigned) pj_ice_strans_get_cands_count(pj_ice_strans *ice_st,
+ unsigned comp_id)
+{
+ unsigned i, cnt;
+
+ PJ_ASSERT_RETURN(ice_st && ice_st->ice && comp_id &&
+ comp_id <= ice_st->comp_cnt, 0);
+
+ cnt = 0;
+ for (i=0; i<ice_st->ice->lcand_cnt; ++i) {
+ if (ice_st->ice->lcand[i].comp_id != comp_id)
+ continue;
+ ++cnt;
+ }
+
+ return cnt;
+}
+
+/*
+ * Enum candidates
+ */
+PJ_DEF(pj_status_t) pj_ice_strans_enum_cands(pj_ice_strans *ice_st,
+ unsigned comp_id,
+ unsigned *count,
+ pj_ice_sess_cand cand[])
+{
+ unsigned i, cnt;
+
+ PJ_ASSERT_RETURN(ice_st && ice_st->ice && comp_id &&
+ comp_id <= ice_st->comp_cnt && count && cand, PJ_EINVAL);
+
+ cnt = 0;
+ for (i=0; i<ice_st->ice->lcand_cnt && cnt<*count; ++i) {
+ if (ice_st->ice->lcand[i].comp_id != comp_id)
+ continue;
+ pj_memcpy(&cand[cnt], &ice_st->ice->lcand[i],
+ sizeof(pj_ice_sess_cand));
+ ++cnt;
+ }
+
+ *count = cnt;
+ return PJ_SUCCESS;
+}
+
+/*
+ * Get default candidate.
+ */
+PJ_DEF(pj_status_t) pj_ice_strans_get_def_cand( pj_ice_strans *ice_st,
+ unsigned comp_id,
+ pj_ice_sess_cand *cand)
+{
+ const pj_ice_sess_check *valid_pair;
+
+ PJ_ASSERT_RETURN(ice_st && comp_id && comp_id <= ice_st->comp_cnt &&
+ cand, PJ_EINVAL);
+
+ valid_pair = pj_ice_strans_get_valid_pair(ice_st, comp_id);
+ if (valid_pair) {
+ pj_memcpy(cand, valid_pair->lcand, sizeof(pj_ice_sess_cand));
+ } else {
+ pj_ice_strans_comp *comp = ice_st->comp[comp_id - 1];
+ pj_assert(comp->default_cand>=0 && comp->default_cand<comp->cand_cnt);
+ pj_memcpy(cand, &comp->cand_list[comp->default_cand],
+ sizeof(pj_ice_sess_cand));
+ }
+ return PJ_SUCCESS;
+}
+
+/*
+ * Get the current ICE role.
+ */
+PJ_DEF(pj_ice_sess_role) pj_ice_strans_get_role(pj_ice_strans *ice_st)
+{
+ PJ_ASSERT_RETURN(ice_st && ice_st->ice, PJ_ICE_SESS_ROLE_UNKNOWN);
+ return ice_st->ice->role;
+}
+
+/*
+ * Change session role.
+ */
+PJ_DEF(pj_status_t) pj_ice_strans_change_role( pj_ice_strans *ice_st,
+ pj_ice_sess_role new_role)
+{
+ PJ_ASSERT_RETURN(ice_st && ice_st->ice, PJ_EINVALIDOP);
+ return pj_ice_sess_change_role(ice_st->ice, new_role);
+}
+
+/*
+ * Start ICE processing !
+ */
+PJ_DEF(pj_status_t) pj_ice_strans_start_ice( pj_ice_strans *ice_st,
+ const pj_str_t *rem_ufrag,
+ const pj_str_t *rem_passwd,
+ unsigned rem_cand_cnt,
+ const pj_ice_sess_cand rem_cand[])
+{
+ pj_status_t status;
+
+ PJ_ASSERT_RETURN(ice_st && rem_ufrag && rem_passwd &&
+ rem_cand_cnt && rem_cand, PJ_EINVAL);
+
+ /* Mark start time */
+ pj_gettimeofday(&ice_st->start_time);
+
+ /* Build check list */
+ status = pj_ice_sess_create_check_list(ice_st->ice, rem_ufrag, rem_passwd,
+ rem_cand_cnt, rem_cand);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ /* If we have TURN candidate, now is the time to create the permissions */
+ if (ice_st->comp[0]->turn_sock) {
+ unsigned i;
+
+ for (i=0; i<ice_st->comp_cnt; ++i) {
+ pj_ice_strans_comp *comp = ice_st->comp[i];
+ pj_sockaddr addrs[PJ_ICE_ST_MAX_CAND];
+ unsigned j, count=0;
+
+ /* Gather remote addresses for this component */
+ for (j=0; j<rem_cand_cnt && count<PJ_ARRAY_SIZE(addrs); ++j) {
+ if (rem_cand[j].comp_id==i+1) {
+ pj_memcpy(&addrs[count++], &rem_cand[j].addr,
+ pj_sockaddr_get_len(&rem_cand[j].addr));
+ }
+ }
+
+ if (count) {
+ status = pj_turn_sock_set_perm(comp->turn_sock, count,
+ addrs, 0);
+ if (status != PJ_SUCCESS) {
+ pj_ice_strans_stop_ice(ice_st);
+ return status;
+ }
+ }
+ }
+ }
+
+ /* Start ICE negotiation! */
+ status = pj_ice_sess_start_check(ice_st->ice);
+ if (status != PJ_SUCCESS) {
+ pj_ice_strans_stop_ice(ice_st);
+ return status;
+ }
+
+ ice_st->state = PJ_ICE_STRANS_STATE_NEGO;
+ return status;
+}
+
+/*
+ * Get valid pair.
+ */
+PJ_DEF(const pj_ice_sess_check*)
+pj_ice_strans_get_valid_pair(const pj_ice_strans *ice_st,
+ unsigned comp_id)
+{
+ PJ_ASSERT_RETURN(ice_st && comp_id && comp_id <= ice_st->comp_cnt,
+ NULL);
+
+ if (ice_st->ice == NULL)
+ return NULL;
+
+ return ice_st->ice->comp[comp_id-1].valid_check;
+}
+
+/*
+ * Stop ICE!
+ */
+PJ_DEF(pj_status_t) pj_ice_strans_stop_ice(pj_ice_strans *ice_st)
+{
+ if (ice_st->ice) {
+ pj_ice_sess_destroy(ice_st->ice);
+ ice_st->ice = NULL;
+ }
+
+ ice_st->state = PJ_ICE_STRANS_STATE_INIT;
+ return PJ_SUCCESS;
+}
+
+/*
+ * Application wants to send outgoing packet.
+ */
+PJ_DEF(pj_status_t) pj_ice_strans_sendto( pj_ice_strans *ice_st,
+ unsigned comp_id,
+ const void *data,
+ pj_size_t data_len,
+ const pj_sockaddr_t *dst_addr,
+ int dst_addr_len)
+{
+ pj_ssize_t pkt_size;
+ pj_ice_strans_comp *comp;
+ unsigned def_cand;
+ pj_status_t status;
+
+ PJ_ASSERT_RETURN(ice_st && comp_id && comp_id <= ice_st->comp_cnt &&
+ dst_addr && dst_addr_len, PJ_EINVAL);
+
+ comp = ice_st->comp[comp_id-1];
+
+ /* Check that default candidate for the component exists */
+ def_cand = comp->default_cand;
+ if (def_cand >= comp->cand_cnt)
+ return PJ_EINVALIDOP;
+
+ /* If ICE is available, send data with ICE, otherwise send with the
+ * default candidate selected during initialization.
+ *
+ * https://trac.pjsip.org/repos/ticket/1416:
+ * Once ICE has failed, also send data with the default candidate.
+ */
+ if (ice_st->ice && ice_st->state < PJ_ICE_STRANS_STATE_FAILED) {
+ if (comp->turn_sock) {
+ pj_turn_sock_lock(comp->turn_sock);
+ }
+ status = pj_ice_sess_send_data(ice_st->ice, comp_id, data, data_len);
+ if (comp->turn_sock) {
+ pj_turn_sock_unlock(comp->turn_sock);
+ }
+ return status;
+
+ } else if (comp->cand_list[def_cand].status == PJ_SUCCESS) {
+
+ if (comp->cand_list[def_cand].type == PJ_ICE_CAND_TYPE_RELAYED) {
+
+ enum {
+ msg_disable_ind = 0xFFFF &
+ ~(PJ_STUN_SESS_LOG_TX_IND|
+ PJ_STUN_SESS_LOG_RX_IND)
+ };
+
+ /* https://trac.pjsip.org/repos/ticket/1316 */
+ if (comp->turn_sock == NULL) {
+ /* TURN socket error */
+ return PJ_EINVALIDOP;
+ }
+
+ if (!comp->turn_log_off) {
+ /* Disable logging for Send/Data indications */
+ PJ_LOG(5,(ice_st->obj_name,
+ "Disabling STUN Indication logging for "
+ "component %d", comp->comp_id));
+ pj_turn_sock_set_log(comp->turn_sock, msg_disable_ind);
+ comp->turn_log_off = PJ_TRUE;
+ }
+
+ status = pj_turn_sock_sendto(comp->turn_sock, (const pj_uint8_t*)data, data_len,
+ dst_addr, dst_addr_len);
+ return (status==PJ_SUCCESS||status==PJ_EPENDING) ?
+ PJ_SUCCESS : status;
+ } else {
+ pkt_size = data_len;
+ status = pj_stun_sock_sendto(comp->stun_sock, NULL, data,
+ data_len, 0, dst_addr, dst_addr_len);
+ return (status==PJ_SUCCESS||status==PJ_EPENDING) ?
+ PJ_SUCCESS : status;
+ }
+
+ } else
+ return PJ_EINVALIDOP;
+}
+
+/*
+ * Callback called by ICE session when ICE processing is complete, either
+ * successfully or with failure.
+ */
+static void on_ice_complete(pj_ice_sess *ice, pj_status_t status)
+{
+ pj_ice_strans *ice_st = (pj_ice_strans*)ice->user_data;
+ pj_time_val t;
+ unsigned msec;
+
+ sess_add_ref(ice_st);
+
+ pj_gettimeofday(&t);
+ PJ_TIME_VAL_SUB(t, ice_st->start_time);
+ msec = PJ_TIME_VAL_MSEC(t);
+
+ if (ice_st->cb.on_ice_complete) {
+ if (status != PJ_SUCCESS) {
+ char errmsg[PJ_ERR_MSG_SIZE];
+ pj_strerror(status, errmsg, sizeof(errmsg));
+ PJ_LOG(4,(ice_st->obj_name,
+ "ICE negotiation failed after %ds:%03d: %s",
+ msec/1000, msec%1000, errmsg));
+ } else {
+ unsigned i;
+ enum {
+ msg_disable_ind = 0xFFFF &
+ ~(PJ_STUN_SESS_LOG_TX_IND|
+ PJ_STUN_SESS_LOG_RX_IND)
+ };
+
+ PJ_LOG(4,(ice_st->obj_name,
+ "ICE negotiation success after %ds:%03d",
+ msec/1000, msec%1000));
+
+ for (i=0; i<ice_st->comp_cnt; ++i) {
+ const pj_ice_sess_check *check;
+
+ check = pj_ice_strans_get_valid_pair(ice_st, i+1);
+ if (check) {
+ char lip[PJ_INET6_ADDRSTRLEN+10];
+ char rip[PJ_INET6_ADDRSTRLEN+10];
+
+ pj_sockaddr_print(&check->lcand->addr, lip,
+ sizeof(lip), 3);
+ pj_sockaddr_print(&check->rcand->addr, rip,
+ sizeof(rip), 3);
+
+ if (check->lcand->transport_id == TP_TURN) {
+ /* Activate channel binding for the remote address
+ * for more efficient data transfer using TURN.
+ */
+ status = pj_turn_sock_bind_channel(
+ ice_st->comp[i]->turn_sock,
+ &check->rcand->addr,
+ sizeof(check->rcand->addr));
+
+ /* Disable logging for Send/Data indications */
+ PJ_LOG(5,(ice_st->obj_name,
+ "Disabling STUN Indication logging for "
+ "component %d", i+1));
+ pj_turn_sock_set_log(ice_st->comp[i]->turn_sock,
+ msg_disable_ind);
+ ice_st->comp[i]->turn_log_off = PJ_TRUE;
+ }
+
+ PJ_LOG(4,(ice_st->obj_name, " Comp %d: "
+ "sending from %s candidate %s to "
+ "%s candidate %s",
+ i+1,
+ pj_ice_get_cand_type_name(check->lcand->type),
+ lip,
+ pj_ice_get_cand_type_name(check->rcand->type),
+ rip));
+
+ } else {
+ PJ_LOG(4,(ice_st->obj_name,
+ "Comp %d: disabled", i+1));
+ }
+ }
+ }
+
+ ice_st->state = (status==PJ_SUCCESS) ? PJ_ICE_STRANS_STATE_RUNNING :
+ PJ_ICE_STRANS_STATE_FAILED;
+
+ pj_log_push_indent();
+ (*ice_st->cb.on_ice_complete)(ice_st, PJ_ICE_STRANS_OP_NEGOTIATION,
+ status);
+ pj_log_pop_indent();
+
+ }
+
+ sess_dec_ref(ice_st);
+}
+
+/*
+ * Callback called by ICE session when it wants to send outgoing packet.
+ */
+static pj_status_t ice_tx_pkt(pj_ice_sess *ice,
+ unsigned comp_id,
+ unsigned transport_id,
+ const void *pkt, pj_size_t size,
+ const pj_sockaddr_t *dst_addr,
+ unsigned dst_addr_len)
+{
+ pj_ice_strans *ice_st = (pj_ice_strans*)ice->user_data;
+ pj_ice_strans_comp *comp;
+ pj_status_t status;
+
+ PJ_ASSERT_RETURN(comp_id && comp_id <= ice_st->comp_cnt, PJ_EINVAL);
+
+ comp = ice_st->comp[comp_id-1];
+
+ TRACE_PKT((comp->ice_st->obj_name,
+ "Component %d TX packet to %s:%d with transport %d",
+ comp_id,
+ pj_inet_ntoa(((pj_sockaddr_in*)dst_addr)->sin_addr),
+ (int)pj_ntohs(((pj_sockaddr_in*)dst_addr)->sin_port),
+ transport_id));
+
+ if (transport_id == TP_TURN) {
+ if (comp->turn_sock) {
+ status = pj_turn_sock_sendto(comp->turn_sock,
+ (const pj_uint8_t*)pkt, size,
+ dst_addr, dst_addr_len);
+ } else {
+ status = PJ_EINVALIDOP;
+ }
+ } else if (transport_id == TP_STUN) {
+ status = pj_stun_sock_sendto(comp->stun_sock, NULL,
+ pkt, size, 0,
+ dst_addr, dst_addr_len);
+ } else {
+ pj_assert(!"Invalid transport ID");
+ status = PJ_EINVALIDOP;
+ }
+
+ return (status==PJ_SUCCESS||status==PJ_EPENDING) ? PJ_SUCCESS : status;
+}
+
+/*
+ * Callback called by ICE session when it receives application data.
+ */
+static void ice_rx_data(pj_ice_sess *ice,
+ unsigned comp_id,
+ unsigned transport_id,
+ void *pkt, pj_size_t size,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len)
+{
+ pj_ice_strans *ice_st = (pj_ice_strans*)ice->user_data;
+
+ PJ_UNUSED_ARG(transport_id);
+
+ if (ice_st->cb.on_rx_data) {
+ (*ice_st->cb.on_rx_data)(ice_st, comp_id, pkt, size,
+ src_addr, src_addr_len);
+ }
+}
+
+/* Notification when incoming packet has been received from
+ * the STUN socket.
+ */
+static pj_bool_t stun_on_rx_data(pj_stun_sock *stun_sock,
+ void *pkt,
+ unsigned pkt_len,
+ const pj_sockaddr_t *src_addr,
+ unsigned addr_len)
+{
+ pj_ice_strans_comp *comp;
+ pj_ice_strans *ice_st;
+ pj_status_t status;
+
+ comp = (pj_ice_strans_comp*) pj_stun_sock_get_user_data(stun_sock);
+ if (comp == NULL) {
+ /* We have disassociated ourselves from the STUN socket */
+ return PJ_FALSE;
+ }
+
+ ice_st = comp->ice_st;
+
+ sess_add_ref(ice_st);
+
+ if (ice_st->ice == NULL) {
+ /* The ICE session is gone, but we're still receiving packets.
+ * This could also happen if remote doesn't do ICE. So just
+ * report this to application.
+ */
+ if (ice_st->cb.on_rx_data) {
+ (*ice_st->cb.on_rx_data)(ice_st, comp->comp_id, pkt, pkt_len,
+ src_addr, addr_len);
+ }
+
+ } else {
+
+ /* Hand over the packet to ICE session */
+ status = pj_ice_sess_on_rx_pkt(comp->ice_st->ice, comp->comp_id,
+ TP_STUN, pkt, pkt_len,
+ src_addr, addr_len);
+
+ if (status != PJ_SUCCESS) {
+ ice_st_perror(comp->ice_st, "Error processing packet",
+ status);
+ }
+ }
+
+ return sess_dec_ref(ice_st);
+}
+
/* Notification when asynchronous send operation to the STUN socket
 * has completed. Nothing to do here; all arguments are ignored and
 * PJ_TRUE is returned to keep the STUN socket alive.
 */
static pj_bool_t stun_on_data_sent(pj_stun_sock *stun_sock,
				   pj_ioqueue_op_key_t *send_key,
				   pj_ssize_t sent)
{
    PJ_UNUSED_ARG(stun_sock);
    PJ_UNUSED_ARG(send_key);
    PJ_UNUSED_ARG(sent);
    return PJ_TRUE;
}
+
/* Notification when the status of the STUN transport has changed.
 *
 * Invoked for DNS resolution, Binding discovery, mapped-address change
 * and keep-alive results. Updates the component's server-reflexive
 * candidate accordingly and may complete initialization (via
 * sess_init_update()) or fail the whole session (via sess_fail()),
 * depending on cfg.stun.ignore_stun_error.
 *
 * Returns PJ_FALSE if the ICE stream transport got destroyed as a side
 * effect of the final sess_dec_ref(), PJ_TRUE otherwise.
 */
static pj_bool_t stun_on_status(pj_stun_sock *stun_sock,
				pj_stun_sock_op op,
				pj_status_t status)
{
    pj_ice_strans_comp *comp;
    pj_ice_strans *ice_st;
    pj_ice_sess_cand *cand = NULL;
    unsigned i;

    pj_assert(status != PJ_EPENDING);

    comp = (pj_ice_strans_comp*) pj_stun_sock_get_user_data(stun_sock);
    ice_st = comp->ice_st;

    /* Keep the transport alive for the duration of this callback */
    sess_add_ref(ice_st);

    /* Wait until initialization completes */
    pj_lock_acquire(ice_st->init_lock);

    /* Find the srflx cancidate */
    for (i=0; i<comp->cand_cnt; ++i) {
	if (comp->cand_list[i].type == PJ_ICE_CAND_TYPE_SRFLX) {
	    cand = &comp->cand_list[i];
	    break;
	}
    }

    pj_lock_release(ice_st->init_lock);

    /* It is possible that we don't have srflx candidate even though this
     * callback is called. This could happen when we cancel adding srflx
     * candidate due to initialization error.
     */
    if (cand == NULL) {
	return sess_dec_ref(ice_st);
    }

    switch (op) {
    case PJ_STUN_SOCK_DNS_OP:
	if (status != PJ_SUCCESS) {
	    /* May not have cand, e.g. when error during init */
	    if (cand)
		cand->status = status;
	    if (!ice_st->cfg.stun.ignore_stun_error) {
		sess_fail(ice_st, PJ_ICE_STRANS_OP_INIT,
			  "DNS resolution failed", status);
	    } else {
		PJ_LOG(4,(ice_st->obj_name,
			  "STUN error is ignored for comp %d",
			  comp->comp_id));
	    }
	}
	break;
    case PJ_STUN_SOCK_BINDING_OP:
    case PJ_STUN_SOCK_MAPPED_ADDR_CHANGE:
	if (status == PJ_SUCCESS) {
	    pj_stun_sock_info info;

	    /* Note: 'status' is reused for the get_info() result; a
	     * failure here falls through to the error handling below.
	     */
	    status = pj_stun_sock_get_info(stun_sock, &info);
	    if (status == PJ_SUCCESS) {
		char ipaddr[PJ_INET6_ADDRSTRLEN+10];
		const char *op_name = (op==PJ_STUN_SOCK_BINDING_OP) ?
				    "Binding discovery complete" :
				    "srflx address changed";
		pj_bool_t dup = PJ_FALSE;

		/* Eliminate the srflx candidate if the address is
		 * equal to other (host) candidates.
		 */
		for (i=0; i<comp->cand_cnt; ++i) {
		    if (comp->cand_list[i].type == PJ_ICE_CAND_TYPE_HOST &&
			pj_sockaddr_cmp(&comp->cand_list[i].addr,
					&info.mapped_addr) == 0)
		    {
			dup = PJ_TRUE;
			break;
		    }
		}

		if (dup) {
		    /* Duplicate found, remove the srflx candidate */
		    unsigned idx = cand - comp->cand_list;

		    /* Update default candidate index: shift it down if it
		     * was past the removed slot; if it WAS the removed
		     * slot, '!idx' picks the other of the first two
		     * entries (assumes default stays near the front —
		     * NOTE(review): verify this holds for all configs).
		     */
		    if (comp->default_cand > idx) {
			--comp->default_cand;
		    } else if (comp->default_cand == idx) {
			comp->default_cand = !idx;
		    }

		    /* Remove srflx candidate */
		    pj_array_erase(comp->cand_list, sizeof(comp->cand_list[0]),
				   comp->cand_cnt, idx);
		    --comp->cand_cnt;
		} else {
		    /* Otherwise update the address */
		    pj_sockaddr_cp(&cand->addr, &info.mapped_addr);
		    cand->status = PJ_SUCCESS;
		}

		PJ_LOG(4,(comp->ice_st->obj_name,
			  "Comp %d: %s, "
			  "srflx address is %s",
			  comp->comp_id, op_name,
			  pj_sockaddr_print(&info.mapped_addr, ipaddr,
					    sizeof(ipaddr), 3)));

		sess_init_update(ice_st);
	    }
	}

	if (status != PJ_SUCCESS) {
	    /* May not have cand, e.g. when error during init */
	    if (cand)
		cand->status = status;
	    if (!ice_st->cfg.stun.ignore_stun_error) {
		sess_fail(ice_st, PJ_ICE_STRANS_OP_INIT,
			  "STUN binding request failed", status);
	    } else {
		PJ_LOG(4,(ice_st->obj_name,
			  "STUN error is ignored for comp %d",
			  comp->comp_id));

		if (cand) {
		    unsigned idx = cand - comp->cand_list;

		    /* Update default candidate index */
		    if (comp->default_cand == idx) {
			comp->default_cand = !idx;
		    }
		}

		sess_init_update(ice_st);
	    }
	}
	break;
    case PJ_STUN_SOCK_KEEP_ALIVE_OP:
	if (status != PJ_SUCCESS) {
	    pj_assert(cand != NULL);
	    cand->status = status;
	    if (!ice_st->cfg.stun.ignore_stun_error) {
		sess_fail(ice_st, PJ_ICE_STRANS_OP_INIT,
			  "STUN keep-alive failed", status);
	    } else {
		PJ_LOG(4,(ice_st->obj_name, "STUN error is ignored"));
	    }
	}
	break;
    /* NOTE(review): no default case — other op values are silently
     * ignored here.
     */
    }

    return sess_dec_ref(ice_st);
}
+
+/* Callback when TURN socket has received a packet */
+static void turn_on_rx_data(pj_turn_sock *turn_sock,
+ void *pkt,
+ unsigned pkt_len,
+ const pj_sockaddr_t *peer_addr,
+ unsigned addr_len)
+{
+ pj_ice_strans_comp *comp;
+ pj_status_t status;
+
+ comp = (pj_ice_strans_comp*) pj_turn_sock_get_user_data(turn_sock);
+ if (comp == NULL) {
+ /* We have disassociated ourselves from the TURN socket */
+ return;
+ }
+
+ sess_add_ref(comp->ice_st);
+
+ if (comp->ice_st->ice == NULL) {
+ /* The ICE session is gone, but we're still receiving packets.
+ * This could also happen if remote doesn't do ICE and application
+ * specifies TURN as the default address in SDP.
+ * So in this case just give the packet to application.
+ */
+ if (comp->ice_st->cb.on_rx_data) {
+ (*comp->ice_st->cb.on_rx_data)(comp->ice_st, comp->comp_id, pkt,
+ pkt_len, peer_addr, addr_len);
+ }
+
+ } else {
+
+ /* Hand over the packet to ICE */
+ status = pj_ice_sess_on_rx_pkt(comp->ice_st->ice, comp->comp_id,
+ TP_TURN, pkt, pkt_len,
+ peer_addr, addr_len);
+
+ if (status != PJ_SUCCESS) {
+ ice_st_perror(comp->ice_st,
+ "Error processing packet from TURN relay",
+ status);
+ }
+ }
+
+ sess_dec_ref(comp->ice_st);
+}
+
+
/* Callback when TURN client state has changed.
 *
 * On PJ_TURN_STATE_READY: fills in the component's relayed candidate
 * from the allocation info, makes it the default candidate, and checks
 * init completion via sess_init_update().
 *
 * On PJ_TURN_STATE_DEALLOCATING or later: detaches from the TURN
 * socket and either fails the session (first failure during init, or
 * repeated failures afterwards) or retries the allocation once via
 * add_update_turn().
 */
static void turn_on_state(pj_turn_sock *turn_sock, pj_turn_state_t old_state,
			  pj_turn_state_t new_state)
{
    pj_ice_strans_comp *comp;

    comp = (pj_ice_strans_comp*) pj_turn_sock_get_user_data(turn_sock);
    if (comp == NULL) {
	/* Not interested in further state notification once the relay is
	 * disconnecting.
	 */
	return;
    }

    PJ_LOG(5,(comp->ice_st->obj_name, "TURN client state changed %s --> %s",
	      pj_turn_state_name(old_state), pj_turn_state_name(new_state)));
    pj_log_push_indent();

    /* Keep the transport alive during callback processing */
    sess_add_ref(comp->ice_st);

    if (new_state == PJ_TURN_STATE_READY) {
	pj_turn_session_info rel_info;
	char ipaddr[PJ_INET6_ADDRSTRLEN+8];
	pj_ice_sess_cand *cand = NULL;
	unsigned i;

	/* A successful allocation resets the error counter */
	comp->turn_err_cnt = 0;

	/* Get allocation info */
	pj_turn_sock_get_info(turn_sock, &rel_info);

	/* Wait until initialization completes */
	pj_lock_acquire(comp->ice_st->init_lock);

	/* Find relayed candidate in the component */
	for (i=0; i<comp->cand_cnt; ++i) {
	    if (comp->cand_list[i].type == PJ_ICE_CAND_TYPE_RELAYED) {
		cand = &comp->cand_list[i];
		break;
	    }
	}
	pj_assert(cand != NULL);

	pj_lock_release(comp->ice_st->init_lock);

	/* Update candidate: the relay address is both the address and
	 * the base, with the mapped address as related address.
	 */
	pj_sockaddr_cp(&cand->addr, &rel_info.relay_addr);
	pj_sockaddr_cp(&cand->base_addr, &rel_info.relay_addr);
	pj_sockaddr_cp(&cand->rel_addr, &rel_info.mapped_addr);
	pj_ice_calc_foundation(comp->ice_st->pool, &cand->foundation,
			       PJ_ICE_CAND_TYPE_RELAYED,
			       &rel_info.relay_addr);
	cand->status = PJ_SUCCESS;

	/* Set default candidate to relay */
	comp->default_cand = cand - comp->cand_list;

	PJ_LOG(4,(comp->ice_st->obj_name,
		  "Comp %d: TURN allocation complete, relay address is %s",
		  comp->comp_id,
		  pj_sockaddr_print(&rel_info.relay_addr, ipaddr,
				    sizeof(ipaddr), 3)));

	/* This may fire the application's init callback */
	sess_init_update(comp->ice_st);

    } else if (new_state >= PJ_TURN_STATE_DEALLOCATING) {
	pj_turn_session_info info;

	++comp->turn_err_cnt;

	pj_turn_sock_get_info(turn_sock, &info);

	/* Unregister ourself from the TURN relay */
	pj_turn_sock_set_user_data(turn_sock, NULL);
	comp->turn_sock = NULL;

	/* Set session to fail if we're still initializing */
	if (comp->ice_st->state < PJ_ICE_STRANS_STATE_READY) {
	    sess_fail(comp->ice_st, PJ_ICE_STRANS_OP_INIT,
		      "TURN allocation failed", info.last_status);
	} else if (comp->turn_err_cnt > 1) {
	    sess_fail(comp->ice_st, PJ_ICE_STRANS_OP_KEEP_ALIVE,
		      "TURN refresh failed", info.last_status);
	} else {
	    /* First failure after init: retry the allocation once */
	    PJ_PERROR(4,(comp->ice_st->obj_name, info.last_status,
			 "Comp %d: TURN allocation failed, retrying",
			 comp->comp_id));
	    add_update_turn(comp->ice_st, comp);
	}
    }

    sess_dec_ref(comp->ice_st);

    pj_log_pop_indent();
}
+
diff --git a/pjnath/src/pjnath/nat_detect.c b/pjnath/src/pjnath/nat_detect.c
new file mode 100644
index 0000000..86ac694
--- /dev/null
+++ b/pjnath/src/pjnath/nat_detect.c
@@ -0,0 +1,911 @@
+/* $Id: nat_detect.c 3553 2011-05-05 06:14:19Z nanang $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <pjnath/nat_detect.h>
+#include <pjnath/errno.h>
+#include <pj/assert.h>
+#include <pj/ioqueue.h>
+#include <pj/log.h>
+#include <pj/os.h>
+#include <pj/pool.h>
+#include <pj/rand.h>
+#include <pj/string.h>
+#include <pj/timer.h>
+#include <pj/compat/socket.h>
+
+
+/* Human-readable names for NAT types. NOTE: the order of these strings
+ * MUST match the pj_stun_nat_type enumeration, because entries are
+ * looked up by direct indexing (see pj_stun_get_nat_name() and
+ * end_session()).
+ */
+static const char *nat_type_names[] =
+{
+    "Unknown",
+    "ErrUnknown",
+    "Open",
+    "Blocked",
+    "Symmetric UDP",
+    "Full Cone",
+    "Symmetric",
+    "Restricted",
+    "Port Restricted"
+};
+
+
+/* Bit values for the CHANGE-REQUEST attribute (classic STUN / RFC 3489):
+ * ask the server to reply from a different IP address and/or port.
+ */
+#define CHANGE_IP_FLAG 4
+#define CHANGE_PORT_FLAG 2
+#define CHANGE_IP_PORT_FLAG (CHANGE_IP_FLAG | CHANGE_PORT_FLAG)
+/* Delay in msec between the tests started by the TIMER_TEST timer. */
+#define TEST_INTERVAL 50
+
+/* Identifiers for the individual RFC 3489 discovery tests. Each value
+ * is used both as an index into nat_detect_session.result[] and as the
+ * tag stored in the last word of the STUN transaction ID (see
+ * send_test() / on_request_complete()).
+ */
+enum test_type
+{
+    ST_TEST_1,      /* Plain Binding request to the primary server     */
+    ST_TEST_2,      /* Binding request with change IP + port           */
+    ST_TEST_3,      /* Binding request with change port only           */
+    ST_TEST_1B,     /* Binding request to the CHANGED-ADDRESS server   */
+    ST_MAX
+};
+
+/* Log descriptions of the tests, indexed by enum test_type; keep in
+ * sync with that enumeration.
+ */
+static const char *test_names[] =
+{
+    "Test I: Binding request",
+    "Test II: Binding request with change address and port request",
+    "Test III: Binding request with change port request",
+    "Test IB: Binding request to alternate address"
+};
+
+/* Roles of the session's single timer entry (see on_sess_timer()):
+ * TIMER_TEST drives the staggered sending of the tests, TIMER_DESTROY
+ * performs the deferred self-destruction scheduled by end_session().
+ */
+enum timer_type
+{
+    TIMER_TEST = 1,
+    TIMER_DESTROY = 2
+};
+
+/* State of one NAT type detection session. Created by
+ * pj_stun_detect_nat_type(); destroys itself (via TIMER_DESTROY) after
+ * the result has been reported through the callback.
+ */
+typedef struct nat_detect_session
+{
+    pj_pool_t *pool;                /* Pool owning this session          */
+    pj_mutex_t *mutex;              /* Recursive lock for session state  */
+
+    pj_timer_heap_t *timer_heap;    /* Timer heap from pj_stun_config    */
+    pj_timer_entry timer;           /* Shared TEST/DESTROY timer entry   */
+    unsigned timer_executed;        /* How many TIMER_TEST ticks ran     */
+
+    void *user_data;                /* Opaque pointer passed to cb       */
+    pj_stun_nat_detect_cb *cb;      /* Result callback                   */
+    pj_sock_t sock;                 /* UDP socket used for all tests     */
+    pj_sockaddr_in local_addr;      /* Bound address, with interface IP  */
+    pj_ioqueue_key_t *key;          /* ioqueue registration of sock      */
+    pj_sockaddr_in server;          /* Primary STUN server address       */
+    pj_sockaddr_in *cur_server;     /* Target of the current test        */
+    pj_stun_session *stun_sess;     /* STUN session for the requests     */
+
+    pj_ioqueue_op_key_t read_op, write_op;
+    pj_uint8_t rx_pkt[PJ_STUN_MAX_PKT_LEN]; /* Receive buffer            */
+    pj_ssize_t rx_pkt_len;
+    pj_sockaddr_in src_addr;        /* Source of last received packet    */
+    int src_addr_len;
+
+    /* Per-test bookkeeping, indexed by enum test_type. */
+    struct result
+    {
+        pj_bool_t executed;         /* Request has been sent             */
+        pj_bool_t complete;         /* Transaction has finished          */
+        pj_status_t status;         /* Final status of the test          */
+        pj_sockaddr_in ma;          /* MAPPED-ADDRESS from the response  */
+        pj_sockaddr_in ca;          /* CHANGED-ADDRESS from the response */
+        pj_stun_tx_data *tdata;     /* The outgoing request              */
+    } result[ST_MAX];
+
+} nat_detect_session;
+
+
+/* Forward declarations (definitions below). */
+static void on_read_complete(pj_ioqueue_key_t *key,
+                             pj_ioqueue_op_key_t *op_key,
+                             pj_ssize_t bytes_read);
+static void on_request_complete(pj_stun_session *sess,
+                                pj_status_t status,
+                                void *token,
+                                pj_stun_tx_data *tdata,
+                                const pj_stun_msg *response,
+                                const pj_sockaddr_t *src_addr,
+                                unsigned src_addr_len);
+static pj_status_t on_send_msg(pj_stun_session *sess,
+                               void *token,
+                               const void *pkt,
+                               pj_size_t pkt_size,
+                               const pj_sockaddr_t *dst_addr,
+                               unsigned addr_len);
+
+static pj_status_t send_test(nat_detect_session *sess,
+                             enum test_type test_id,
+                             const pj_sockaddr_in *alt_addr,
+                             pj_uint32_t change_flag);
+static void on_sess_timer(pj_timer_heap_t *th,
+                          pj_timer_entry *te);
+static void sess_destroy(nat_detect_session *sess);
+
+
+/*
+ * Return the descriptive name of the specified NAT type.
+ */
+PJ_DEF(const char*) pj_stun_get_nat_name(pj_stun_nat_type type)
+{
+    /* Guard against out-of-range values before indexing the table;
+     * the macro makes the function return "*Invalid*" in that case.
+     */
+    const pj_bool_t in_range = (type >= 0 &&
+                                type <= PJ_STUN_NAT_TYPE_PORT_RESTRICTED);
+
+    PJ_ASSERT_RETURN(in_range, "*Invalid*");
+
+    return nat_type_names[type];
+}
+
+/* Count how many tests have been started (their request was sent). */
+static int test_executed(nat_detect_session *sess)
+{
+    int total = 0;
+    unsigned idx;
+
+    for (idx = 0; idx < PJ_ARRAY_SIZE(sess->result); ++idx) {
+        if (sess->result[idx].executed)
+            ++total;
+    }
+
+    return total;
+}
+
+/* Count how many tests have finished (response received or failed). */
+static int test_completed(nat_detect_session *sess)
+{
+    int total = 0;
+    unsigned idx;
+
+    for (idx = 0; idx < PJ_ARRAY_SIZE(sess->result); ++idx) {
+        if (sess->result[idx].complete)
+            ++total;
+    }
+
+    return total;
+}
+
+/*
+ * Discover which local interface would be used to reach the given
+ * server: connect() a throw-away UDP socket to it (which only fixes
+ * the route, no packet is sent) and read back the socket's local
+ * address. On success, *local_addr receives the interface IP address.
+ *
+ * Improvement: the original closed the socket separately in every
+ * error branch; consolidated into a single goto-based cleanup path so
+ * the socket cannot be leaked by a future edit.
+ */
+static pj_status_t get_local_interface(const pj_sockaddr_in *server,
+                                       pj_in_addr *local_addr)
+{
+    pj_sock_t sock;
+    pj_sockaddr_in tmp;
+    int addr_len;
+    pj_status_t status;
+
+    status = pj_sock_socket(pj_AF_INET(), pj_SOCK_DGRAM(), 0, &sock);
+    if (status != PJ_SUCCESS)
+        return status;
+
+    /* Bind to any address/port; we only need a routable socket. */
+    status = pj_sock_bind_in(sock, 0, 0);
+    if (status != PJ_SUCCESS)
+        goto on_return;
+
+    status = pj_sock_connect(sock, server, sizeof(pj_sockaddr_in));
+    if (status != PJ_SUCCESS)
+        goto on_return;
+
+    addr_len = sizeof(pj_sockaddr_in);
+    status = pj_sock_getsockname(sock, &tmp, &addr_len);
+    if (status != PJ_SUCCESS)
+        goto on_return;
+
+    local_addr->s_addr = tmp.sin_addr.s_addr;
+    status = PJ_SUCCESS;
+
+on_return:
+    /* Single cleanup point: the socket is always closed exactly once. */
+    pj_sock_close(sock);
+    return status;
+}
+
+
+/*
+ * Start NAT type detection against the given STUN server, following
+ * the RFC 3489 discovery procedure. The function returns immediately;
+ * the result is delivered asynchronously through *cb, after which the
+ * session destroys itself (no handle is returned to the caller).
+ */
+PJ_DEF(pj_status_t) pj_stun_detect_nat_type(const pj_sockaddr_in *server,
+                                            pj_stun_config *stun_cfg,
+                                            void *user_data,
+                                            pj_stun_nat_detect_cb *cb)
+{
+    pj_pool_t *pool;
+    nat_detect_session *sess;
+    pj_stun_session_cb sess_cb;
+    pj_ioqueue_callback ioqueue_cb;
+    int addr_len;
+    pj_status_t status;
+
+    PJ_ASSERT_RETURN(server && stun_cfg, PJ_EINVAL);
+    PJ_ASSERT_RETURN(stun_cfg->pf && stun_cfg->ioqueue && stun_cfg->timer_heap,
+                     PJ_EINVAL);
+
+    /*
+     * Init NAT detection session.
+     */
+    pool = pj_pool_create(stun_cfg->pf, "natck%p", PJNATH_POOL_LEN_NATCK,
+                          PJNATH_POOL_INC_NATCK, NULL);
+    if (!pool)
+        return PJ_ENOMEM;
+
+    sess = PJ_POOL_ZALLOC_T(pool, nat_detect_session);
+    sess->pool = pool;
+    sess->user_data = user_data;
+    sess->cb = cb;
+
+    status = pj_mutex_create_recursive(pool, pool->obj_name, &sess->mutex);
+    if (status != PJ_SUCCESS)
+        goto on_error;
+
+    pj_memcpy(&sess->server, server, sizeof(pj_sockaddr_in));
+
+    /*
+     * Init timer to self-destroy.
+     */
+    sess->timer_heap = stun_cfg->timer_heap;
+    sess->timer.cb = &on_sess_timer;
+    sess->timer.user_data = sess;
+
+
+    /*
+     * Initialize socket.
+     */
+    status = pj_sock_socket(pj_AF_INET(), pj_SOCK_DGRAM(), 0, &sess->sock);
+    if (status != PJ_SUCCESS)
+        goto on_error;
+
+    /*
+     * Bind to any.
+     */
+    pj_bzero(&sess->local_addr, sizeof(pj_sockaddr_in));
+    sess->local_addr.sin_family = pj_AF_INET();
+    status = pj_sock_bind(sess->sock, &sess->local_addr,
+                          sizeof(pj_sockaddr_in));
+    if (status != PJ_SUCCESS)
+        goto on_error;
+
+    /*
+     * Get local/bound address.
+     */
+    addr_len = sizeof(sess->local_addr);
+    status = pj_sock_getsockname(sess->sock, &sess->local_addr, &addr_len);
+    if (status != PJ_SUCCESS)
+        goto on_error;
+
+    /*
+     * Find out which interface is used to send to the server.
+     * (The bind above yields 0.0.0.0; this replaces only the IP part,
+     * keeping the bound port.)
+     */
+    status = get_local_interface(server, &sess->local_addr.sin_addr);
+    if (status != PJ_SUCCESS)
+        goto on_error;
+
+    PJ_LOG(5,(sess->pool->obj_name, "Local address is %s:%d",
+              pj_inet_ntoa(sess->local_addr.sin_addr),
+              pj_ntohs(sess->local_addr.sin_port)));
+
+    PJ_LOG(5,(sess->pool->obj_name, "Server set to %s:%d",
+              pj_inet_ntoa(server->sin_addr),
+              pj_ntohs(server->sin_port)));
+
+    /*
+     * Register socket to ioqueue to receive asynchronous input
+     * notification.
+     */
+    pj_bzero(&ioqueue_cb, sizeof(ioqueue_cb));
+    ioqueue_cb.on_read_complete = &on_read_complete;
+
+    status = pj_ioqueue_register_sock(sess->pool, stun_cfg->ioqueue,
+                                      sess->sock, sess, &ioqueue_cb,
+                                      &sess->key);
+    if (status != PJ_SUCCESS)
+        goto on_error;
+
+    /*
+     * Create STUN session.
+     */
+    pj_bzero(&sess_cb, sizeof(sess_cb));
+    sess_cb.on_request_complete = &on_request_complete;
+    sess_cb.on_send_msg = &on_send_msg;
+    status = pj_stun_session_create(stun_cfg, pool->obj_name, &sess_cb,
+                                    PJ_FALSE, &sess->stun_sess);
+    if (status != PJ_SUCCESS)
+        goto on_error;
+
+    pj_stun_session_set_user_data(sess->stun_sess, sess);
+
+    /*
+     * Kick-off ioqueue reading. Calling the callback directly with
+     * bytes_read==0 processes no packet; it only posts the first
+     * asynchronous recvfrom.
+     */
+    pj_ioqueue_op_key_init(&sess->read_op, sizeof(sess->read_op));
+    pj_ioqueue_op_key_init(&sess->write_op, sizeof(sess->write_op));
+    on_read_complete(sess->key, &sess->read_op, 0);
+
+    /*
+     * Start TEST_1 by invoking the timer callback synchronously; it
+     * re-schedules itself for the remaining tests.
+     */
+    sess->timer.id = TIMER_TEST;
+    on_sess_timer(stun_cfg->timer_heap, &sess->timer);
+
+    return PJ_SUCCESS;
+
+on_error:
+    /* sess_destroy() tolerates partially-initialized sessions. */
+    sess_destroy(sess);
+    return status;
+}
+
+
+/*
+ * Release all resources owned by the session. Safe to call on a
+ * partially-initialized session (every member is checked before use).
+ * Teardown order matters: the STUN session and ioqueue key are
+ * released before the mutex and the pool that back them.
+ */
+static void sess_destroy(nat_detect_session *sess)
+{
+    if (sess->stun_sess) {
+        pj_stun_session_destroy(sess->stun_sess);
+    }
+
+    /* If the socket was registered, unregistering the key also takes
+     * care of the socket (presumably closing it — hence the "else");
+     * otherwise close the raw socket ourselves.
+     */
+    if (sess->key) {
+        pj_ioqueue_unregister(sess->key);
+    } else if (sess->sock && sess->sock != PJ_INVALID_SOCKET) {
+        pj_sock_close(sess->sock);
+    }
+
+    if (sess->mutex) {
+        pj_mutex_destroy(sess->mutex);
+    }
+
+    /* Last: the pool backs sess itself, so nothing may be touched
+     * after this.
+     */
+    if (sess->pool) {
+        pj_pool_release(sess->pool);
+    }
+}
+
+
+/*
+ * Conclude the detection session: cancel any pending test timer,
+ * report the final status and NAT type to the user callback, then
+ * schedule a zero-delay TIMER_DESTROY so the actual teardown happens
+ * from the timer thread (we may currently be inside an ioqueue or
+ * STUN session callback).
+ */
+static void end_session(nat_detect_session *sess,
+                        pj_status_t status,
+                        pj_stun_nat_type nat_type)
+{
+    pj_stun_nat_detect_result result;
+    char errmsg[PJ_ERR_MSG_SIZE];
+    pj_time_val delay;
+
+    if (sess->timer.id != 0) {
+        pj_timer_heap_cancel(sess->timer_heap, &sess->timer);
+        sess->timer.id = 0;
+    }
+
+    pj_bzero(&result, sizeof(result));
+    errmsg[0] = '\0';
+    result.status_text = errmsg;
+
+    result.status = status;
+    pj_strerror(status, errmsg, sizeof(errmsg));
+    result.nat_type = nat_type;
+    /* nat_type_names[] is indexed by pj_stun_nat_type. */
+    result.nat_type_name = nat_type_names[result.nat_type];
+
+    if (sess->cb)
+        (*sess->cb)(sess->user_data, &result);
+
+    /* Defer destruction: schedule TIMER_DESTROY to fire immediately. */
+    delay.sec = 0;
+    delay.msec = 0;
+
+    sess->timer.id = TIMER_DESTROY;
+    pj_timer_heap_schedule(sess->timer_heap, &sess->timer, &delay);
+}
+
+
+/*
+ * ioqueue callback upon receiving a packet from the network. Also
+ * invoked manually once with bytes_read==0 from
+ * pj_stun_detect_nat_type() just to post the first asynchronous read.
+ */
+static void on_read_complete(pj_ioqueue_key_t *key,
+                             pj_ioqueue_op_key_t *op_key,
+                             pj_ssize_t bytes_read)
+{
+    nat_detect_session *sess;
+    pj_status_t status;
+
+    sess = (nat_detect_session *) pj_ioqueue_get_user_data(key);
+    pj_assert(sess != NULL);
+
+    pj_mutex_lock(sess->mutex);
+
+    if (bytes_read < 0) {
+        /* Transient errors (would-block / in-progress / conn-reset)
+         * are ignored; the read is simply re-posted below.
+         */
+        if (-bytes_read != PJ_STATUS_FROM_OS(OSERR_EWOULDBLOCK) &&
+            -bytes_read != PJ_STATUS_FROM_OS(OSERR_EINPROGRESS) &&
+            -bytes_read != PJ_STATUS_FROM_OS(OSERR_ECONNRESET))
+        {
+            /* Permanent error */
+            end_session(sess, -bytes_read, PJ_STUN_NAT_TYPE_ERR_UNKNOWN);
+            goto on_return;
+        }
+
+    } else if (bytes_read > 0) {
+        /* Hand the datagram to the STUN session, which will match it
+         * against a pending transaction and fire on_request_complete().
+         */
+        pj_stun_session_on_rx_pkt(sess->stun_sess, sess->rx_pkt, bytes_read,
+                                  PJ_STUN_IS_DATAGRAM|PJ_STUN_CHECK_PACKET,
+                                  NULL, NULL,
+                                  &sess->src_addr, sess->src_addr_len);
+    }
+
+
+    /* Re-arm the asynchronous read for the next packet. */
+    sess->rx_pkt_len = sizeof(sess->rx_pkt);
+    sess->src_addr_len = sizeof(sess->src_addr);
+    status = pj_ioqueue_recvfrom(key, op_key, sess->rx_pkt, &sess->rx_pkt_len,
+                                 PJ_IOQUEUE_ALWAYS_ASYNC,
+                                 &sess->src_addr, &sess->src_addr_len);
+
+    if (status != PJ_EPENDING) {
+        /* With PJ_IOQUEUE_ALWAYS_ASYNC the only expected result is
+         * PJ_EPENDING; anything else is treated as fatal.
+         */
+        pj_assert(status != PJ_SUCCESS);
+        end_session(sess, status, PJ_STUN_NAT_TYPE_ERR_UNKNOWN);
+    }
+
+on_return:
+    pj_mutex_unlock(sess->mutex);
+}
+
+
+/*
+ * STUN session callback: transmit an outgoing packet through the
+ * session's ioqueue-registered socket.
+ */
+static pj_status_t on_send_msg(pj_stun_session *stun_sess,
+                               void *token,
+                               const void *pkt,
+                               pj_size_t pkt_size,
+                               const pj_sockaddr_t *dst_addr,
+                               unsigned addr_len)
+{
+    nat_detect_session *sess;
+    pj_ssize_t size;
+
+    PJ_UNUSED_ARG(token);
+
+    sess = (nat_detect_session*) pj_stun_session_get_user_data(stun_sess);
+    size = pkt_size;
+
+    /* May return PJ_SUCCESS (sent immediately) or PJ_EPENDING. */
+    return pj_ioqueue_sendto(sess->key, &sess->write_op, pkt, &size, 0,
+                             dst_addr, addr_len);
+}
+
+/*
+ * STUN session callback: one of our Binding request transactions has
+ * completed, either with a response or with an error status (e.g.
+ * PJNATH_ESTUNTIMEDOUT). Record the outcome of the corresponding test
+ * and, once all started tests have finished, walk the RFC 3489
+ * decision tree (reproduced below) to conclude the NAT type and end
+ * the session.
+ */
+static void on_request_complete(pj_stun_session *stun_sess,
+                                pj_status_t status,
+                                void *token,
+                                pj_stun_tx_data *tdata,
+                                const pj_stun_msg *response,
+                                const pj_sockaddr_t *src_addr,
+                                unsigned src_addr_len)
+{
+    nat_detect_session *sess;
+    pj_stun_sockaddr_attr *mattr = NULL;
+    pj_stun_changed_addr_attr *ca = NULL;
+    pj_uint32_t *tsx_id;
+    int cmp;
+    unsigned test_id;
+
+    PJ_UNUSED_ARG(token);
+    /* NOTE: tdata is still read below (for the tsx ID); PJ_UNUSED_ARG
+     * is just a warning-suppression no-op, so this is harmless.
+     */
+    PJ_UNUSED_ARG(tdata);
+    PJ_UNUSED_ARG(src_addr);
+    PJ_UNUSED_ARG(src_addr_len);
+
+    sess = (nat_detect_session*) pj_stun_session_get_user_data(stun_sess);
+
+    pj_mutex_lock(sess->mutex);
+
+    /* Find errors in the response */
+    if (status == PJ_SUCCESS) {
+
+        /* Check error message */
+        if (PJ_STUN_IS_ERROR_RESPONSE(response->hdr.type)) {
+            pj_stun_errcode_attr *eattr;
+            int err_code;
+
+            eattr = (pj_stun_errcode_attr*)
+                    pj_stun_msg_find_attr(response, PJ_STUN_ATTR_ERROR_CODE, 0);
+
+            if (eattr != NULL)
+                err_code = eattr->err_code;
+            else
+                err_code = PJ_STUN_SC_SERVER_ERROR;
+
+            status = PJ_STATUS_FROM_STUN_CODE(err_code);
+
+
+        } else {
+
+            /* Get MAPPED-ADDRESS or XOR-MAPPED-ADDRESS */
+            mattr = (pj_stun_sockaddr_attr*)
+                    pj_stun_msg_find_attr(response, PJ_STUN_ATTR_XOR_MAPPED_ADDR, 0);
+            if (mattr == NULL) {
+                mattr = (pj_stun_sockaddr_attr*)
+                        pj_stun_msg_find_attr(response, PJ_STUN_ATTR_MAPPED_ADDR, 0);
+            }
+
+            if (mattr == NULL) {
+                status = PJNATH_ESTUNNOMAPPEDADDR;
+            }
+
+            /* Get CHANGED-ADDRESS attribute */
+            ca = (pj_stun_changed_addr_attr*)
+                 pj_stun_msg_find_attr(response, PJ_STUN_ATTR_CHANGED_ADDR, 0);
+
+            if (ca == NULL) {
+                status = PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_SERVER_ERROR);
+            }
+
+        }
+    }
+
+    /* Save the result. send_test() stored the test ID in the last
+     * 32-bit word of the transaction ID.
+     */
+    tsx_id = (pj_uint32_t*) tdata->msg->hdr.tsx_id;
+    test_id = tsx_id[2];
+
+    if (test_id >= ST_MAX) {
+        PJ_LOG(4,(sess->pool->obj_name, "Invalid transaction ID %u in response",
+                  test_id));
+        end_session(sess, PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_SERVER_ERROR),
+                    PJ_STUN_NAT_TYPE_ERR_UNKNOWN);
+        goto on_return;
+    }
+
+    PJ_LOG(5,(sess->pool->obj_name, "Completed %s, status=%d",
+              test_names[test_id], status));
+
+    sess->result[test_id].complete = PJ_TRUE;
+    sess->result[test_id].status = status;
+    if (status == PJ_SUCCESS) {
+        /* mattr and ca are both non-NULL here, otherwise status would
+         * have been set to an error above.
+         */
+        pj_memcpy(&sess->result[test_id].ma, &mattr->sockaddr.ipv4,
+                  sizeof(pj_sockaddr_in));
+        pj_memcpy(&sess->result[test_id].ca, &ca->sockaddr.ipv4,
+                  sizeof(pj_sockaddr_in));
+    }
+
+    /* Send Test 1B only when Test 2 completes. Must not send Test 1B
+     * before Test 2 completes to avoid creating mapping on the NAT.
+     */
+    if (!sess->result[ST_TEST_1B].executed &&
+        sess->result[ST_TEST_2].complete &&
+        sess->result[ST_TEST_2].status != PJ_SUCCESS &&
+        sess->result[ST_TEST_1].complete &&
+        sess->result[ST_TEST_1].status == PJ_SUCCESS)
+    {
+        cmp = pj_memcmp(&sess->local_addr, &sess->result[ST_TEST_1].ma,
+                        sizeof(pj_sockaddr_in));
+        if (cmp != 0)
+            send_test(sess, ST_TEST_1B, &sess->result[ST_TEST_1].ca, 0);
+    }
+
+    /* Wait until tests 1..3 are done and no started test is still
+     * pending before drawing any conclusion.
+     */
+    if (test_completed(sess)<3 || test_completed(sess)!=test_executed(sess))
+        goto on_return;
+
+    /* Handle the test result according to RFC 3489 page 22:
+
+
+                        +--------+
+                        |  Test  |
+                        |   1    |
+                        +--------+
+                             |
+                             |
+                             V
+                            /\              /\
+                         N /  \ Y          /  \ Y             +--------+
+          UDP     <-------/Resp\--------->/ IP \------------->|  Test  |
+          Blocked         \ ?  /          \Same/              |   2    |
+                           \  /            \? /               +--------+
+                            \/              \/                    |
+                                             | N                  |
+                                             |                    V
+                                             V                    /\
+                                         +--------+  Sym.      N /  \
+                                         |  Test  |  UDP    <---/Resp\
+                                         |   2    |  Firewall   \ ?  /
+                                         +--------+              \  /
+                                             |                    \/
+                                             V                     |Y
+                  /\                         /\                    |
+   Symmetric  N  /  \       +--------+   N  /  \                   V
+      NAT  <--- / IP \<-----|  Test  |<--- /Resp\               Open
+                \Same/      |   1B   |     \ ?  /               Internet
+                 \? /       +--------+      \  /
+                  \/                         \/
+                  |                           |Y
+                  |                           |
+                  |                           V
+                  |                           Full
+                  |                           Cone
+                  V              /\
+              +--------+        /  \ Y
+              |  Test  |------>/Resp\---->Restricted
+              |   3    |       \ ?  /
+              +--------+        \  /
+                                 \/
+                                  |N
+                                  |       Port
+                                  +------>Restricted
+
+                 Figure 2: Flow for type discovery process
+    */
+
+    switch (sess->result[ST_TEST_1].status) {
+    case PJNATH_ESTUNTIMEDOUT:
+        /*
+         * Test 1 has timed-out. Conclude with NAT_TYPE_BLOCKED.
+         */
+        end_session(sess, PJ_SUCCESS, PJ_STUN_NAT_TYPE_BLOCKED);
+        break;
+    case PJ_SUCCESS:
+        /*
+         * Test 1 is successful. Further tests are needed to detect
+         * NAT type. Compare the MAPPED-ADDRESS with the local address.
+         */
+        cmp = pj_memcmp(&sess->local_addr, &sess->result[ST_TEST_1].ma,
+                        sizeof(pj_sockaddr_in));
+        if (cmp==0) {
+            /*
+             * MAPPED-ADDRESS and local address is equal. Need one more
+             * test to determine NAT type.
+             */
+            switch (sess->result[ST_TEST_2].status) {
+            case PJ_SUCCESS:
+                /*
+                 * Test 2 is also successful. We're in the open.
+                 */
+                end_session(sess, PJ_SUCCESS, PJ_STUN_NAT_TYPE_OPEN);
+                break;
+            case PJNATH_ESTUNTIMEDOUT:
+                /*
+                 * Test 2 has timed out. We're behind somekind of UDP
+                 * firewall.
+                 */
+                end_session(sess, PJ_SUCCESS, PJ_STUN_NAT_TYPE_SYMMETRIC_UDP);
+                break;
+            default:
+                /*
+                 * We've got other error with Test 2.
+                 */
+                end_session(sess, sess->result[ST_TEST_2].status,
+                            PJ_STUN_NAT_TYPE_ERR_UNKNOWN);
+                break;
+            }
+        } else {
+            /*
+             * MAPPED-ADDRESS is different than local address.
+             * We're behind NAT.
+             */
+            switch (sess->result[ST_TEST_2].status) {
+            case PJ_SUCCESS:
+                /*
+                 * Test 2 is successful. We're behind a full-cone NAT.
+                 */
+                end_session(sess, PJ_SUCCESS, PJ_STUN_NAT_TYPE_FULL_CONE);
+                break;
+            case PJNATH_ESTUNTIMEDOUT:
+                /*
+                 * Test 2 has timed-out Check result of test 1B..
+                 */
+                switch (sess->result[ST_TEST_1B].status) {
+                case PJ_SUCCESS:
+                    /*
+                     * Compare the MAPPED-ADDRESS of test 1B with the
+                     * MAPPED-ADDRESS returned in test 1..
+                     */
+                    cmp = pj_memcmp(&sess->result[ST_TEST_1].ma,
+                                    &sess->result[ST_TEST_1B].ma,
+                                    sizeof(pj_sockaddr_in));
+                    if (cmp != 0) {
+                        /*
+                         * MAPPED-ADDRESS is different, we're behind a
+                         * symmetric NAT.
+                         */
+                        end_session(sess, PJ_SUCCESS,
+                                    PJ_STUN_NAT_TYPE_SYMMETRIC);
+                    } else {
+                        /*
+                         * MAPPED-ADDRESS is equal. We're behind a restricted
+                         * or port-restricted NAT, depending on the result of
+                         * test 3.
+                         */
+                        switch (sess->result[ST_TEST_3].status) {
+                        case PJ_SUCCESS:
+                            /*
+                             * Test 3 is successful, we're behind a restricted
+                             * NAT.
+                             */
+                            end_session(sess, PJ_SUCCESS,
+                                        PJ_STUN_NAT_TYPE_RESTRICTED);
+                            break;
+                        case PJNATH_ESTUNTIMEDOUT:
+                            /*
+                             * Test 3 failed, we're behind a port restricted
+                             * NAT.
+                             */
+                            end_session(sess, PJ_SUCCESS,
+                                        PJ_STUN_NAT_TYPE_PORT_RESTRICTED);
+                            break;
+                        default:
+                            /*
+                             * Got other error with test 3.
+                             */
+                            end_session(sess, sess->result[ST_TEST_3].status,
+                                        PJ_STUN_NAT_TYPE_ERR_UNKNOWN);
+                            break;
+                        }
+                    }
+                    break;
+                case PJNATH_ESTUNTIMEDOUT:
+                    /*
+                     * Strangely test 1B has failed. Maybe connectivity was
+                     * lost? Or perhaps port 3489 (the usual port number in
+                     * CHANGED-ADDRESS) is blocked?
+                     */
+                    switch (sess->result[ST_TEST_3].status) {
+                    case PJ_SUCCESS:
+                        /* Although test 1B failed, test 3 was successful.
+                         * It could be that port 3489 is blocked, while the
+                         * NAT itself looks to be a Restricted one.
+                         */
+                        end_session(sess, PJ_SUCCESS,
+                                    PJ_STUN_NAT_TYPE_RESTRICTED);
+                        break;
+                    default:
+                        /* Can't distinguish between Symmetric and Port
+                         * Restricted, so set the type to Unknown
+                         */
+                        end_session(sess, PJ_SUCCESS,
+                                    PJ_STUN_NAT_TYPE_ERR_UNKNOWN);
+                        break;
+                    }
+                    break;
+                default:
+                    /*
+                     * Got other error with test 1B.
+                     */
+                    end_session(sess, sess->result[ST_TEST_1B].status,
+                                PJ_STUN_NAT_TYPE_ERR_UNKNOWN);
+                    break;
+                }
+                break;
+            default:
+                /*
+                 * We've got other error with Test 2.
+                 */
+                end_session(sess, sess->result[ST_TEST_2].status,
+                            PJ_STUN_NAT_TYPE_ERR_UNKNOWN);
+                break;
+            }
+        }
+        break;
+    default:
+        /*
+         * We've got other error with Test 1.
+         */
+        end_session(sess, sess->result[ST_TEST_1].status,
+                    PJ_STUN_NAT_TYPE_ERR_UNKNOWN);
+        break;
+    }
+
+on_return:
+    pj_mutex_unlock(sess->mutex);
+}
+
+
+/*
+ * Build and send the Binding request for one test. The test ID is
+ * embedded in the last word of the transaction ID so that
+ * on_request_complete() can map the response back to its test.
+ * alt_addr selects an alternate server (used for Test 1B); change_flag
+ * is the value for the CHANGE-REQUEST attribute (Tests 2 and 3).
+ * On error, the test is marked complete with the failing status.
+ */
+static pj_status_t send_test(nat_detect_session *sess,
+                             enum test_type test_id,
+                             const pj_sockaddr_in *alt_addr,
+                             pj_uint32_t change_flag)
+{
+    pj_uint32_t magic, tsx_id[3];
+    pj_status_t status;
+
+    sess->result[test_id].executed = PJ_TRUE;
+
+    /* Randomize tsx id; the magic is kept different from
+     * PJ_STUN_MAGIC (presumably so the message is treated as classic
+     * RFC 3489 STUN rather than RFC 5389 — TODO confirm).
+     */
+    do {
+        magic = pj_rand();
+    } while (magic == PJ_STUN_MAGIC);
+
+    tsx_id[0] = pj_rand();
+    tsx_id[1] = pj_rand();
+    tsx_id[2] = test_id;    /* read back in on_request_complete() */
+
+    /* Create BIND request */
+    status = pj_stun_session_create_req(sess->stun_sess,
+                                        PJ_STUN_BINDING_REQUEST, magic,
+                                        (pj_uint8_t*)tsx_id,
+                                        &sess->result[test_id].tdata);
+    if (status != PJ_SUCCESS)
+        goto on_error;
+
+    /* Add CHANGE-REQUEST attribute */
+    status = pj_stun_msg_add_uint_attr(sess->pool,
+                                       sess->result[test_id].tdata->msg,
+                                       PJ_STUN_ATTR_CHANGE_REQUEST,
+                                       change_flag);
+    if (status != PJ_SUCCESS)
+        goto on_error;
+
+    /* Configure alternate address */
+    if (alt_addr)
+        sess->cur_server = (pj_sockaddr_in*) alt_addr;
+    else
+        sess->cur_server = &sess->server;
+
+    PJ_LOG(5,(sess->pool->obj_name,
+              "Performing %s to %s:%d",
+              test_names[test_id],
+              pj_inet_ntoa(sess->cur_server->sin_addr),
+              pj_ntohs(sess->cur_server->sin_port)));
+
+    /* Send the request */
+    status = pj_stun_session_send_msg(sess->stun_sess, NULL, PJ_TRUE,
+                                      PJ_TRUE, sess->cur_server,
+                                      sizeof(pj_sockaddr_in),
+                                      sess->result[test_id].tdata);
+    if (status != PJ_SUCCESS)
+        goto on_error;
+
+    return PJ_SUCCESS;
+
+on_error:
+    /* Mark the test finished so the decision logic is not stalled. */
+    sess->result[test_id].complete = PJ_TRUE;
+    sess->result[test_id].status = status;
+
+    return status;
+}
+
+
+/*
+ * Timer callback. Two roles, selected by te->id:
+ *  - TIMER_TEST:    starts tests 1, 2 and 3 spaced TEST_INTERVAL msec
+ *                   apart (the entry re-schedules itself twice);
+ *  - TIMER_DESTROY: final self-destruction, scheduled by end_session().
+ */
+static void on_sess_timer(pj_timer_heap_t *th,
+                          pj_timer_entry *te)
+{
+    nat_detect_session *sess;
+
+    sess = (nat_detect_session*) te->user_data;
+
+    if (te->id == TIMER_DESTROY) {
+        /* Detach the socket from the ioqueue while holding the lock,
+         * then destroy everything outside the lock (sess_destroy()
+         * also destroys the mutex).
+         */
+        pj_mutex_lock(sess->mutex);
+        pj_ioqueue_unregister(sess->key);
+        sess->key = NULL;
+        sess->sock = PJ_INVALID_SOCKET;
+        te->id = 0;
+        pj_mutex_unlock(sess->mutex);
+
+        sess_destroy(sess);
+
+    } else if (te->id == TIMER_TEST) {
+
+        pj_bool_t next_timer;
+
+        pj_mutex_lock(sess->mutex);
+
+        next_timer = PJ_FALSE;
+
+        /* timer_executed selects which test to start on this tick. */
+        if (sess->timer_executed == 0) {
+            send_test(sess, ST_TEST_1, NULL, 0);
+            next_timer = PJ_TRUE;
+        } else if (sess->timer_executed == 1) {
+            send_test(sess, ST_TEST_2, NULL, CHANGE_IP_PORT_FLAG);
+            next_timer = PJ_TRUE;
+        } else if (sess->timer_executed == 2) {
+            send_test(sess, ST_TEST_3, NULL, CHANGE_PORT_FLAG);
+        } else {
+            pj_assert(!"Shouldn't have timer at this state");
+        }
+
+        ++sess->timer_executed;
+
+        if (next_timer) {
+            pj_time_val delay = {0, TEST_INTERVAL};
+            pj_timer_heap_schedule(th, te, &delay);
+        } else {
+            te->id = 0;
+        }
+
+        pj_mutex_unlock(sess->mutex);
+
+    } else {
+        pj_assert(!"Invalid timer ID");
+    }
+}
+
diff --git a/pjnath/src/pjnath/stun_auth.c b/pjnath/src/pjnath/stun_auth.c
new file mode 100644
index 0000000..9041186
--- /dev/null
+++ b/pjnath/src/pjnath/stun_auth.c
@@ -0,0 +1,631 @@
+/* $Id: stun_auth.c 3553 2011-05-05 06:14:19Z nanang $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <pjnath/stun_auth.h>
+#include <pjnath/errno.h>
+#include <pjlib-util/hmac_sha1.h>
+#include <pjlib-util/md5.h>
+#include <pjlib-util/sha1.h>
+#include <pj/assert.h>
+#include <pj/log.h>
+#include <pj/pool.h>
+#include <pj/string.h>
+
+#define THIS_FILE "stun_auth.c"
+
+/* Duplicate a credential into the given pool. Static credentials get
+ * a deep copy of every string member; dynamic credentials are copied
+ * with memcpy (the struct carries callback pointers and a user_data
+ * pointer, so a shallow copy is sufficient).
+ */
+PJ_DEF(void) pj_stun_auth_cred_dup( pj_pool_t *pool,
+                                    pj_stun_auth_cred *dst,
+                                    const pj_stun_auth_cred *src)
+{
+    dst->type = src->type;
+
+    switch (src->type) {
+    case PJ_STUN_AUTH_CRED_STATIC:
+        pj_strdup(pool, &dst->data.static_cred.realm,
+                        &src->data.static_cred.realm);
+        pj_strdup(pool, &dst->data.static_cred.username,
+                        &src->data.static_cred.username);
+        dst->data.static_cred.data_type = src->data.static_cred.data_type;
+        pj_strdup(pool, &dst->data.static_cred.data,
+                        &src->data.static_cred.data);
+        pj_strdup(pool, &dst->data.static_cred.nonce,
+                        &src->data.static_cred.nonce);
+        break;
+    case PJ_STUN_AUTH_CRED_DYNAMIC:
+        pj_memcpy(&dst->data.dyn_cred, &src->data.dyn_cred,
+                  sizeof(src->data.dyn_cred));
+        break;
+    }
+}
+
+
+/*
+ * Deep-copy a request credential info structure: every string member
+ * is duplicated into the given pool so dst owns its own storage.
+ */
+PJ_DEF(void) pj_stun_req_cred_info_dup( pj_pool_t *pool,
+                                        pj_stun_req_cred_info *dst,
+                                        const pj_stun_req_cred_info *src)
+{
+    pj_strdup(pool, &dst->username, &src->username);
+    pj_strdup(pool, &dst->realm,    &src->realm);
+    pj_strdup(pool, &dst->nonce,    &src->nonce);
+    pj_strdup(pool, &dst->auth_key, &src->auth_key);
+}
+
+
+/* Calculate HMAC-SHA1 key for long term credential, by getting
+ * MD5 digest of username, realm, and password. digest must point to
+ * at least 16 writable bytes.
+ */
+static void calc_md5_key(pj_uint8_t digest[16],
+                         const pj_str_t *realm,
+                         const pj_str_t *username,
+                         const pj_str_t *passwd)
+{
+    /* The 16-byte key for MESSAGE-INTEGRITY HMAC is formed by taking
+     * the MD5 hash of the result of concatenating the following five
+     * fields: (1) The username, with any quotes and trailing nulls
+     * removed, (2) A single colon, (3) The realm, with any quotes and
+     * trailing nulls removed, (4) A single colon, and (5) The
+     * password, with any trailing nulls removed.
+     */
+    pj_md5_context ctx;
+    pj_str_t s;
+
+    pj_md5_init(&ctx);
+
+/* Strip one leading and one trailing double-quote, if present.
+ * (Operates on the local copy 's' only.)
+ */
+#define REMOVE_QUOTE(s) if (s.slen && *s.ptr=='"') \
+                            s.ptr++, s.slen--; \
+                        if (s.slen && s.ptr[s.slen-1]=='"') \
+                            s.slen--;
+
+    /* Add username */
+    s = *username;
+    REMOVE_QUOTE(s);
+    pj_md5_update(&ctx, (pj_uint8_t*)s.ptr, s.slen);
+
+    /* Add single colon */
+    pj_md5_update(&ctx, (pj_uint8_t*)":", 1);
+
+    /* Add realm */
+    s = *realm;
+    REMOVE_QUOTE(s);
+    pj_md5_update(&ctx, (pj_uint8_t*)s.ptr, s.slen);
+
+#undef REMOVE_QUOTE
+
+    /* Another colon */
+    pj_md5_update(&ctx, (pj_uint8_t*)":", 1);
+
+    /* Add password */
+    pj_md5_update(&ctx, (pj_uint8_t*)passwd->ptr, passwd->slen);
+
+    /* Done */
+    pj_md5_final(&ctx, digest);
+}
+
+
+/*
+ * Create authentication key to be used for encoding the message with
+ * MESSAGE-INTEGRITY.
+ *
+ * With a realm (long-term credential) and a plaintext password, the
+ * key is MD5(username ":" realm ":" password); otherwise the supplied
+ * data is copied verbatim (it is then either a short-term plaintext
+ * password, or presumably an already-hashed key — data_type other
+ * than PJ_STUN_PASSWD_PLAIN).
+ */
+PJ_DEF(void) pj_stun_create_key(pj_pool_t *pool,
+                                pj_str_t *key,
+                                const pj_str_t *realm,
+                                const pj_str_t *username,
+                                pj_stun_passwd_type data_type,
+                                const pj_str_t *data)
+{
+    PJ_ASSERT_ON_FAIL(pool && key && username && data, return);
+
+    if (realm && realm->slen) {
+        if (data_type == PJ_STUN_PASSWD_PLAIN) {
+            /* MD5 digest is always 16 bytes. */
+            key->ptr = (char*) pj_pool_alloc(pool, 16);
+            calc_md5_key((pj_uint8_t*)key->ptr, realm, username, data);
+            key->slen = 16;
+        } else {
+            pj_strdup(pool, key, data);
+        }
+    } else {
+        /* Short-term credential: the key is the plaintext password. */
+        pj_assert(data_type == PJ_STUN_PASSWD_PLAIN);
+        pj_strdup(pool, key, data);
+    }
+}
+
+
+/* Read a 16-bit value stored in network byte order at pdu[pos]. */
+PJ_INLINE(pj_uint16_t) GET_VAL16(const pj_uint8_t *pdu, unsigned pos)
+{
+    pj_uint16_t hi = pdu[pos];
+    pj_uint16_t lo = pdu[pos+1];
+
+    return (pj_uint16_t) ((hi << 8) | lo);
+}
+
+
+/* Store a 16-bit host value at buf[pos..pos+1] in network byte order. */
+PJ_INLINE(void) PUT_VAL16(pj_uint8_t *buf, unsigned pos, pj_uint16_t hval)
+{
+    buf[pos]   = (pj_uint8_t) (hval >> 8);
+    buf[pos+1] = (pj_uint8_t) (hval & 0xFF);
+}
+
+
+/* Build an error response (typically a 401 challenge) for the given
+ * request, adding REALM and NONCE attributes when appropriate. A 400
+ * response carries neither REALM nor NONCE (see the rule noted inline).
+ * On success, *p_response receives the new message (allocated from
+ * pool).
+ */
+static pj_status_t create_challenge(pj_pool_t *pool,
+                                    const pj_stun_msg *msg,
+                                    int err_code,
+                                    const char *errstr,
+                                    const pj_str_t *realm,
+                                    const pj_str_t *nonce,
+                                    pj_stun_msg **p_response)
+{
+    pj_stun_msg *response;
+    pj_str_t tmp_nonce;
+    pj_str_t err_msg;
+    pj_status_t rc;
+
+    rc = pj_stun_msg_create_response(pool, msg, err_code,
+                                     (errstr?pj_cstr(&err_msg, errstr):NULL),
+                                     &response);
+    if (rc != PJ_SUCCESS)
+        return rc;
+
+    /* SHOULD NOT add REALM, NONCE, USERNAME, and M-I on 400 response */
+    if (err_code!=400 && realm && realm->slen) {
+        rc = pj_stun_msg_add_string_attr(pool, response,
+                                         PJ_STUN_ATTR_REALM,
+                                         realm);
+        if (rc != PJ_SUCCESS)
+            return rc;
+
+        /* long term must include nonce; substitute a fixed default
+         * when the caller did not supply one.
+         */
+        if (!nonce || nonce->slen == 0) {
+            tmp_nonce = pj_str("pjstun");
+            nonce = &tmp_nonce;
+        }
+    }
+
+    if (err_code!=400 && nonce && nonce->slen) {
+        rc = pj_stun_msg_add_string_attr(pool, response,
+                                         PJ_STUN_ATTR_NONCE,
+                                         nonce);
+        if (rc != PJ_SUCCESS)
+            return rc;
+    }
+
+    *p_response = response;
+
+    return PJ_SUCCESS;
+}
+
+
+/* Verify credential in the request */
+PJ_DEF(pj_status_t) pj_stun_authenticate_request(const pj_uint8_t *pkt,
+ unsigned pkt_len,
+ const pj_stun_msg *msg,
+ pj_stun_auth_cred *cred,
+ pj_pool_t *pool,
+ pj_stun_req_cred_info *p_info,
+ pj_stun_msg **p_response)
+{
+ pj_stun_req_cred_info tmp_info;
+ const pj_stun_msgint_attr *amsgi;
+ unsigned i, amsgi_pos;
+ pj_bool_t has_attr_beyond_mi;
+ const pj_stun_username_attr *auser;
+ const pj_stun_realm_attr *arealm;
+ const pj_stun_realm_attr *anonce;
+ pj_hmac_sha1_context ctx;
+ pj_uint8_t digest[PJ_SHA1_DIGEST_SIZE];
+ pj_stun_status err_code;
+ const char *err_text = NULL;
+ pj_status_t status;
+
+ /* msg and credential MUST be specified */
+ PJ_ASSERT_RETURN(pkt && pkt_len && msg && cred, PJ_EINVAL);
+
+ /* If p_response is specified, pool MUST be specified. */
+ PJ_ASSERT_RETURN(!p_response || pool, PJ_EINVAL);
+
+ if (p_response)
+ *p_response = NULL;
+
+ if (!PJ_STUN_IS_REQUEST(msg->hdr.type))
+ p_response = NULL;
+
+ if (p_info == NULL)
+ p_info = &tmp_info;
+
+ pj_bzero(p_info, sizeof(pj_stun_req_cred_info));
+
+ /* Get realm and nonce from credential */
+ p_info->realm.slen = p_info->nonce.slen = 0;
+ if (cred->type == PJ_STUN_AUTH_CRED_STATIC) {
+ p_info->realm = cred->data.static_cred.realm;
+ p_info->nonce = cred->data.static_cred.nonce;
+ } else if (cred->type == PJ_STUN_AUTH_CRED_DYNAMIC) {
+ status = cred->data.dyn_cred.get_auth(cred->data.dyn_cred.user_data,
+ pool, &p_info->realm,
+ &p_info->nonce);
+ if (status != PJ_SUCCESS)
+ return status;
+ } else {
+ pj_assert(!"Invalid credential type");
+ return PJ_EBUG;
+ }
+
+ /* Look for MESSAGE-INTEGRITY while counting the position */
+ amsgi_pos = 0;
+ has_attr_beyond_mi = PJ_FALSE;
+ amsgi = NULL;
+ for (i=0; i<msg->attr_count; ++i) {
+ if (msg->attr[i]->type == PJ_STUN_ATTR_MESSAGE_INTEGRITY) {
+ amsgi = (const pj_stun_msgint_attr*) msg->attr[i];
+ } else if (amsgi) {
+ has_attr_beyond_mi = PJ_TRUE;
+ break;
+ } else {
+ amsgi_pos += ((msg->attr[i]->length+3) & ~0x03) + 4;
+ }
+ }
+
+ if (amsgi == NULL) {
+ /* According to rfc3489bis-10 Sec 10.1.2/10.2.2, we should return 400
+ for short term, and 401 for long term.
+ The rule has been changed from rfc3489bis-06
+ */
+ err_code = p_info->realm.slen ? PJ_STUN_SC_UNAUTHORIZED :
+ PJ_STUN_SC_BAD_REQUEST;
+ goto on_auth_failed;
+ }
+
+ /* Next check that USERNAME is present */
+ auser = (const pj_stun_username_attr*)
+ pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_USERNAME, 0);
+ if (auser == NULL) {
+ /* According to rfc3489bis-10 Sec 10.1.2/10.2.2, we should return 400
+ for both short and long term, since M-I is present.
+ The rule has been changed from rfc3489bis-06
+ */
+ err_code = PJ_STUN_SC_BAD_REQUEST;
+ err_text = "Missing USERNAME";
+ goto on_auth_failed;
+ }
+
+ /* Get REALM, if any */
+ arealm = (const pj_stun_realm_attr*)
+ pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_REALM, 0);
+
+ /* Reject with 400 if we have long term credential and the request
+ * is missing REALM attribute.
+ */
+ if (p_info->realm.slen && arealm==NULL) {
+ err_code = PJ_STUN_SC_BAD_REQUEST;
+ err_text = "Missing REALM";
+ goto on_auth_failed;
+ }
+
+ /* Check if username match */
+ if (cred->type == PJ_STUN_AUTH_CRED_STATIC) {
+ pj_bool_t username_ok;
+ username_ok = !pj_strcmp(&auser->value,
+ &cred->data.static_cred.username);
+ if (username_ok) {
+ pj_strdup(pool, &p_info->username,
+ &cred->data.static_cred.username);
+ pj_stun_create_key(pool, &p_info->auth_key, &p_info->realm,
+ &auser->value, cred->data.static_cred.data_type,
+ &cred->data.static_cred.data);
+ } else {
+ /* Username mismatch */
+ /* According to rfc3489bis-10 Sec 10.1.2/10.2.2, we should
+ * return 401
+ */
+ err_code = PJ_STUN_SC_UNAUTHORIZED;
+ goto on_auth_failed;
+ }
+ } else if (cred->type == PJ_STUN_AUTH_CRED_DYNAMIC) {
+ pj_stun_passwd_type data_type = PJ_STUN_PASSWD_PLAIN;
+ pj_str_t password;
+ pj_status_t rc;
+
+ rc = cred->data.dyn_cred.get_password(msg,
+ cred->data.dyn_cred.user_data,
+ (arealm?&arealm->value:NULL),
+ &auser->value, pool,
+ &data_type, &password);
+ if (rc == PJ_SUCCESS) {
+ pj_strdup(pool, &p_info->username, &auser->value);
+ pj_stun_create_key(pool, &p_info->auth_key,
+ (arealm?&arealm->value:NULL), &auser->value,
+ data_type, &password);
+ } else {
+ err_code = PJ_STUN_SC_UNAUTHORIZED;
+ goto on_auth_failed;
+ }
+ } else {
+ pj_assert(!"Invalid credential type");
+ return PJ_EBUG;
+ }
+
+
+
+ /* Get NONCE attribute */
+ anonce = (pj_stun_nonce_attr*)
+ pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_NONCE, 0);
+
+ /* Check for long term/short term requirements. */
+ if (p_info->realm.slen != 0 && arealm == NULL) {
+ /* Long term credential is required and REALM is not present */
+ err_code = PJ_STUN_SC_BAD_REQUEST;
+ err_text = "Missing REALM";
+ goto on_auth_failed;
+
+ } else if (p_info->realm.slen != 0 && arealm != NULL) {
+ /* We want long term, and REALM is present */
+
+ /* NONCE must be present. */
+ if (anonce == NULL && p_info->nonce.slen) {
+ err_code = PJ_STUN_SC_BAD_REQUEST;
+ err_text = "Missing NONCE";
+ goto on_auth_failed;
+ }
+
+ /* Verify REALM matches */
+ if (pj_stricmp(&arealm->value, &p_info->realm)) {
+ /* REALM doesn't match */
+ err_code = PJ_STUN_SC_UNAUTHORIZED;
+ err_text = "Invalid REALM";
+ goto on_auth_failed;
+ }
+
+ /* Valid case, will validate the message integrity later */
+
+ } else if (p_info->realm.slen == 0 && arealm != NULL) {
+ /* We want to use short term credential, but client uses long
+ * term credential. The draft doesn't mention anything about
+ * switching between long term and short term.
+ */
+
+ /* For now just accept the credential, anyway it will probably
+ * cause wrong message integrity value later.
+ */
+ } else if (p_info->realm.slen==0 && arealm == NULL) {
+ /* Short term authentication is wanted, and one is supplied */
+
+ /* Application MAY request NONCE to be supplied */
+ if (p_info->nonce.slen != 0) {
+ err_code = PJ_STUN_SC_UNAUTHORIZED;
+ err_text = "NONCE required";
+ goto on_auth_failed;
+ }
+ }
+
+ /* If NONCE is present, validate it */
+ if (anonce) {
+ pj_bool_t ok;
+
+ if (cred->type == PJ_STUN_AUTH_CRED_DYNAMIC &&
+ cred->data.dyn_cred.verify_nonce != NULL)
+ {
+ ok=cred->data.dyn_cred.verify_nonce(msg,
+ cred->data.dyn_cred.user_data,
+ (arealm?&arealm->value:NULL),
+ &auser->value,
+ &anonce->value);
+ } else if (cred->type == PJ_STUN_AUTH_CRED_DYNAMIC) {
+ ok = PJ_TRUE;
+ } else {
+ if (p_info->nonce.slen) {
+ ok = !pj_strcmp(&anonce->value, &p_info->nonce);
+ } else {
+ ok = PJ_TRUE;
+ }
+ }
+
+ if (!ok) {
+ err_code = PJ_STUN_SC_STALE_NONCE;
+ goto on_auth_failed;
+ }
+ }
+
+ /* Now calculate HMAC of the message. */
+ pj_hmac_sha1_init(&ctx, (pj_uint8_t*)p_info->auth_key.ptr,
+ p_info->auth_key.slen);
+
+#if PJ_STUN_OLD_STYLE_MI_FINGERPRINT
+ /* Pre rfc3489bis-06 style of calculation */
+ pj_hmac_sha1_update(&ctx, pkt, 20);
+#else
+ /* First calculate HMAC for the header.
+ * The calculation is different depending on whether FINGERPRINT attribute
+ * is present in the message.
+ */
+ if (has_attr_beyond_mi) {
+ pj_uint8_t hdr_copy[20];
+ pj_memcpy(hdr_copy, pkt, 20);
+ PUT_VAL16(hdr_copy, 2, (pj_uint16_t)(amsgi_pos + 24));
+ pj_hmac_sha1_update(&ctx, hdr_copy, 20);
+ } else {
+ pj_hmac_sha1_update(&ctx, pkt, 20);
+ }
+#endif /* PJ_STUN_OLD_STYLE_MI_FINGERPRINT */
+
+ /* Now update with the message body */
+ pj_hmac_sha1_update(&ctx, pkt+20, amsgi_pos);
+#if PJ_STUN_OLD_STYLE_MI_FINGERPRINT
+ // This is no longer necessary as per rfc3489bis-08
+ if ((amsgi_pos+20) & 0x3F) {
+ pj_uint8_t zeroes[64];
+ pj_bzero(zeroes, sizeof(zeroes));
+ pj_hmac_sha1_update(&ctx, zeroes, 64-((amsgi_pos+20) & 0x3F));
+ }
+#endif
+ pj_hmac_sha1_final(&ctx, digest);
+
+
+ /* Compare HMACs */
+ if (pj_memcmp(amsgi->hmac, digest, 20)) {
+ /* HMAC value mismatch */
+ /* According to rfc3489bis-10 Sec 10.1.2 we should return 401 */
+ err_code = PJ_STUN_SC_UNAUTHORIZED;
+ err_text = "MESSAGE-INTEGRITY mismatch";
+ goto on_auth_failed;
+ }
+
+ /* Everything looks okay! */
+ return PJ_SUCCESS;
+
+on_auth_failed:
+ if (p_response) {
+ create_challenge(pool, msg, err_code, err_text,
+ &p_info->realm, &p_info->nonce, p_response);
+ }
+ return PJ_STATUS_FROM_STUN_CODE(err_code);
+}
+
+
+/* Determine if STUN message can be authenticated.
+ *
+ * Requests and success responses can always be authenticated;
+ * indications never can. For error responses it depends on the
+ * error code: responses that are generated before/instead of
+ * credential verification (400, 401, 438, and 420) cannot carry
+ * a valid MESSAGE-INTEGRITY.
+ */
+PJ_DEF(pj_bool_t) pj_stun_auth_valid_for_msg(const pj_stun_msg *msg)
+{
+    unsigned msg_type = msg->hdr.type;
+    const pj_stun_errcode_attr *err_attr;
+
+    /* STUN Indication cannot be authenticated */
+    if (PJ_STUN_IS_INDICATION(msg_type))
+        return PJ_FALSE;
+
+    /* STUN requests and success response can be authenticated */
+    if (!PJ_STUN_IS_ERROR_RESPONSE(msg_type))
+        return PJ_TRUE;
+
+    /* Authentication for STUN error responses depend on the error
+     * code.
+     */
+    err_attr = (const pj_stun_errcode_attr*)
+               pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_ERROR_CODE, 0);
+    if (err_attr == NULL) {
+        PJ_LOG(4,(THIS_FILE, "STUN error code attribute not present in "
+                             "error response"));
+        return PJ_TRUE;
+    }
+
+    if (err_attr->err_code == PJ_STUN_SC_BAD_REQUEST  ||   /* 400 */
+        err_attr->err_code == PJ_STUN_SC_UNAUTHORIZED ||   /* 401 */
+        err_attr->err_code == PJ_STUN_SC_STALE_NONCE  ||   /* 438 */
+        /* Due to the way this response is generated here, we can't really
+         * authenticate 420 (Unknown Attribute) response */
+        err_attr->err_code == PJ_STUN_SC_UNKNOWN_ATTRIBUTE)
+    {
+        return PJ_FALSE;
+    }
+
+    return PJ_TRUE;
+}
+
+
+/* Authenticate MESSAGE-INTEGRITY in the response.
+ *
+ * Verifies that the MESSAGE-INTEGRITY attribute of a parsed response
+ * matches the HMAC-SHA1 computed over the raw packet with the given key.
+ *
+ * pkt, pkt_len  the raw packet as received from the wire (the HMAC is
+ *               computed over raw octets, not the parsed form).
+ *               NOTE(review): pkt_len is only checked for non-zero here;
+ *               the hashed extent is derived from the parsed attributes.
+ * msg           the parsed representation of the same packet.
+ * key           the authentication key used for the HMAC.
+ *
+ * Returns PJ_SUCCESS when the HMAC matches; a 401-derived status when
+ * MESSAGE-INTEGRITY is absent or mismatched; PJNATH_EINSTUNMSGLEN or a
+ * 400-derived status on malformed input.
+ */
+PJ_DEF(pj_status_t) pj_stun_authenticate_response(const pj_uint8_t *pkt,
+                                                  unsigned pkt_len,
+                                                  const pj_stun_msg *msg,
+                                                  const pj_str_t *key)
+{
+    const pj_stun_msgint_attr *amsgi;
+    unsigned i, amsgi_pos;
+    pj_bool_t has_attr_beyond_mi;
+    pj_hmac_sha1_context ctx;
+    pj_uint8_t digest[PJ_SHA1_DIGEST_SIZE];
+
+    PJ_ASSERT_RETURN(pkt && pkt_len && msg && key, PJ_EINVAL);
+
+    /* First check that MESSAGE-INTEGRITY is present */
+    amsgi = (const pj_stun_msgint_attr*)
+            pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_MESSAGE_INTEGRITY, 0);
+    if (amsgi == NULL) {
+        return PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_UNAUTHORIZED);
+    }
+
+    /* Check that message length is valid: MESSAGE-INTEGRITY alone takes
+     * 24 octets (4-byte attribute header + 20-byte SHA-1 HMAC).
+     */
+    if (msg->hdr.length < 24) {
+        return PJNATH_EINSTUNMSGLEN;
+    }
+
+    /* Look for MESSAGE-INTEGRITY while counting the position: amsgi_pos
+     * accumulates the padded on-wire size of every attribute that
+     * precedes it, and has_attr_beyond_mi records whether anything
+     * (e.g. FINGERPRINT) follows it -- that changes the header length
+     * used in the HMAC below.
+     */
+    amsgi_pos = 0;
+    has_attr_beyond_mi = PJ_FALSE;
+    amsgi = NULL;
+    for (i=0; i<msg->attr_count; ++i) {
+        if (msg->attr[i]->type == PJ_STUN_ATTR_MESSAGE_INTEGRITY) {
+            amsgi = (const pj_stun_msgint_attr*) msg->attr[i];
+        } else if (amsgi) {
+            has_attr_beyond_mi = PJ_TRUE;
+            break;
+        } else {
+            /* Attribute values are padded to a 4-byte boundary;
+             * +4 accounts for the attribute header itself.
+             */
+            amsgi_pos += ((msg->attr[i]->length+3) & ~0x03) + 4;
+        }
+    }
+
+    if (amsgi == NULL) {
+        return PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_BAD_REQUEST);
+    }
+
+    /* Now calculate HMAC of the message. */
+    pj_hmac_sha1_init(&ctx, (pj_uint8_t*)key->ptr, key->slen);
+
+#if PJ_STUN_OLD_STYLE_MI_FINGERPRINT
+    /* Pre rfc3489bis-06 style of calculation */
+    pj_hmac_sha1_update(&ctx, pkt, 20);
+#else
+    /* First calculate HMAC for the header.
+     * The calculation is different depending on whether FINGERPRINT attribute
+     * is present in the message.
+     */
+    if (has_attr_beyond_mi) {
+        /* Hash the header as if MESSAGE-INTEGRITY were the last
+         * attribute: patch the length field in a local copy.
+         */
+        pj_uint8_t hdr_copy[20];
+        pj_memcpy(hdr_copy, pkt, 20);
+        PUT_VAL16(hdr_copy, 2, (pj_uint16_t)(amsgi_pos+24));
+        pj_hmac_sha1_update(&ctx, hdr_copy, 20);
+    } else {
+        pj_hmac_sha1_update(&ctx, pkt, 20);
+    }
+#endif  /* PJ_STUN_OLD_STYLE_MI_FINGERPRINT */
+
+    /* Now update with the message body, up to (and excluding) the
+     * MESSAGE-INTEGRITY attribute itself.
+     */
+    pj_hmac_sha1_update(&ctx, pkt+20, amsgi_pos);
+#if PJ_STUN_OLD_STYLE_MI_FINGERPRINT
+    // This is no longer necessary as per rfc3489bis-08
+    if ((amsgi_pos+20) & 0x3F) {
+        pj_uint8_t zeroes[64];
+        pj_bzero(zeroes, sizeof(zeroes));
+        pj_hmac_sha1_update(&ctx, zeroes, 64-((amsgi_pos+20) & 0x3F));
+    }
+#endif
+    pj_hmac_sha1_final(&ctx, digest);
+
+    /* Compare HMACs (20 octets of SHA-1 output) */
+    if (pj_memcmp(amsgi->hmac, digest, 20)) {
+        /* HMAC value mismatch */
+        return PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_UNAUTHORIZED);
+    }
+
+    /* Everything looks okay! */
+    return PJ_SUCCESS;
+}
+
diff --git a/pjnath/src/pjnath/stun_msg.c b/pjnath/src/pjnath/stun_msg.c
new file mode 100644
index 0000000..b295705
--- /dev/null
+++ b/pjnath/src/pjnath/stun_msg.c
@@ -0,0 +1,2827 @@
+/* $Id: stun_msg.c 3553 2011-05-05 06:14:19Z nanang $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <pjnath/stun_msg.h>
+#include <pjnath/errno.h>
+#include <pjlib-util/crc32.h>
+#include <pjlib-util/hmac_sha1.h>
+#include <pj/assert.h>
+#include <pj/log.h>
+#include <pj/os.h>
+#include <pj/pool.h>
+#include <pj/rand.h>
+#include <pj/string.h>
+
+#define THIS_FILE "stun_msg.c"
+#define STUN_XOR_FINGERPRINT 0x5354554eL
+
+static int padding_char;
+
+/* Printable names of STUN methods, indexed directly by the method
+ * number extracted with PJ_STUN_GET_METHOD(). Slots with no assigned
+ * method hold a "???" placeholder; any slot past the explicit
+ * initializers (up to PJ_STUN_METHOD_MAX) is implicitly NULL.
+ * Used by pj_stun_get_method_name().
+ */
+static const char *stun_method_names[PJ_STUN_METHOD_MAX] =
+{
+    "Unknown",          /* 0 */
+    "Binding",          /* 1 */
+    "SharedSecret",     /* 2 */
+    "Allocate",         /* 3 */
+    "Refresh",          /* 4 */
+    "???",              /* 5 */
+    "Send",             /* 6 */
+    "Data",             /* 7 */
+    "CreatePermission", /* 8 */
+    "ChannelBind",      /* 9 */
+};
+
+/* Map of STUN error codes to their standard reason phrases.
+ *
+ * INVARIANT: entries MUST be kept sorted in ascending order of
+ * err_code -- pj_stun_get_err_reason() performs a binary search
+ * over this table.
+ */
+static struct
+{
+    int err_code;         /* numeric STUN error code */
+    const char *err_msg;  /* standard reason phrase */
+} stun_err_msg_map[] =
+{
+    { PJ_STUN_SC_TRY_ALTERNATE,             "Try Alternate"},
+    { PJ_STUN_SC_BAD_REQUEST,               "Bad Request"},
+    { PJ_STUN_SC_UNAUTHORIZED,              "Unauthorized"},
+    { PJ_STUN_SC_FORBIDDEN,                 "Forbidden"},
+    { PJ_STUN_SC_UNKNOWN_ATTRIBUTE,         "Unknown Attribute"},
+    //{ PJ_STUN_SC_STALE_CREDENTIALS,       "Stale Credentials"},
+    //{ PJ_STUN_SC_INTEGRITY_CHECK_FAILURE, "Integrity Check Failure"},
+    //{ PJ_STUN_SC_MISSING_USERNAME,        "Missing Username"},
+    //{ PJ_STUN_SC_USE_TLS,                 "Use TLS"},
+    //{ PJ_STUN_SC_MISSING_REALM,           "Missing Realm"},
+    //{ PJ_STUN_SC_MISSING_NONCE,           "Missing Nonce"},
+    //{ PJ_STUN_SC_UNKNOWN_USERNAME,        "Unknown Username"},
+    { PJ_STUN_SC_ALLOCATION_MISMATCH,       "Allocation Mismatch"},
+    { PJ_STUN_SC_STALE_NONCE,               "Stale Nonce"},
+    { PJ_STUN_SC_TRANSITIONING,             "Active Destination Already Set"},
+    { PJ_STUN_SC_WRONG_CREDENTIALS,         "Wrong Credentials"},
+    { PJ_STUN_SC_UNSUPP_TRANSPORT_PROTO,    "Unsupported Transport Protocol"},
+    { PJ_STUN_SC_OPER_TCP_ONLY,             "Operation for TCP Only"},
+    { PJ_STUN_SC_CONNECTION_FAILURE,        "Connection Failure"},
+    { PJ_STUN_SC_CONNECTION_TIMEOUT,        "Connection Timeout"},
+    { PJ_STUN_SC_ALLOCATION_QUOTA_REACHED,  "Allocation Quota Reached"},
+    { PJ_STUN_SC_ROLE_CONFLICT,             "Role Conflict"},
+    { PJ_STUN_SC_SERVER_ERROR,              "Server Error"},
+    { PJ_STUN_SC_INSUFFICIENT_CAPACITY,     "Insufficient Capacity"},
+    { PJ_STUN_SC_GLOBAL_FAILURE,            "Global Failure"}
+};
+
+
+
+struct attr_desc
+{
+ const char *name;
+ pj_status_t (*decode_attr)(pj_pool_t *pool, const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr, void **p_attr);
+ pj_status_t (*encode_attr)(const void *a, pj_uint8_t *buf,
+ unsigned len, const pj_stun_msg_hdr *msghdr,
+ unsigned *printed);
+ void* (*clone_attr)(pj_pool_t *pool, const void *src);
+};
+
+static pj_status_t decode_sockaddr_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr);
+static pj_status_t decode_xored_sockaddr_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr);
+static pj_status_t encode_sockaddr_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed);
+static void* clone_sockaddr_attr(pj_pool_t *pool, const void *src);
+static pj_status_t decode_string_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr);
+static pj_status_t encode_string_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed);
+static void* clone_string_attr(pj_pool_t *pool, const void *src);
+static pj_status_t decode_msgint_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr);
+static pj_status_t encode_msgint_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed);
+static void* clone_msgint_attr(pj_pool_t *pool, const void *src);
+static pj_status_t decode_errcode_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr);
+static pj_status_t encode_errcode_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed);
+static void* clone_errcode_attr(pj_pool_t *pool, const void *src);
+static pj_status_t decode_unknown_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr);
+static pj_status_t encode_unknown_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed);
+static void* clone_unknown_attr(pj_pool_t *pool, const void *src);
+static pj_status_t decode_uint_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr);
+static pj_status_t encode_uint_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed);
+static void* clone_uint_attr(pj_pool_t *pool, const void *src);
+static pj_status_t decode_uint64_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr);
+static pj_status_t encode_uint64_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed);
+static void* clone_uint64_attr(pj_pool_t *pool, const void *src);
+static pj_status_t decode_binary_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr);
+static pj_status_t encode_binary_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed);
+static void* clone_binary_attr(pj_pool_t *pool, const void *src);
+static pj_status_t decode_empty_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr);
+static pj_status_t encode_empty_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed);
+static void* clone_empty_attr(pj_pool_t *pool, const void *src);
+
+static struct attr_desc mandatory_attr_desc[] =
+{
+ {
+ /* type zero */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* PJ_STUN_ATTR_MAPPED_ADDR, */
+ "MAPPED-ADDRESS",
+ &decode_sockaddr_attr,
+ &encode_sockaddr_attr,
+ &clone_sockaddr_attr
+ },
+ {
+ /* PJ_STUN_ATTR_RESPONSE_ADDR, */
+ "RESPONSE-ADDRESS",
+ &decode_sockaddr_attr,
+ &encode_sockaddr_attr,
+ &clone_sockaddr_attr
+ },
+ {
+ /* PJ_STUN_ATTR_CHANGE_REQUEST, */
+ "CHANGE-REQUEST",
+ &decode_uint_attr,
+ &encode_uint_attr,
+ &clone_uint_attr
+ },
+ {
+ /* PJ_STUN_ATTR_SOURCE_ADDR, */
+ "SOURCE-ADDRESS",
+ &decode_sockaddr_attr,
+ &encode_sockaddr_attr,
+ &clone_sockaddr_attr
+ },
+ {
+ /* PJ_STUN_ATTR_CHANGED_ADDR, */
+ "CHANGED-ADDRESS",
+ &decode_sockaddr_attr,
+ &encode_sockaddr_attr,
+ &clone_sockaddr_attr
+ },
+ {
+ /* PJ_STUN_ATTR_USERNAME, */
+ "USERNAME",
+ &decode_string_attr,
+ &encode_string_attr,
+ &clone_string_attr
+ },
+ {
+ /* PJ_STUN_ATTR_PASSWORD, */
+ "PASSWORD",
+ &decode_string_attr,
+ &encode_string_attr,
+ &clone_string_attr
+ },
+ {
+ /* PJ_STUN_ATTR_MESSAGE_INTEGRITY, */
+ "MESSAGE-INTEGRITY",
+ &decode_msgint_attr,
+ &encode_msgint_attr,
+ &clone_msgint_attr
+ },
+ {
+ /* PJ_STUN_ATTR_ERROR_CODE, */
+ "ERROR-CODE",
+ &decode_errcode_attr,
+ &encode_errcode_attr,
+ &clone_errcode_attr
+ },
+ {
+ /* PJ_STUN_ATTR_UNKNOWN_ATTRIBUTES, */
+ "UNKNOWN-ATTRIBUTES",
+ &decode_unknown_attr,
+ &encode_unknown_attr,
+ &clone_unknown_attr
+ },
+ {
+ /* PJ_STUN_ATTR_REFLECTED_FROM, */
+ "REFLECTED-FROM",
+ &decode_sockaddr_attr,
+ &encode_sockaddr_attr,
+ &clone_sockaddr_attr
+ },
+ {
+ /* PJ_STUN_ATTR_CHANNEL_NUMBER (0x000C) */
+ "CHANNEL-NUMBER",
+ &decode_uint_attr,
+ &encode_uint_attr,
+ &clone_uint_attr
+ },
+ {
+ /* PJ_STUN_ATTR_LIFETIME, */
+ "LIFETIME",
+ &decode_uint_attr,
+ &encode_uint_attr,
+ &clone_uint_attr
+ },
+ {
+ /* ID 0x000E is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* PJ_STUN_ATTR_MAGIC_COOKIE */
+ "MAGIC-COOKIE",
+ &decode_uint_attr,
+ &encode_uint_attr,
+ &clone_uint_attr
+ },
+ {
+ /* PJ_STUN_ATTR_BANDWIDTH, */
+ "BANDWIDTH",
+ &decode_uint_attr,
+ &encode_uint_attr,
+ &clone_uint_attr
+ },
+ {
+ /* ID 0x0011 is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* PJ_STUN_ATTR_XOR_PEER_ADDRESS, */
+ "XOR-PEER-ADDRESS",
+ &decode_xored_sockaddr_attr,
+ &encode_sockaddr_attr,
+ &clone_sockaddr_attr
+ },
+ {
+ /* PJ_STUN_ATTR_DATA, */
+ "DATA",
+ &decode_binary_attr,
+ &encode_binary_attr,
+ &clone_binary_attr
+ },
+ {
+ /* PJ_STUN_ATTR_REALM, */
+ "REALM",
+ &decode_string_attr,
+ &encode_string_attr,
+ &clone_string_attr
+ },
+ {
+ /* PJ_STUN_ATTR_NONCE, */
+ "NONCE",
+ &decode_string_attr,
+ &encode_string_attr,
+ &clone_string_attr
+ },
+ {
+ /* PJ_STUN_ATTR_XOR_RELAYED_ADDR, */
+ "XOR-RELAYED-ADDRESS",
+ &decode_xored_sockaddr_attr,
+ &encode_sockaddr_attr,
+ &clone_sockaddr_attr
+ },
+ {
+ /* PJ_STUN_ATTR_REQUESTED_ADDR_TYPE, */
+ "REQUESTED-ADDRESS-TYPE",
+ &decode_uint_attr,
+ &encode_uint_attr,
+ &clone_uint_attr
+ },
+ {
+ /* PJ_STUN_ATTR_EVEN_PORT, */
+ "EVEN-PORT",
+ &decode_uint_attr,
+ &encode_uint_attr,
+ &clone_uint_attr
+ },
+ {
+ /* PJ_STUN_ATTR_REQUESTED_TRANSPORT, */
+ "REQUESTED-TRANSPORT",
+ &decode_uint_attr,
+ &encode_uint_attr,
+ &clone_uint_attr
+ },
+ {
+ /* PJ_STUN_ATTR_DONT_FRAGMENT */
+ "DONT-FRAGMENT",
+ &decode_empty_attr,
+ &encode_empty_attr,
+ &clone_empty_attr
+ },
+ {
+ /* ID 0x001B is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* ID 0x001C is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* ID 0x001D is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* ID 0x001E is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* ID 0x001F is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* PJ_STUN_ATTR_XOR_MAPPED_ADDRESS, */
+ "XOR-MAPPED-ADDRESS",
+ &decode_xored_sockaddr_attr,
+ &encode_sockaddr_attr,
+ &clone_sockaddr_attr
+ },
+ {
+ /* PJ_STUN_ATTR_TIMER_VAL, */
+ "TIMER-VAL",
+ &decode_uint_attr,
+ &encode_uint_attr,
+ &clone_uint_attr
+ },
+ {
+ /* PJ_STUN_ATTR_RESERVATION_TOKEN, */
+ "RESERVATION-TOKEN",
+ &decode_uint64_attr,
+ &encode_uint64_attr,
+ &clone_uint64_attr
+ },
+ {
+ /* PJ_STUN_ATTR_XOR_REFLECTED_FROM, */
+ "XOR-REFLECTED-FROM",
+ &decode_xored_sockaddr_attr,
+ &encode_sockaddr_attr,
+ &clone_sockaddr_attr
+ },
+ {
+ /* PJ_STUN_ATTR_PRIORITY, */
+ "PRIORITY",
+ &decode_uint_attr,
+ &encode_uint_attr,
+ &clone_uint_attr
+ },
+ {
+ /* PJ_STUN_ATTR_USE_CANDIDATE, */
+ "USE-CANDIDATE",
+ &decode_empty_attr,
+ &encode_empty_attr,
+ &clone_empty_attr
+ },
+ {
+ /* ID 0x0026 is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* ID 0x0027 is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* ID 0x0028 is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* ID 0x0029 is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* ID 0x002a is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* ID 0x002b is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* ID 0x002c is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* ID 0x002d is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* ID 0x002e is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* ID 0x002f is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* PJ_STUN_ATTR_ICMP, */
+ "ICMP",
+ &decode_uint_attr,
+ &encode_uint_attr,
+ &clone_uint_attr
+ },
+
+ /* Sentinel */
+ {
+ /* PJ_STUN_ATTR_END_MANDATORY_ATTR */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ }
+};
+
+/* Codec descriptors for the comprehension-optional ("extended")
+ * attribute range. INVARIANT: the table is indexed by
+ * (attr_type - PJ_STUN_ATTR_START_EXTENDED_ATTR) in find_attr_desc(),
+ * so entries must stay aligned with the attribute ID assignments;
+ * unassigned IDs keep an all-NULL placeholder slot.
+ */
+static struct attr_desc extended_attr_desc[] =
+{
+    {
+        /* ID 0x8021 is not assigned */
+        NULL,
+        NULL,
+        NULL,
+        NULL
+    },
+    {
+        /* PJ_STUN_ATTR_SOFTWARE, */
+        "SOFTWARE",
+        &decode_string_attr,
+        &encode_string_attr,
+        &clone_string_attr
+    },
+    {
+        /* PJ_STUN_ATTR_ALTERNATE_SERVER, */
+        "ALTERNATE-SERVER",
+        &decode_sockaddr_attr,
+        &encode_sockaddr_attr,
+        &clone_sockaddr_attr
+    },
+    {
+        /* PJ_STUN_ATTR_REFRESH_INTERVAL, */
+        "REFRESH-INTERVAL",
+        &decode_uint_attr,
+        &encode_uint_attr,
+        &clone_uint_attr
+    },
+    {
+        /* ID 0x8025 is not assigned*/
+        NULL,
+        NULL,
+        NULL,
+        NULL
+    },
+    {
+        /* PADDING, 0x8026 */
+        NULL,
+        NULL,
+        NULL,
+        NULL
+    },
+    {
+        /* CACHE-TIMEOUT, 0x8027 */
+        NULL,
+        NULL,
+        NULL,
+        NULL
+    },
+    {
+        /* PJ_STUN_ATTR_FINGERPRINT, */
+        "FINGERPRINT",
+        &decode_uint_attr,
+        &encode_uint_attr,
+        &clone_uint_attr
+    },
+    {
+        /* PJ_STUN_ATTR_ICE_CONTROLLED, */
+        "ICE-CONTROLLED",
+        &decode_uint64_attr,
+        &encode_uint64_attr,
+        &clone_uint64_attr
+    },
+    {
+        /* PJ_STUN_ATTR_ICE_CONTROLLING, */
+        "ICE-CONTROLLING",
+        &decode_uint64_attr,
+        &encode_uint64_attr,
+        &clone_uint64_attr
+    }
+};
+
+
+
+/*
+ * Get STUN message type name.
+ *
+ * Returns a printable, never-NULL string for the method encoded in
+ * msg_type; "???" for unknown/unassigned methods.
+ */
+PJ_DEF(const char*) pj_stun_get_method_name(unsigned msg_type)
+{
+    unsigned method = PJ_STUN_GET_METHOD(msg_type);
+
+    /* Fix: stun_method_names is declared with PJ_STUN_METHOD_MAX slots
+     * but only ten explicit initializers; a method number that is within
+     * the declared size yet past the initializers would previously make
+     * this function return a NULL pointer. Guard both the range and the
+     * table slot so callers always get a valid string.
+     */
+    if (method >= PJ_ARRAY_SIZE(stun_method_names) ||
+        stun_method_names[method] == NULL)
+    {
+        return "???";
+    }
+
+    return stun_method_names[method];
+}
+
+
+/*
+ * Get STUN message class name.
+ *
+ * Maps the class bits of msg_type to a printable string; the class
+ * macros are mutually exclusive, so the test order is immaterial.
+ */
+PJ_DEF(const char*) pj_stun_get_class_name(unsigned msg_type)
+{
+    const char *class_name;
+
+    if (PJ_STUN_IS_INDICATION(msg_type))
+        class_name = "indication";
+    else if (PJ_STUN_IS_SUCCESS_RESPONSE(msg_type))
+        class_name = "success response";
+    else if (PJ_STUN_IS_ERROR_RESPONSE(msg_type))
+        class_name = "error response";
+    else if (PJ_STUN_IS_REQUEST(msg_type))
+        class_name = "request";
+    else
+        class_name = "???";
+
+    return class_name;
+}
+
+
+/* Look up the encode/decode/clone descriptor for an attribute type.
+ *
+ * The two descriptor tables are indexed directly by attribute type
+ * (offset by PJ_STUN_ATTR_START_EXTENDED_ATTR for the extended table).
+ * Returns NULL when the type falls outside both tables, or when the
+ * slot is an unassigned-ID gap (decode_attr == NULL).
+ */
+static const struct attr_desc *find_attr_desc(unsigned attr_type)
+{
+    struct attr_desc *desc;
+
+    /* Check that attr_desc array is valid: sizes and a couple of known
+     * entries are asserted so that a table/enum mismatch is caught in
+     * debug builds rather than producing silent mis-dispatch.
+     */
+    pj_assert(PJ_ARRAY_SIZE(mandatory_attr_desc)==
+              PJ_STUN_ATTR_END_MANDATORY_ATTR+1);
+    pj_assert(mandatory_attr_desc[PJ_STUN_ATTR_END_MANDATORY_ATTR].decode_attr
+              == NULL);
+    pj_assert(mandatory_attr_desc[PJ_STUN_ATTR_USE_CANDIDATE].decode_attr
+              == &decode_empty_attr);
+    pj_assert(PJ_ARRAY_SIZE(extended_attr_desc) ==
+              PJ_STUN_ATTR_END_EXTENDED_ATTR-PJ_STUN_ATTR_START_EXTENDED_ATTR);
+
+    if (attr_type < PJ_STUN_ATTR_END_MANDATORY_ATTR)
+        desc = &mandatory_attr_desc[attr_type];
+    else if (attr_type >= PJ_STUN_ATTR_START_EXTENDED_ATTR &&
+             attr_type < PJ_STUN_ATTR_END_EXTENDED_ATTR)
+        desc = &extended_attr_desc[attr_type-PJ_STUN_ATTR_START_EXTENDED_ATTR];
+    else
+        return NULL;
+
+    /* A NULL decode_attr marks an unassigned-ID placeholder slot. */
+    return desc->decode_attr == NULL ? NULL : desc;
+}
+
+
+/*
+ * Get STUN attribute name.
+ *
+ * Returns "???" for unknown or unnamed attribute types.
+ */
+PJ_DEF(const char*) pj_stun_get_attr_name(unsigned attr_type)
+{
+    const struct attr_desc *desc = find_attr_desc(attr_type);
+
+    return (desc && desc->name) ? desc->name : "???";
+}
+
+
+/**
+ * Get STUN standard reason phrase for the specified error code.
+ *
+ * Performs a binary search over stun_err_msg_map (which must be kept
+ * sorted ascending by err_code). Returns an empty/NULL pj_str_t when
+ * the code is not in the table.
+ */
+PJ_DEF(pj_str_t) pj_stun_get_err_reason(int err_code)
+{
+    /* Find error message using binary search */
+    int first = 0;
+    int n = PJ_ARRAY_SIZE(stun_err_msg_map);
+
+    while (n > 0) {
+        int half = n/2;
+        int mid = first + half;
+
+        if (stun_err_msg_map[mid].err_code < err_code) {
+            first = mid+1;
+            n -= (half+1);
+        } else if (stun_err_msg_map[mid].err_code > err_code) {
+            n = half;
+        } else {
+            first = mid;
+            break;
+        }
+    }
+
+    /* Fix: when err_code is greater than every entry in the table the
+     * search terminates with first == PJ_ARRAY_SIZE(stun_err_msg_map);
+     * the old code then read one element past the end of the array.
+     * Bound-check before dereferencing (CERT ARR30-C).
+     */
+    if (first < (int)PJ_ARRAY_SIZE(stun_err_msg_map) &&
+        stun_err_msg_map[first].err_code == err_code)
+    {
+        return pj_str((char*)stun_err_msg_map[first].err_msg);
+    }
+
+    return pj_str(NULL);
+}
+
+
+/*
+ * Set padding character.
+ *
+ * Replaces the module-wide padding character and returns the value
+ * that was in effect before the call.
+ */
+PJ_DEF(int) pj_stun_set_padding_char(int chr)
+{
+    int prev = padding_char;
+
+    padding_char = chr;
+    return prev;
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+
+
+/* Set an attribute header's type and length fields in one expression. */
+#define INIT_ATTR(a,t,l)    (a)->hdr.type=(pj_uint16_t)(t), \
+                            (a)->hdr.length=(pj_uint16_t)(l)
+/* On-wire size of the attribute header (16-bit type + 16-bit length). */
+#define ATTR_HDR_LEN        4
+
+/* Read a 16-bit big-endian value at buf[pos]; result in HOST order. */
+static pj_uint16_t GETVAL16H(const pj_uint8_t *buf, unsigned pos)
+{
+    return (pj_uint16_t) ((buf[pos + 0] << 8) | \
+                          (buf[pos + 1] << 0));
+}
+
+/* Read a 16-bit value at buf[pos]; result in NETWORK order. */
+PJ_INLINE(pj_uint16_t) GETVAL16N(const pj_uint8_t *buf, unsigned pos)
+{
+    return pj_htons(GETVAL16H(buf,pos));
+}
+
+/* Write host-order 16-bit hval to buf[pos] in big-endian byte order. */
+static void PUTVAL16H(pj_uint8_t *buf, unsigned pos, pj_uint16_t hval)
+{
+    buf[pos+0] = (pj_uint8_t) ((hval & 0xFF00) >> 8);
+    buf[pos+1] = (pj_uint8_t) ((hval & 0x00FF) >> 0);
+}
+
+/* Read a 32-bit big-endian value at buf[pos]; result in HOST order. */
+PJ_INLINE(pj_uint32_t) GETVAL32H(const pj_uint8_t *buf, unsigned pos)
+{
+    return (pj_uint32_t) ((buf[pos + 0] << 24UL) | \
+                          (buf[pos + 1] << 16UL) | \
+                          (buf[pos + 2] <<  8UL) | \
+                          (buf[pos + 3] <<  0UL));
+}
+
+/* Read a 32-bit value at buf[pos]; result in NETWORK order. */
+PJ_INLINE(pj_uint32_t) GETVAL32N(const pj_uint8_t *buf, unsigned pos)
+{
+    return pj_htonl(GETVAL32H(buf,pos));
+}
+
+/* Write host-order 32-bit hval to buf[pos] in big-endian byte order. */
+static void PUTVAL32H(pj_uint8_t *buf, unsigned pos, pj_uint32_t hval)
+{
+    buf[pos+0] = (pj_uint8_t) ((hval & 0xFF000000UL) >> 24);
+    buf[pos+1] = (pj_uint8_t) ((hval & 0x00FF0000UL) >> 16);
+    buf[pos+2] = (pj_uint8_t) ((hval & 0x0000FF00UL) >>  8);
+    buf[pos+3] = (pj_uint8_t) ((hval & 0x000000FFUL) >>  0);
+}
+
+/* Read a 64-bit big-endian value at buf[pos] into ts (hi/lo 32-bit halves). */
+static void GETVAL64H(const pj_uint8_t *buf, unsigned pos, pj_timestamp *ts)
+{
+    ts->u32.hi = GETVAL32H(buf, pos);
+    ts->u32.lo = GETVAL32H(buf, pos+4);
+}
+
+/* Write the 64-bit value in ts to buf[pos] in big-endian byte order. */
+static void PUTVAL64H(pj_uint8_t *buf, unsigned pos, const pj_timestamp *ts)
+{
+    PUTVAL32H(buf, pos, ts->u32.hi);
+    PUTVAL32H(buf, pos+4, ts->u32.lo);
+}
+
+
+/* Parse an on-wire attribute header (type + length) into hdr. */
+static void GETATTRHDR(const pj_uint8_t *buf, pj_stun_attr_hdr *hdr)
+{
+    hdr->type = GETVAL16H(buf, 0);
+    hdr->length = GETVAL16H(buf, 2);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+/*
+ * STUN generic IP address container
+ */
+#define STUN_GENERIC_IPV4_ADDR_LEN 8
+#define STUN_GENERIC_IPV6_ADDR_LEN 20
+
+/*
+ * Init sockaddr attr.
+ *
+ * Initializes a caller-provided pj_stun_sockaddr_attr in place: copies
+ * the address, records whether the value should be XOR-ed on the wire,
+ * and sets hdr.length to the on-wire value length (4 bytes of
+ * family/port preamble + the raw address length).
+ *
+ * Returns PJ_EINVAL (via assert) when attr/addr is NULL or addr_len is
+ * not a sockaddr_in/sockaddr_in6 size.
+ */
+PJ_DEF(pj_status_t) pj_stun_sockaddr_attr_init( pj_stun_sockaddr_attr *attr,
+                                                int attr_type,
+                                                pj_bool_t xor_ed,
+                                                const pj_sockaddr_t *addr,
+                                                unsigned addr_len)
+{
+    unsigned attr_len;
+
+    PJ_ASSERT_RETURN(attr && addr_len && addr, PJ_EINVAL);
+    PJ_ASSERT_RETURN(addr_len == sizeof(pj_sockaddr_in) ||
+                     addr_len == sizeof(pj_sockaddr_in6), PJ_EINVAL);
+
+    /* +4: one ignored octet, one family octet, two port octets precede
+     * the address on the wire.
+     */
+    attr_len = pj_sockaddr_get_addr_len(addr) + 4;
+    INIT_ATTR(attr, attr_type, attr_len);
+
+    pj_memcpy(&attr->sockaddr, addr, addr_len);
+    attr->xor_ed = xor_ed;
+
+    return PJ_SUCCESS;
+}
+
+
+/*
+ * Create a generic STUN IP address attribute for IPv4 address.
+ *
+ * Allocates a zeroed attribute from the pool, stores it in *p_attr,
+ * and delegates the field setup to pj_stun_sockaddr_attr_init().
+ */
+PJ_DEF(pj_status_t) pj_stun_sockaddr_attr_create(pj_pool_t *pool,
+                                                 int attr_type,
+                                                 pj_bool_t xor_ed,
+                                                 const pj_sockaddr_t *addr,
+                                                 unsigned addr_len,
+                                                 pj_stun_sockaddr_attr **p_attr)
+{
+    pj_stun_sockaddr_attr *sa_attr;
+
+    PJ_ASSERT_RETURN(pool && p_attr, PJ_EINVAL);
+
+    sa_attr = PJ_POOL_ZALLOC_T(pool, pj_stun_sockaddr_attr);
+    *p_attr = sa_attr;
+
+    return pj_stun_sockaddr_attr_init(sa_attr, attr_type, xor_ed,
+                                      addr, addr_len);
+}
+
+
+/*
+ * Create and add generic STUN IP address attribute to a STUN message.
+ *
+ * Convenience wrapper: allocates the attribute from the pool and, on
+ * success, appends it to the message's attribute list.
+ */
+PJ_DEF(pj_status_t) pj_stun_msg_add_sockaddr_attr(pj_pool_t *pool,
+                                                  pj_stun_msg *msg,
+                                                  int attr_type,
+                                                  pj_bool_t xor_ed,
+                                                  const pj_sockaddr_t *addr,
+                                                  unsigned addr_len)
+{
+    pj_stun_sockaddr_attr *new_attr = NULL;
+    pj_status_t rc;
+
+    rc = pj_stun_sockaddr_attr_create(pool, attr_type, xor_ed,
+                                      addr, addr_len, &new_attr);
+    if (rc != PJ_SUCCESS)
+        return rc;
+
+    return pj_stun_msg_add_attr(msg, &new_attr->hdr);
+}
+
+/* Decode a (non-XOR-ed) address attribute from the wire.
+ *
+ * Value layout after the 4-byte attribute header:
+ *   1 octet   ignored
+ *   1 octet   family (1 = IPv4, 2 = IPv6)
+ *   2 octets  port, network byte order
+ *   4/16 octets  raw address
+ *
+ * On success *p_attr receives a pj_stun_sockaddr_attr allocated from
+ * the pool; returns PJNATH_ESTUNINATTRLEN / PJNATH_EINVAF on a
+ * malformed attribute.
+ */
+static pj_status_t decode_sockaddr_attr(pj_pool_t *pool,
+                                        const pj_uint8_t *buf,
+                                        const pj_stun_msg_hdr *msghdr,
+                                        void **p_attr)
+{
+    pj_stun_sockaddr_attr *attr;
+    int af;
+    unsigned addr_len;
+    pj_uint32_t val;
+
+    PJ_CHECK_STACK();
+
+    PJ_UNUSED_ARG(msghdr);
+
+    /* Create the attribute */
+    attr = PJ_POOL_ZALLOC_T(pool, pj_stun_sockaddr_attr);
+    GETATTRHDR(buf, &attr->hdr);
+
+    /* Check that the attribute length is valid */
+    if (attr->hdr.length != STUN_GENERIC_IPV4_ADDR_LEN &&
+        attr->hdr.length != STUN_GENERIC_IPV6_ADDR_LEN)
+    {
+        return PJNATH_ESTUNINATTRLEN;
+    }
+
+    /* Check address family (second octet of the attribute value) */
+    val = *(pj_uint8_t*)(buf + ATTR_HDR_LEN + 1);
+
+    /* Check address family is valid, and that the declared length
+     * matches the family (IPv4 family with an IPv6-sized payload,
+     * or vice versa, is rejected).
+     */
+    if (val == 1) {
+        if (attr->hdr.length != STUN_GENERIC_IPV4_ADDR_LEN)
+            return PJNATH_ESTUNINATTRLEN;
+        af = pj_AF_INET();
+        addr_len = 4;
+    } else if (val == 2) {
+        if (attr->hdr.length != STUN_GENERIC_IPV6_ADDR_LEN)
+            return PJNATH_ESTUNINATTRLEN;
+        af = pj_AF_INET6();
+        addr_len = 16;
+    } else {
+        /* Invalid address family */
+        return PJNATH_EINVAF;
+    }
+
+    /* Get port and address */
+    pj_sockaddr_init(af, &attr->sockaddr, NULL, 0);
+    pj_sockaddr_set_port(&attr->sockaddr,
+                         GETVAL16H(buf, ATTR_HDR_LEN+2));
+    pj_memcpy(pj_sockaddr_get_addr(&attr->sockaddr),
+              buf+ATTR_HDR_LEN+4,
+              addr_len);
+
+    /* Done */
+    *p_attr = (void*)attr;
+
+    return PJ_SUCCESS;
+}
+
+
+/* Decode an XOR-ed address attribute (XOR-MAPPED-ADDRESS and friends).
+ *
+ * Parses the attribute with decode_sockaddr_attr(), then un-XORs the
+ * port and address in place: the port with the top 16 bits of the
+ * magic cookie, the address with the cookie (IPv4) or with the cookie
+ * concatenated with the 96-bit transaction ID (IPv6).
+ */
+static pj_status_t decode_xored_sockaddr_attr(pj_pool_t *pool,
+                                              const pj_uint8_t *buf,
+                                              const pj_stun_msg_hdr *msghdr,
+                                              void **p_attr)
+{
+    pj_stun_sockaddr_attr *attr;
+    pj_status_t status;
+
+    /* Parse the raw (still-obfuscated) value first */
+    status = decode_sockaddr_attr(pool, buf, msghdr, p_attr);
+    if (status != PJ_SUCCESS)
+        return status;
+
+    attr = *(pj_stun_sockaddr_attr**)p_attr;
+
+    attr->xor_ed = PJ_TRUE;
+
+    if (attr->sockaddr.addr.sa_family == pj_AF_INET()) {
+        /* XOR is applied on network-order values, hence htons/htonl */
+        attr->sockaddr.ipv4.sin_port ^= pj_htons(PJ_STUN_MAGIC >> 16);
+        attr->sockaddr.ipv4.sin_addr.s_addr ^= pj_htonl(PJ_STUN_MAGIC);
+    } else if (attr->sockaddr.addr.sa_family == pj_AF_INET6()) {
+        unsigned i;
+        pj_uint8_t *dst = (pj_uint8_t*) &attr->sockaddr.ipv6.sin6_addr;
+        pj_uint32_t magic = pj_htonl(PJ_STUN_MAGIC);
+
+        attr->sockaddr.ipv6.sin6_port ^= pj_htons(PJ_STUN_MAGIC >> 16);
+
+        /* If the IP address family is IPv6, X-Address is computed by
+         * taking the mapped IP address in host byte order, XOR'ing it
+         * with the concatenation of the magic cookie and the 96-bit
+         * transaction ID, and converting the result to network byte
+         * order.
+         */
+        for (i=0; i<4; ++i) {
+            dst[i] ^= ((const pj_uint8_t*)&magic)[i];
+        }
+        /* tsx_id is consumed byte-wise; guard against a type change */
+        pj_assert(sizeof(msghdr->tsx_id[0]) == 1);
+        for (i=0; i<12; ++i) {
+            dst[i+4] ^= msghdr->tsx_id[i];
+        }
+
+    } else {
+        return PJNATH_EINVAF;
+    }
+
+    /* Done */
+    *p_attr = attr;
+
+    return PJ_SUCCESS;
+}
+
+
+/* Encode an address attribute (plain or XOR-ed, per ca->xor_ed) into buf.
+ *
+ * a        the pj_stun_sockaddr_attr to encode.
+ * buf,len  output buffer and its capacity; PJ_ETOOSMALL when the
+ *          attribute does not fit.
+ * msghdr   message header; its tsx_id is needed for the IPv6 XOR path.
+ * printed  out: number of octets written (header included).
+ */
+static pj_status_t encode_sockaddr_attr(const void *a, pj_uint8_t *buf,
+                                        unsigned len,
+                                        const pj_stun_msg_hdr *msghdr,
+                                        unsigned *printed)
+{
+    pj_uint8_t *start_buf = buf;
+    const pj_stun_sockaddr_attr *ca =
+        (const pj_stun_sockaddr_attr *)a;
+
+    PJ_CHECK_STACK();
+
+    /* Common: attribute type */
+    PUTVAL16H(buf, 0, ca->hdr.type);
+
+    if (ca->sockaddr.addr.sa_family == pj_AF_INET()) {
+        enum {
+            ATTR_LEN = ATTR_HDR_LEN + STUN_GENERIC_IPV4_ADDR_LEN
+        };
+
+        if (len < ATTR_LEN)
+            return PJ_ETOOSMALL;
+
+        /* attribute len */
+        PUTVAL16H(buf, 2, STUN_GENERIC_IPV4_ADDR_LEN);
+        buf += ATTR_HDR_LEN;
+
+        /* Ignored (first octet of the value is reserved) */
+        *buf++ = '\0';
+
+        /* Address family, 1 for IPv4 */
+        *buf++ = 1;
+
+        /* IPv4 address */
+        if (ca->xor_ed) {
+            pj_uint32_t addr;
+            pj_uint16_t port;
+
+            /* Work on local copies; the attribute itself stays
+             * un-obfuscated.
+             */
+            addr = ca->sockaddr.ipv4.sin_addr.s_addr;
+            port = ca->sockaddr.ipv4.sin_port;
+
+            port ^= pj_htons(PJ_STUN_MAGIC >> 16);
+            addr ^= pj_htonl(PJ_STUN_MAGIC);
+
+            /* Port */
+            pj_memcpy(buf, &port, 2);
+            buf += 2;
+
+            /* Address */
+            pj_memcpy(buf, &addr, 4);
+            buf += 4;
+
+        } else {
+            /* Port */
+            pj_memcpy(buf, &ca->sockaddr.ipv4.sin_port, 2);
+            buf += 2;
+
+            /* Address */
+            pj_memcpy(buf, &ca->sockaddr.ipv4.sin_addr, 4);
+            buf += 4;
+        }
+
+        pj_assert(buf - start_buf == ATTR_LEN);
+
+    } else if (ca->sockaddr.addr.sa_family == pj_AF_INET6()) {
+        /* IPv6 address */
+        enum {
+            ATTR_LEN = ATTR_HDR_LEN + STUN_GENERIC_IPV6_ADDR_LEN
+        };
+
+        if (len < ATTR_LEN)
+            return PJ_ETOOSMALL;
+
+        /* attribute len */
+        PUTVAL16H(buf, 2, STUN_GENERIC_IPV6_ADDR_LEN);
+        buf += ATTR_HDR_LEN;
+
+        /* Ignored (first octet of the value is reserved) */
+        *buf++ = '\0';
+
+        /* Address family, 2 for IPv6 */
+        *buf++ = 2;
+
+        /* IPv6 address */
+        if (ca->xor_ed) {
+            unsigned i;
+            pj_uint8_t *dst;
+            const pj_uint8_t *src;
+            pj_uint32_t magic = pj_htonl(PJ_STUN_MAGIC);
+            pj_uint16_t port = ca->sockaddr.ipv6.sin6_port;
+
+            /* Port */
+            port ^= pj_htons(PJ_STUN_MAGIC >> 16);
+            pj_memcpy(buf, &port, 2);
+            buf += 2;
+
+            /* Address: XOR the first 4 octets with the magic cookie and
+             * the remaining 12 with the transaction ID.
+             */
+            dst = buf;
+            src = (const pj_uint8_t*) &ca->sockaddr.ipv6.sin6_addr;
+            for (i=0; i<4; ++i) {
+                dst[i] = (pj_uint8_t)(src[i] ^ ((const pj_uint8_t*)&magic)[i]);
+            }
+            /* tsx_id is consumed byte-wise; guard against a type change */
+            pj_assert(sizeof(msghdr->tsx_id[0]) == 1);
+            for (i=0; i<12; ++i) {
+                dst[i+4] = (pj_uint8_t)(src[i+4] ^ msghdr->tsx_id[i]);
+            }
+
+            buf += 16;
+
+        } else {
+            /* Port */
+            pj_memcpy(buf, &ca->sockaddr.ipv6.sin6_port, 2);
+            buf += 2;
+
+            /* Address */
+            pj_memcpy(buf, &ca->sockaddr.ipv6.sin6_addr, 16);
+            buf += 16;
+        }
+
+        pj_assert(buf - start_buf == ATTR_LEN);
+
+    } else {
+        return PJNATH_EINVAF;
+    }
+
+    /* Done */
+    *printed = buf - start_buf;
+
+    return PJ_SUCCESS;
+}
+
+
+static void* clone_sockaddr_attr(pj_pool_t *pool, const void *src)
+{
+ pj_stun_sockaddr_attr *dst = PJ_POOL_ALLOC_T(pool, pj_stun_sockaddr_attr);
+ pj_memcpy(dst, src, sizeof(pj_stun_sockaddr_attr));
+ return (void*)dst;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+/*
+ * STUN generic string attribute
+ */
+
+/*
+ * Initialize a STUN generic string attribute.
+ */
+PJ_DEF(pj_status_t) pj_stun_string_attr_init( pj_stun_string_attr *attr,
+ pj_pool_t *pool,
+ int attr_type,
+ const pj_str_t *value)
+{
+ INIT_ATTR(attr, attr_type, value->slen);
+ if (value && value->slen)
+ pj_strdup(pool, &attr->value, value);
+ else
+ attr->value.slen = 0;
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Create a STUN generic string attribute.
+ */
+PJ_DEF(pj_status_t) pj_stun_string_attr_create(pj_pool_t *pool,
+ int attr_type,
+ const pj_str_t *value,
+ pj_stun_string_attr **p_attr)
+{
+ pj_stun_string_attr *attr;
+
+ PJ_ASSERT_RETURN(pool && value && p_attr, PJ_EINVAL);
+
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_string_attr);
+ *p_attr = attr;
+
+ return pj_stun_string_attr_init(attr, pool, attr_type, value);
+}
+
+
+/*
+ * Create and add STUN generic string attribute to the message.
+ */
+PJ_DEF(pj_status_t) pj_stun_msg_add_string_attr(pj_pool_t *pool,
+ pj_stun_msg *msg,
+ int attr_type,
+ const pj_str_t *value)
+{
+ pj_stun_string_attr *attr = NULL;
+ pj_status_t status;
+
+ status = pj_stun_string_attr_create(pool, attr_type, value,
+ &attr);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ return pj_stun_msg_add_attr(msg, &attr->hdr);
+}
+
+
+static pj_status_t decode_string_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr)
+{
+ pj_stun_string_attr *attr;
+ pj_str_t value;
+
+ PJ_UNUSED_ARG(msghdr);
+
+ /* Create the attribute */
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_string_attr);
+ GETATTRHDR(buf, &attr->hdr);
+
+ /* Get pointer to the string in the message */
+ value.ptr = ((char*)buf + ATTR_HDR_LEN);
+ value.slen = attr->hdr.length;
+
+ /* Copy the string to the attribute */
+ pj_strdup(pool, &attr->value, &value);
+
+ /* Done */
+ *p_attr = attr;
+
+ return PJ_SUCCESS;
+
+}
+
+
/* Encode a generic string attribute into "buf".
 *
 * On success *printed receives the 4-byte-aligned total size written.
 * On PJ_ETOOSMALL, *printed is reset to zero. Note the attribute
 * "length" field on the wire differs between RFC 3489 and RFC 5389
 * endpoints for SOFTWARE; see the comment below.
 */
static pj_status_t encode_string_attr(const void *a, pj_uint8_t *buf, 
                                      unsigned len,
                                      const pj_stun_msg_hdr *msghdr,
                                      unsigned *printed)
{
    const pj_stun_string_attr *ca = 
        (const pj_stun_string_attr*)a;

    PJ_CHECK_STACK();

    PJ_UNUSED_ARG(msghdr);

    /* Calculated total attr_len (add padding if necessary) */
    *printed = (ca->value.slen + ATTR_HDR_LEN + 3) & (~3);
    if (len < *printed) {
        *printed = 0;
        return PJ_ETOOSMALL;
    }

    PUTVAL16H(buf, 0, ca->hdr.type);

    /* Special treatment for SOFTWARE attribute:
     * This attribute had caused interop problem when talking to
     * legacy RFC 3489 STUN servers, due to different "length"
     * rules with RFC 5389.
     */
    if (msghdr->magic != PJ_STUN_MAGIC ||
        ca->hdr.type == PJ_STUN_ATTR_SOFTWARE)
    {
        /* Set the length to be 4-bytes aligned so that we can
         * communicate with RFC 3489 endpoints 
         */
        PUTVAL16H(buf, 2, (pj_uint16_t)((ca->value.slen + 3) & (~3)));
    } else {
        /* Use RFC 5389 rule (length excludes the padding) */
        PUTVAL16H(buf, 2, (pj_uint16_t)ca->value.slen);
    }

    /* Copy the string */
    pj_memcpy(buf+ATTR_HDR_LEN, ca->value.ptr, ca->value.slen);

    /* Add padding character, if string is not 4-bytes aligned. */
    if (ca->value.slen & 0x03) {
        pj_uint8_t pad[3];
        pj_memset(pad, padding_char, sizeof(pad));
        pj_memcpy(buf+ATTR_HDR_LEN+ca->value.slen, pad,
                  4-(ca->value.slen & 0x03));
    }

    /* Done */
    return PJ_SUCCESS;
}
+
+
+static void* clone_string_attr(pj_pool_t *pool, const void *src)
+{
+ const pj_stun_string_attr *asrc = (const pj_stun_string_attr*)src;
+ pj_stun_string_attr *dst = PJ_POOL_ALLOC_T(pool, pj_stun_string_attr);
+
+ pj_memcpy(dst, src, sizeof(pj_stun_attr_hdr));
+ pj_strdup(pool, &dst->value, &asrc->value);
+
+ return (void*)dst;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+/*
+ * STUN empty attribute (used by USE-CANDIDATE).
+ */
+
+/*
+ * Create a STUN empty attribute.
+ */
+PJ_DEF(pj_status_t) pj_stun_empty_attr_create(pj_pool_t *pool,
+ int attr_type,
+ pj_stun_empty_attr **p_attr)
+{
+ pj_stun_empty_attr *attr;
+
+ PJ_ASSERT_RETURN(pool && p_attr, PJ_EINVAL);
+
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_empty_attr);
+ INIT_ATTR(attr, attr_type, 0);
+
+ *p_attr = attr;
+
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Create STUN empty attribute and add the attribute to the message.
+ */
+PJ_DEF(pj_status_t) pj_stun_msg_add_empty_attr( pj_pool_t *pool,
+ pj_stun_msg *msg,
+ int attr_type)
+{
+ pj_stun_empty_attr *attr = NULL;
+ pj_status_t status;
+
+ status = pj_stun_empty_attr_create(pool, attr_type, &attr);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ return pj_stun_msg_add_attr(msg, &attr->hdr);
+}
+
+static pj_status_t decode_empty_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr)
+{
+ pj_stun_empty_attr *attr;
+
+ PJ_UNUSED_ARG(msghdr);
+
+ /* Check that the struct address is valid */
+ pj_assert(sizeof(pj_stun_empty_attr) == ATTR_HDR_LEN);
+
+ /* Create the attribute */
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_empty_attr);
+ GETATTRHDR(buf, &attr->hdr);
+
+ /* Check that the attribute length is valid */
+ if (attr->hdr.length != 0)
+ return PJNATH_ESTUNINATTRLEN;
+
+ /* Done */
+ *p_attr = attr;
+
+ return PJ_SUCCESS;
+}
+
+
+static pj_status_t encode_empty_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed)
+{
+ const pj_stun_empty_attr *ca = (pj_stun_empty_attr*)a;
+
+ PJ_UNUSED_ARG(msghdr);
+
+ if (len < ATTR_HDR_LEN)
+ return PJ_ETOOSMALL;
+
+ PUTVAL16H(buf, 0, ca->hdr.type);
+ PUTVAL16H(buf, 2, 0);
+
+ /* Done */
+ *printed = ATTR_HDR_LEN;
+
+ return PJ_SUCCESS;
+}
+
+
+static void* clone_empty_attr(pj_pool_t *pool, const void *src)
+{
+ pj_stun_empty_attr *dst = PJ_POOL_ALLOC_T(pool, pj_stun_empty_attr);
+
+ pj_memcpy(dst, src, sizeof(pj_stun_empty_attr));
+
+ return (void*) dst;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+/*
+ * STUN generic 32bit integer attribute.
+ */
+
+/*
+ * Create a STUN generic 32bit value attribute.
+ */
+PJ_DEF(pj_status_t) pj_stun_uint_attr_create(pj_pool_t *pool,
+ int attr_type,
+ pj_uint32_t value,
+ pj_stun_uint_attr **p_attr)
+{
+ pj_stun_uint_attr *attr;
+
+ PJ_ASSERT_RETURN(pool && p_attr, PJ_EINVAL);
+
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_uint_attr);
+ INIT_ATTR(attr, attr_type, 4);
+ attr->value = value;
+
+ *p_attr = attr;
+
+ return PJ_SUCCESS;
+}
+
+/* Create and add STUN generic 32bit value attribute to the message. */
+PJ_DEF(pj_status_t) pj_stun_msg_add_uint_attr(pj_pool_t *pool,
+ pj_stun_msg *msg,
+ int attr_type,
+ pj_uint32_t value)
+{
+ pj_stun_uint_attr *attr = NULL;
+ pj_status_t status;
+
+ status = pj_stun_uint_attr_create(pool, attr_type, value, &attr);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ return pj_stun_msg_add_attr(msg, &attr->hdr);
+}
+
+static pj_status_t decode_uint_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr)
+{
+ pj_stun_uint_attr *attr;
+
+ PJ_UNUSED_ARG(msghdr);
+
+ /* Create the attribute */
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_uint_attr);
+ GETATTRHDR(buf, &attr->hdr);
+
+ attr->value = GETVAL32H(buf, 4);
+
+ /* Check that the attribute length is valid */
+ if (attr->hdr.length != 4)
+ return PJNATH_ESTUNINATTRLEN;
+
+ /* Done */
+ *p_attr = attr;
+
+ return PJ_SUCCESS;
+}
+
+
+static pj_status_t encode_uint_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed)
+{
+ const pj_stun_uint_attr *ca = (const pj_stun_uint_attr*)a;
+
+ PJ_CHECK_STACK();
+
+ PJ_UNUSED_ARG(msghdr);
+
+ if (len < 8)
+ return PJ_ETOOSMALL;
+
+ PUTVAL16H(buf, 0, ca->hdr.type);
+ PUTVAL16H(buf, 2, (pj_uint16_t)4);
+ PUTVAL32H(buf, 4, ca->value);
+
+ /* Done */
+ *printed = 8;
+
+ return PJ_SUCCESS;
+}
+
+
+static void* clone_uint_attr(pj_pool_t *pool, const void *src)
+{
+ pj_stun_uint_attr *dst = PJ_POOL_ALLOC_T(pool, pj_stun_uint_attr);
+
+ pj_memcpy(dst, src, sizeof(pj_stun_uint_attr));
+
+ return (void*)dst;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+/*
+ * Create a STUN generic 64bit value attribute.
+ */
+PJ_DEF(pj_status_t) pj_stun_uint64_attr_create(pj_pool_t *pool,
+ int attr_type,
+ const pj_timestamp *value,
+ pj_stun_uint64_attr **p_attr)
+{
+ pj_stun_uint64_attr *attr;
+
+ PJ_ASSERT_RETURN(pool && p_attr, PJ_EINVAL);
+
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_uint64_attr);
+ INIT_ATTR(attr, attr_type, 8);
+
+ if (value) {
+ attr->value.u32.hi = value->u32.hi;
+ attr->value.u32.lo = value->u32.lo;
+ }
+
+ *p_attr = attr;
+
+ return PJ_SUCCESS;
+}
+
+/* Create and add STUN generic 64bit value attribute to the message. */
+PJ_DEF(pj_status_t) pj_stun_msg_add_uint64_attr(pj_pool_t *pool,
+ pj_stun_msg *msg,
+ int attr_type,
+ const pj_timestamp *value)
+{
+ pj_stun_uint64_attr *attr = NULL;
+ pj_status_t status;
+
+ status = pj_stun_uint64_attr_create(pool, attr_type, value, &attr);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ return pj_stun_msg_add_attr(msg, &attr->hdr);
+}
+
+static pj_status_t decode_uint64_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr)
+{
+ pj_stun_uint64_attr *attr;
+
+ PJ_UNUSED_ARG(msghdr);
+
+ /* Create the attribute */
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_uint64_attr);
+ GETATTRHDR(buf, &attr->hdr);
+
+ if (attr->hdr.length != 8)
+ return PJNATH_ESTUNINATTRLEN;
+
+ GETVAL64H(buf, 4, &attr->value);
+
+ /* Done */
+ *p_attr = attr;
+
+ return PJ_SUCCESS;
+}
+
+
+static pj_status_t encode_uint64_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed)
+{
+ const pj_stun_uint64_attr *ca = (const pj_stun_uint64_attr*)a;
+
+ PJ_CHECK_STACK();
+
+ PJ_UNUSED_ARG(msghdr);
+
+ if (len < 12)
+ return PJ_ETOOSMALL;
+
+ PUTVAL16H(buf, 0, ca->hdr.type);
+ PUTVAL16H(buf, 2, (pj_uint16_t)8);
+ PUTVAL64H(buf, 4, &ca->value);
+
+ /* Done */
+ *printed = 12;
+
+ return PJ_SUCCESS;
+}
+
+
+static void* clone_uint64_attr(pj_pool_t *pool, const void *src)
+{
+ pj_stun_uint64_attr *dst = PJ_POOL_ALLOC_T(pool, pj_stun_uint64_attr);
+
+ pj_memcpy(dst, src, sizeof(pj_stun_uint64_attr));
+
+ return (void*)dst;
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+/*
+ * STUN MESSAGE-INTEGRITY attribute.
+ */
+
+/*
+ * Create a STUN MESSAGE-INTEGRITY attribute.
+ */
+PJ_DEF(pj_status_t) pj_stun_msgint_attr_create(pj_pool_t *pool,
+ pj_stun_msgint_attr **p_attr)
+{
+ pj_stun_msgint_attr *attr;
+
+ PJ_ASSERT_RETURN(pool && p_attr, PJ_EINVAL);
+
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_msgint_attr);
+ INIT_ATTR(attr, PJ_STUN_ATTR_MESSAGE_INTEGRITY, 20);
+
+ *p_attr = attr;
+
+ return PJ_SUCCESS;
+}
+
+
+PJ_DEF(pj_status_t) pj_stun_msg_add_msgint_attr(pj_pool_t *pool,
+ pj_stun_msg *msg)
+{
+ pj_stun_msgint_attr *attr = NULL;
+ pj_status_t status;
+
+ status = pj_stun_msgint_attr_create(pool, &attr);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ return pj_stun_msg_add_attr(msg, &attr->hdr);
+}
+
+static pj_status_t decode_msgint_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr)
+{
+ pj_stun_msgint_attr *attr;
+
+ PJ_UNUSED_ARG(msghdr);
+
+ /* Create attribute */
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_msgint_attr);
+ GETATTRHDR(buf, &attr->hdr);
+
+ /* Check that the attribute length is valid */
+ if (attr->hdr.length != 20)
+ return PJNATH_ESTUNINATTRLEN;
+
+ /* Copy hmac */
+ pj_memcpy(attr->hmac, buf+4, 20);
+
+ /* Done */
+ *p_attr = attr;
+ return PJ_SUCCESS;
+}
+
+
+static pj_status_t encode_msgint_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed)
+{
+ const pj_stun_msgint_attr *ca = (const pj_stun_msgint_attr*)a;
+
+ PJ_CHECK_STACK();
+
+ PJ_UNUSED_ARG(msghdr);
+
+ if (len < 24)
+ return PJ_ETOOSMALL;
+
+ /* Copy and convert attribute to network byte order */
+ PUTVAL16H(buf, 0, ca->hdr.type);
+ PUTVAL16H(buf, 2, ca->hdr.length);
+
+ pj_memcpy(buf+4, ca->hmac, 20);
+
+ /* Done */
+ *printed = 24;
+
+ return PJ_SUCCESS;
+}
+
+
+static void* clone_msgint_attr(pj_pool_t *pool, const void *src)
+{
+ pj_stun_msgint_attr *dst = PJ_POOL_ALLOC_T(pool, pj_stun_msgint_attr);
+
+ pj_memcpy(dst, src, sizeof(pj_stun_msgint_attr));
+
+ return (void*) dst;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+/*
+ * STUN ERROR-CODE
+ */
+
+/*
+ * Create a STUN ERROR-CODE attribute.
+ */
+PJ_DEF(pj_status_t) pj_stun_errcode_attr_create(pj_pool_t *pool,
+ int err_code,
+ const pj_str_t *err_reason,
+ pj_stun_errcode_attr **p_attr)
+{
+ pj_stun_errcode_attr *attr;
+ char err_buf[80];
+ pj_str_t str;
+
+ PJ_ASSERT_RETURN(pool && err_code && p_attr, PJ_EINVAL);
+
+ if (err_reason == NULL) {
+ str = pj_stun_get_err_reason(err_code);
+ if (str.slen == 0) {
+ str.slen = pj_ansi_snprintf(err_buf, sizeof(err_buf),
+ "Unknown error %d", err_code);
+ str.ptr = err_buf;
+ }
+ err_reason = &str;
+ }
+
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_errcode_attr);
+ INIT_ATTR(attr, PJ_STUN_ATTR_ERROR_CODE, 4+err_reason->slen);
+ attr->err_code = err_code;
+ pj_strdup(pool, &attr->reason, err_reason);
+
+ *p_attr = attr;
+
+ return PJ_SUCCESS;
+}
+
+
+PJ_DEF(pj_status_t) pj_stun_msg_add_errcode_attr(pj_pool_t *pool,
+ pj_stun_msg *msg,
+ int err_code,
+ const pj_str_t *err_reason)
+{
+ pj_stun_errcode_attr *err_attr = NULL;
+ pj_status_t status;
+
+ status = pj_stun_errcode_attr_create(pool, err_code, err_reason,
+ &err_attr);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ return pj_stun_msg_add_attr(msg, &err_attr->hdr);
+}
+
+static pj_status_t decode_errcode_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr)
+{
+ pj_stun_errcode_attr *attr;
+ pj_str_t value;
+
+ PJ_UNUSED_ARG(msghdr);
+
+ /* Create the attribute */
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_errcode_attr);
+ GETATTRHDR(buf, &attr->hdr);
+
+ attr->err_code = buf[6] * 100 + buf[7];
+
+ /* Get pointer to the string in the message */
+ value.ptr = ((char*)buf + ATTR_HDR_LEN + 4);
+ value.slen = attr->hdr.length - 4;
+
+ /* Copy the string to the attribute */
+ pj_strdup(pool, &attr->reason, &value);
+
+ /* Done */
+ *p_attr = attr;
+
+ return PJ_SUCCESS;
+}
+
+
+static pj_status_t encode_errcode_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed)
+{
+ const pj_stun_errcode_attr *ca =
+ (const pj_stun_errcode_attr*)a;
+
+ PJ_CHECK_STACK();
+
+ PJ_UNUSED_ARG(msghdr);
+
+ if (len < ATTR_HDR_LEN + 4 + (unsigned)ca->reason.slen)
+ return PJ_ETOOSMALL;
+
+ /* Copy and convert attribute to network byte order */
+ PUTVAL16H(buf, 0, ca->hdr.type);
+ PUTVAL16H(buf, 2, (pj_uint16_t)(4 + ca->reason.slen));
+ PUTVAL16H(buf, 4, 0);
+ buf[6] = (pj_uint8_t)(ca->err_code / 100);
+ buf[7] = (pj_uint8_t)(ca->err_code % 100);
+
+ /* Copy error string */
+ pj_memcpy(buf + ATTR_HDR_LEN + 4, ca->reason.ptr, ca->reason.slen);
+
+ /* Done */
+ *printed = (ATTR_HDR_LEN + 4 + ca->reason.slen + 3) & (~3);
+
+ return PJ_SUCCESS;
+}
+
+
+static void* clone_errcode_attr(pj_pool_t *pool, const void *src)
+{
+ const pj_stun_errcode_attr *asrc = (const pj_stun_errcode_attr*)src;
+ pj_stun_errcode_attr *dst = PJ_POOL_ALLOC_T(pool, pj_stun_errcode_attr);
+
+ pj_memcpy(dst, src, sizeof(pj_stun_errcode_attr));
+ pj_strdup(pool, &dst->reason, &asrc->reason);
+
+ return (void*)dst;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+/*
+ * STUN UNKNOWN-ATTRIBUTES attribute
+ */
+
+/*
+ * Create an empty instance of STUN UNKNOWN-ATTRIBUTES attribute.
+ *
+ * @param pool The pool to allocate memory from.
+ * @param p_attr Pointer to receive the attribute.
+ *
+ * @return PJ_SUCCESS on success or the appropriate error code.
+ */
+PJ_DEF(pj_status_t) pj_stun_unknown_attr_create(pj_pool_t *pool,
+ unsigned attr_cnt,
+ const pj_uint16_t attr_array[],
+ pj_stun_unknown_attr **p_attr)
+{
+ pj_stun_unknown_attr *attr;
+ unsigned i;
+
+ PJ_ASSERT_RETURN(pool && attr_cnt < PJ_STUN_MAX_ATTR && p_attr, PJ_EINVAL);
+
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_unknown_attr);
+ INIT_ATTR(attr, PJ_STUN_ATTR_UNKNOWN_ATTRIBUTES, attr_cnt * 2);
+
+ attr->attr_count = attr_cnt;
+ for (i=0; i<attr_cnt; ++i) {
+ attr->attrs[i] = attr_array[i];
+ }
+
+ /* If the number of unknown attributes is an odd number, one of the
+ * attributes MUST be repeated in the list.
+ */
+ /* No longer necessary
+ if ((attr_cnt & 0x01)) {
+ attr->attrs[attr_cnt] = attr_array[attr_cnt-1];
+ }
+ */
+
+ *p_attr = attr;
+
+ return PJ_SUCCESS;
+}
+
+
+/* Create and add STUN UNKNOWN-ATTRIBUTES attribute to the message. */
+PJ_DEF(pj_status_t) pj_stun_msg_add_unknown_attr(pj_pool_t *pool,
+ pj_stun_msg *msg,
+ unsigned attr_cnt,
+ const pj_uint16_t attr_type[])
+{
+ pj_stun_unknown_attr *attr = NULL;
+ pj_status_t status;
+
+ status = pj_stun_unknown_attr_create(pool, attr_cnt, attr_type, &attr);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ return pj_stun_msg_add_attr(msg, &attr->hdr);
+}
+
+static pj_status_t decode_unknown_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr)
+{
+ pj_stun_unknown_attr *attr;
+ const pj_uint16_t *punk_attr;
+ unsigned i;
+
+ PJ_UNUSED_ARG(msghdr);
+
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_unknown_attr);
+ GETATTRHDR(buf, &attr->hdr);
+
+ attr->attr_count = (attr->hdr.length >> 1);
+ if (attr->attr_count > PJ_STUN_MAX_ATTR)
+ return PJ_ETOOMANY;
+
+ punk_attr = (const pj_uint16_t*)(buf + ATTR_HDR_LEN);
+ for (i=0; i<attr->attr_count; ++i) {
+ attr->attrs[i] = pj_ntohs(punk_attr[i]);
+ }
+
+ /* Done */
+ *p_attr = attr;
+
+ return PJ_SUCCESS;
+}
+
+
+static pj_status_t encode_unknown_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed)
+{
+ const pj_stun_unknown_attr *ca = (const pj_stun_unknown_attr*) a;
+ pj_uint16_t *dst_unk_attr;
+ unsigned i;
+
+ PJ_CHECK_STACK();
+
+ PJ_UNUSED_ARG(msghdr);
+
+ /* Check that buffer is enough */
+ if (len < ATTR_HDR_LEN + (ca->attr_count << 1))
+ return PJ_ETOOSMALL;
+
+ PUTVAL16H(buf, 0, ca->hdr.type);
+ PUTVAL16H(buf, 2, (pj_uint16_t)(ca->attr_count << 1));
+
+ /* Copy individual attribute */
+ dst_unk_attr = (pj_uint16_t*)(buf + ATTR_HDR_LEN);
+ for (i=0; i < ca->attr_count; ++i, ++dst_unk_attr) {
+ *dst_unk_attr = pj_htons(ca->attrs[i]);
+ }
+
+ /* Done */
+ *printed = (ATTR_HDR_LEN + (ca->attr_count << 1) + 3) & (~3);
+
+ return PJ_SUCCESS;
+}
+
+
+static void* clone_unknown_attr(pj_pool_t *pool, const void *src)
+{
+ pj_stun_unknown_attr *dst = PJ_POOL_ALLOC_T(pool, pj_stun_unknown_attr);
+
+ pj_memcpy(dst, src, sizeof(pj_stun_unknown_attr));
+
+ return (void*)dst;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+/*
+ * STUN generic binary attribute
+ */
+
+/*
+ * Initialize STUN binary attribute.
+ */
+PJ_DEF(pj_status_t) pj_stun_binary_attr_init( pj_stun_binary_attr *attr,
+ pj_pool_t *pool,
+ int attr_type,
+ const pj_uint8_t *data,
+ unsigned length)
+{
+ PJ_ASSERT_RETURN(attr_type, PJ_EINVAL);
+
+ INIT_ATTR(attr, attr_type, length);
+
+ attr->magic = PJ_STUN_MAGIC;
+
+ if (data && length) {
+ attr->length = length;
+ attr->data = (pj_uint8_t*) pj_pool_alloc(pool, length);
+ pj_memcpy(attr->data, data, length);
+ } else {
+ attr->data = NULL;
+ attr->length = 0;
+ }
+
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Create a blank binary attribute.
+ */
+PJ_DEF(pj_status_t) pj_stun_binary_attr_create(pj_pool_t *pool,
+ int attr_type,
+ const pj_uint8_t *data,
+ unsigned length,
+ pj_stun_binary_attr **p_attr)
+{
+ pj_stun_binary_attr *attr;
+
+ PJ_ASSERT_RETURN(pool && attr_type && p_attr, PJ_EINVAL);
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_binary_attr);
+ *p_attr = attr;
+ return pj_stun_binary_attr_init(attr, pool, attr_type, data, length);
+}
+
+
+/* Create and add binary attr. */
+PJ_DEF(pj_status_t) pj_stun_msg_add_binary_attr(pj_pool_t *pool,
+ pj_stun_msg *msg,
+ int attr_type,
+ const pj_uint8_t *data,
+ unsigned length)
+{
+ pj_stun_binary_attr *attr = NULL;
+ pj_status_t status;
+
+ status = pj_stun_binary_attr_create(pool, attr_type,
+ data, length, &attr);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ return pj_stun_msg_add_attr(msg, &attr->hdr);
+}
+
+
+static pj_status_t decode_binary_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr)
+{
+ pj_stun_binary_attr *attr;
+
+ PJ_UNUSED_ARG(msghdr);
+
+ /* Create the attribute */
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_binary_attr);
+ GETATTRHDR(buf, &attr->hdr);
+
+ /* Copy the data to the attribute */
+ attr->length = attr->hdr.length;
+ attr->data = (pj_uint8_t*) pj_pool_alloc(pool, attr->length);
+ pj_memcpy(attr->data, buf+ATTR_HDR_LEN, attr->length);
+
+ /* Done */
+ *p_attr = attr;
+
+ return PJ_SUCCESS;
+
+}
+
+
+static pj_status_t encode_binary_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed)
+{
+ const pj_stun_binary_attr *ca = (const pj_stun_binary_attr*)a;
+
+ PJ_CHECK_STACK();
+
+ PJ_UNUSED_ARG(msghdr);
+
+ /* Calculated total attr_len (add padding if necessary) */
+ *printed = (ca->length + ATTR_HDR_LEN + 3) & (~3);
+ if (len < *printed)
+ return PJ_ETOOSMALL;
+
+ PUTVAL16H(buf, 0, ca->hdr.type);
+ PUTVAL16H(buf, 2, (pj_uint16_t) ca->length);
+
+ /* Copy the data */
+ pj_memcpy(buf+ATTR_HDR_LEN, ca->data, ca->length);
+
+ /* Done */
+ return PJ_SUCCESS;
+}
+
+
+static void* clone_binary_attr(pj_pool_t *pool, const void *src)
+{
+ const pj_stun_binary_attr *asrc = (const pj_stun_binary_attr*)src;
+ pj_stun_binary_attr *dst = PJ_POOL_ALLOC_T(pool, pj_stun_binary_attr);
+
+ pj_memcpy(dst, src, sizeof(pj_stun_binary_attr));
+
+ if (asrc->length) {
+ dst->data = (pj_uint8_t*) pj_pool_alloc(pool, asrc->length);
+ pj_memcpy(dst->data, asrc->data, asrc->length);
+ }
+
+ return (void*)dst;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+/*
+ * Initialize a generic STUN message.
+ */
/*
 * Initialize a generic STUN message header.
 *
 * When "tsx_id" is NULL a transaction ID is generated from the process
 * ID, a random value, and a monotonically increasing counter.
 *
 * NOTE(review): the static counter below is read-modify-written without
 * synchronization — presumably callers serialize message creation;
 * confirm before relying on this from multiple threads.
 */
PJ_DEF(pj_status_t) pj_stun_msg_init( pj_stun_msg *msg,
                                      unsigned msg_type,
                                      pj_uint32_t magic,
                                      const pj_uint8_t tsx_id[12])
{
    PJ_ASSERT_RETURN(msg && msg_type, PJ_EINVAL);

    msg->hdr.type = (pj_uint16_t) msg_type;
    msg->hdr.length = 0;
    msg->hdr.magic = magic;
    msg->attr_count = 0;

    if (tsx_id) {
        pj_memcpy(&msg->hdr.tsx_id, tsx_id, sizeof(msg->hdr.tsx_id));
    } else {
        /* 12 bytes assembled from three 32-bit fields */
        struct transaction_id
        {
            pj_uint32_t proc_id;
            pj_uint32_t random;
            pj_uint32_t counter;
        } id;
        static pj_uint32_t pj_stun_tsx_id_counter;

        /* Seed the counter lazily on first use */
        if (!pj_stun_tsx_id_counter)
            pj_stun_tsx_id_counter = pj_rand();

        id.proc_id = pj_getpid();
        id.random = pj_rand();
        id.counter = pj_stun_tsx_id_counter++;

        pj_memcpy(&msg->hdr.tsx_id, &id, sizeof(msg->hdr.tsx_id));
    }

    return PJ_SUCCESS;
}
+
+
+/*
+ * Create a blank STUN message.
+ */
+PJ_DEF(pj_status_t) pj_stun_msg_create( pj_pool_t *pool,
+ unsigned msg_type,
+ pj_uint32_t magic,
+ const pj_uint8_t tsx_id[12],
+ pj_stun_msg **p_msg)
+{
+ pj_stun_msg *msg;
+
+ PJ_ASSERT_RETURN(pool && msg_type && p_msg, PJ_EINVAL);
+
+ msg = PJ_POOL_ZALLOC_T(pool, pj_stun_msg);
+ *p_msg = msg;
+ return pj_stun_msg_init(msg, msg_type, magic, tsx_id);
+}
+
+
+/*
+ * Clone a STUN message with all of its attributes.
+ */
+PJ_DEF(pj_stun_msg*) pj_stun_msg_clone( pj_pool_t *pool,
+ const pj_stun_msg *src)
+{
+ pj_stun_msg *dst;
+ unsigned i;
+
+ PJ_ASSERT_RETURN(pool && src, NULL);
+
+ dst = PJ_POOL_ZALLOC_T(pool, pj_stun_msg);
+ pj_memcpy(dst, src, sizeof(pj_stun_msg));
+
+ /* Duplicate the attributes */
+ for (i=0, dst->attr_count=0; i<src->attr_count; ++i) {
+ dst->attr[dst->attr_count] = pj_stun_attr_clone(pool, src->attr[i]);
+ if (dst->attr[dst->attr_count])
+ ++dst->attr_count;
+ }
+
+ return dst;
+}
+
+
+/*
+ * Add STUN attribute to STUN message.
+ */
+PJ_DEF(pj_status_t) pj_stun_msg_add_attr(pj_stun_msg *msg,
+ pj_stun_attr_hdr *attr)
+{
+ PJ_ASSERT_RETURN(msg && attr, PJ_EINVAL);
+ PJ_ASSERT_RETURN(msg->attr_count < PJ_STUN_MAX_ATTR, PJ_ETOOMANY);
+
+ msg->attr[msg->attr_count++] = attr;
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Check that the PDU is potentially a valid STUN message.
+ */
/*
 * Check that the PDU is potentially a valid STUN message.
 *
 * Cheap heuristics only: header length, first-byte pattern, the
 * encoded message length versus "pdu_len", 4-byte alignment, and —
 * when the magic cookie is present and PJ_STUN_NO_FINGERPRINT_CHECK
 * is not set — the FINGERPRINT CRC over the preceding bytes.
 * Returns PJ_SUCCESS when the packet could be STUN, a PJNATH_E*
 * code describing the first failed check otherwise.
 */
PJ_DEF(pj_status_t) pj_stun_msg_check(const pj_uint8_t *pdu, pj_size_t pdu_len,
                                      unsigned options)
{
    pj_size_t msg_len;

    PJ_ASSERT_RETURN(pdu, PJ_EINVAL);

    if (pdu_len < sizeof(pj_stun_msg_hdr))
        return PJNATH_EINSTUNMSGLEN;

    /* First byte of STUN message is always 0x00 or 0x01. */
    if (*pdu != 0x00 && *pdu != 0x01)
        return PJNATH_EINSTUNMSGTYPE;

    /* Check the PDU length: the encoded length excludes the 20-byte
     * header, and for datagram transports it must match exactly.
     */
    msg_len = GETVAL16H(pdu, 2);
    if ((msg_len + 20 > pdu_len) || 
        ((options & PJ_STUN_IS_DATAGRAM) && msg_len + 20 != pdu_len))
    {
        return PJNATH_EINSTUNMSGLEN;
    }

    /* STUN message is always padded to the nearest 4 bytes, thus
     * the last two bits of the length field are always zero.
     */
    if ((msg_len & 0x03) != 0) {
        return PJNATH_EINSTUNMSGLEN;
    }

    /* If magic is set, then there is great possibility that this is
     * a STUN message.
     */
    if (GETVAL32H(pdu, 4) == PJ_STUN_MAGIC) {

        /* Check if FINGERPRINT attribute is present in the last 8 bytes */
        if ((options & PJ_STUN_NO_FINGERPRINT_CHECK )==0 && 
            GETVAL16H(pdu, msg_len + 20 - 8) == PJ_STUN_ATTR_FINGERPRINT) 
        {
            pj_uint16_t attr_len = GETVAL16H(pdu, msg_len + 20 - 8 + 2);
            pj_uint32_t fingerprint = GETVAL32H(pdu, msg_len + 20 - 8 + 4);
            pj_uint32_t crc;

            if (attr_len != 4)
                return PJNATH_ESTUNINATTRLEN;

            /* CRC-32 over everything preceding FINGERPRINT, XOR-ed
             * with the standard 0x5354554e constant.
             */
            crc = pj_crc32_calc(pdu, msg_len + 20 - 8);
            crc ^= STUN_XOR_FINGERPRINT;

            if (crc != fingerprint)
                return PJNATH_ESTUNFINGERPRINT;
        }
    }

    /* Could be a STUN message */
    return PJ_SUCCESS;
}
+
+
+/* Create error response */
+PJ_DEF(pj_status_t) pj_stun_msg_create_response(pj_pool_t *pool,
+ const pj_stun_msg *req_msg,
+ unsigned err_code,
+ const pj_str_t *err_msg,
+ pj_stun_msg **p_response)
+{
+ unsigned msg_type = req_msg->hdr.type;
+ pj_stun_msg *response = NULL;
+ pj_status_t status;
+
+ PJ_ASSERT_RETURN(pool && p_response, PJ_EINVAL);
+
+ PJ_ASSERT_RETURN(PJ_STUN_IS_REQUEST(msg_type),
+ PJNATH_EINSTUNMSGTYPE);
+
+ /* Create response or error response */
+ if (err_code)
+ msg_type |= PJ_STUN_ERROR_RESPONSE_BIT;
+ else
+ msg_type |= PJ_STUN_SUCCESS_RESPONSE_BIT;
+
+ status = pj_stun_msg_create(pool, msg_type, req_msg->hdr.magic,
+ req_msg->hdr.tsx_id, &response);
+ if (status != PJ_SUCCESS) {
+ return status;
+ }
+
+ /* Add error code attribute */
+ if (err_code) {
+ status = pj_stun_msg_add_errcode_attr(pool, response,
+ err_code, err_msg);
+ if (status != PJ_SUCCESS) {
+ return status;
+ }
+ }
+
+ *p_response = response;
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Parse incoming packet into STUN message.
+ */
/*
 * Parse incoming packet into STUN message.
 *
 * Walks the attribute list after the 20-byte header, dispatching each
 * attribute to its decoder via find_attr_desc(). Unrecognized
 * comprehension-optional attributes (type > 0x7FFF) are kept as opaque
 * binary attributes; unrecognized comprehension-required ones abort the
 * parse. When parsing a request fails and "p_response" is given, a
 * ready-made error response is also built so the caller can reply.
 * On success *p_msg receives the message and *p_parsed_len (if given)
 * the number of bytes consumed.
 */
PJ_DEF(pj_status_t) pj_stun_msg_decode(pj_pool_t *pool,
                                       const pj_uint8_t *pdu,
                                       pj_size_t pdu_len,
                                       unsigned options,
                                       pj_stun_msg **p_msg,
                                       pj_size_t *p_parsed_len,
                                       pj_stun_msg **p_response)
{
    
    pj_stun_msg *msg;
    unsigned uattr_cnt;
    const pj_uint8_t *start_pdu = pdu;
    pj_bool_t has_msg_int = PJ_FALSE;
    pj_bool_t has_fingerprint = PJ_FALSE;
    pj_status_t status;

    PJ_UNUSED_ARG(options);

    PJ_ASSERT_RETURN(pool && pdu && pdu_len && p_msg, PJ_EINVAL);
    PJ_ASSERT_RETURN(sizeof(pj_stun_msg_hdr) == 20, PJ_EBUG);

    if (p_parsed_len)
        *p_parsed_len = 0;
    if (p_response)
        *p_response = NULL;

    /* Check if this is a STUN message, if necessary */
    if (options & PJ_STUN_CHECK_PACKET) {
        status = pj_stun_msg_check(pdu, pdu_len, options);
        if (status != PJ_SUCCESS)
            return status;
    }

    /* Create the message, copy the header, and convert to host byte order */
    msg = PJ_POOL_ZALLOC_T(pool, pj_stun_msg);
    pj_memcpy(&msg->hdr, pdu, sizeof(pj_stun_msg_hdr));
    msg->hdr.type = pj_ntohs(msg->hdr.type);
    msg->hdr.length = pj_ntohs(msg->hdr.length);
    msg->hdr.magic = pj_ntohl(msg->hdr.magic);

    /* From here on, pdu_len is the remaining attribute bytes as
     * declared by the header, not the caller's buffer size.
     */
    pdu += sizeof(pj_stun_msg_hdr);
    /* pdu_len -= sizeof(pj_stun_msg_hdr); */
    pdu_len = msg->hdr.length;

    /* No need to create response if this is not a request */
    if (!PJ_STUN_IS_REQUEST(msg->hdr.type))
        p_response = NULL;

    /* Parse attributes */
    uattr_cnt = 0;
    while (pdu_len >= 4) {
        unsigned attr_type, attr_val_len;
        const struct attr_desc *adesc;

        /* Get attribute type and length. If length is not aligned
         * to 4 bytes boundary, add padding.
         */
        attr_type = GETVAL16H(pdu, 0);
        attr_val_len = GETVAL16H(pdu, 2);
        attr_val_len = (attr_val_len + 3) & (~3);

        /* Check length */
        if (pdu_len < attr_val_len) {
            pj_str_t err_msg;
            char err_msg_buf[80];

            err_msg.ptr = err_msg_buf;
            err_msg.slen = pj_ansi_snprintf(err_msg_buf, sizeof(err_msg_buf),
                                            "Attribute %s has invalid length",
                                            pj_stun_get_attr_name(attr_type));

            PJ_LOG(4,(THIS_FILE, "Error decoding message: %.*s",
                      (int)err_msg.slen, err_msg.ptr));

            if (p_response) {
                pj_stun_msg_create_response(pool, msg, 
                                            PJ_STUN_SC_BAD_REQUEST, 
                                            &err_msg, p_response);
            }
            return PJNATH_ESTUNINATTRLEN;
        }

        /* Get the attribute descriptor */
        adesc = find_attr_desc(attr_type);

        if (adesc == NULL) {
            /* Unrecognized attribute */
            pj_stun_binary_attr *attr = NULL;

            PJ_LOG(5,(THIS_FILE, "Unrecognized attribute type 0x%x", 
                      attr_type));

            /* Is this a fatal condition? */
            if (attr_type <= 0x7FFF) {
                /* This is a mandatory attribute, we must return error
                 * if we don't understand the attribute.
                 */
                if (p_response) {
                    unsigned err_code = PJ_STUN_SC_UNKNOWN_ATTRIBUTE;

                    status = pj_stun_msg_create_response(pool, msg,
                                                         err_code, NULL, 
                                                         p_response);
                    if (status==PJ_SUCCESS) {
                        pj_uint16_t d = (pj_uint16_t)attr_type;
                        pj_stun_msg_add_unknown_attr(pool, *p_response, 1, &d);
                    }
                }

                return PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_UNKNOWN_ATTRIBUTE);
            }

            /* Make sure we have rooms for the new attribute */
            if (msg->attr_count >= PJ_STUN_MAX_ATTR) {
                if (p_response) {
                    pj_stun_msg_create_response(pool, msg,
                                                PJ_STUN_SC_SERVER_ERROR,
                                                NULL, p_response);
                }
                return PJNATH_ESTUNTOOMANYATTR;
            }

            /* Create binary attribute to represent this (note: use the
             * original, unpadded value length from the PDU here).
             */
            status = pj_stun_binary_attr_create(pool, attr_type, pdu+4, 
                                                GETVAL16H(pdu, 2), &attr);
            if (status != PJ_SUCCESS) {
                if (p_response) {
                    pj_stun_msg_create_response(pool, msg,
                                                PJ_STUN_SC_SERVER_ERROR,
                                                NULL, p_response);
                }

                PJ_LOG(4,(THIS_FILE, 
                          "Error parsing unknown STUN attribute type %d",
                          attr_type));

                return status;
            }

            /* Add the attribute */
            msg->attr[msg->attr_count++] = &attr->hdr;

        } else {
            void *attr;
            char err_msg1[PJ_ERR_MSG_SIZE],
                 err_msg2[PJ_ERR_MSG_SIZE];

            /* Parse the attribute */
            status = (adesc->decode_attr)(pool, pdu, &msg->hdr, &attr);

            if (status != PJ_SUCCESS) {
                pj_strerror(status, err_msg1, sizeof(err_msg1));

                if (p_response) {
                    pj_str_t e;

                    e.ptr = err_msg2;
                    e.slen= pj_ansi_snprintf(err_msg2, sizeof(err_msg2),
                                             "%s in %s",
                                             err_msg1,
                                             pj_stun_get_attr_name(attr_type));

                    pj_stun_msg_create_response(pool, msg,
                                                PJ_STUN_SC_BAD_REQUEST,
                                                &e, p_response);
                }

                PJ_LOG(4,(THIS_FILE, 
                          "Error parsing STUN attribute %s: %s",
                          pj_stun_get_attr_name(attr_type), 
                          err_msg1));

                return status;
            }

            /* Enforce attribute ordering/uniqueness: at most one
             * MESSAGE-INTEGRITY and one FINGERPRINT, and only
             * FINGERPRINT may follow FINGERPRINT.
             */
            if (attr_type == PJ_STUN_ATTR_MESSAGE_INTEGRITY && 
                !has_fingerprint) 
            {
                if (has_msg_int) {
                    /* Already has MESSAGE-INTEGRITY */
                    if (p_response) {
                        pj_stun_msg_create_response(pool, msg,
                                                    PJ_STUN_SC_BAD_REQUEST,
                                                    NULL, p_response);
                    }
                    return PJNATH_ESTUNDUPATTR;
                }
                has_msg_int = PJ_TRUE;

            } else if (attr_type == PJ_STUN_ATTR_FINGERPRINT) {
                if (has_fingerprint) {
                    /* Already has FINGERPRINT */
                    if (p_response) {
                        pj_stun_msg_create_response(pool, msg,
                                                    PJ_STUN_SC_BAD_REQUEST,
                                                    NULL, p_response);
                    }
                    return PJNATH_ESTUNDUPATTR;
                }
                has_fingerprint = PJ_TRUE;
            } else {
                if (has_fingerprint) {
                    /* Another attribute is found which is not FINGERPRINT
                     * after FINGERPRINT. Note that non-FINGERPRINT is
                     * allowed to appear after M-I
                     */
                    if (p_response) {
                        pj_stun_msg_create_response(pool, msg,
                                                    PJ_STUN_SC_BAD_REQUEST,
                                                    NULL, p_response);
                    }
                    return PJNATH_ESTUNFINGERPOS;
                }
            }

            /* Make sure we have rooms for the new attribute */
            if (msg->attr_count >= PJ_STUN_MAX_ATTR) {
                if (p_response) {
                    pj_stun_msg_create_response(pool, msg,
                                                PJ_STUN_SC_SERVER_ERROR,
                                                NULL, p_response);
                }
                return PJNATH_ESTUNTOOMANYATTR;
            }

            /* Add the attribute */
            msg->attr[msg->attr_count++] = (pj_stun_attr_hdr*)attr;
        }

        /* Next attribute */
        if (attr_val_len + 4 >= pdu_len) {
            pdu += pdu_len;
            pdu_len = 0;
        } else {
            pdu += (attr_val_len + 4);
            pdu_len -= (attr_val_len + 4);
        }
    }

    if (pdu_len > 0) {
        /* Stray trailing bytes */
        PJ_LOG(4,(THIS_FILE, 
                  "Error decoding STUN message: unparsed trailing %d bytes",
                  pdu_len));
        return PJNATH_EINSTUNMSGLEN;
    }

    *p_msg = msg;

    if (p_parsed_len)
        *p_parsed_len = (pdu - start_pdu);

    return PJ_SUCCESS;
}
+
+/*
+static char *print_binary(const pj_uint8_t *data, unsigned data_len)
+{
+ static char static_buffer[1024];
+ char *buffer = static_buffer;
+ unsigned length=sizeof(static_buffer), i;
+
+ if (length < data_len * 2 + 8)
+ return "";
+
+ pj_ansi_sprintf(buffer, ", data=");
+ buffer += 7;
+
+ for (i=0; i<data_len; ++i) {
+ pj_ansi_sprintf(buffer, "%02x", (*data) & 0xFF);
+ buffer += 2;
+ data++;
+ }
+
+ pj_ansi_sprintf(buffer, "\n");
+ buffer++;
+
+ return static_buffer;
+}
+*/
+
+/*
+ * Encode the STUN message structure into wire (packet) format.
+ */
+PJ_DEF(pj_status_t) pj_stun_msg_encode(pj_stun_msg *msg,
+ pj_uint8_t *buf, pj_size_t buf_size,
+ unsigned options,
+ const pj_str_t *key,
+ pj_size_t *p_msg_len)
+{
+ /* Encode the parsed STUN message into wire format in buf.
+ * key is the credential used to compute MESSAGE-INTEGRITY (required
+ * only when an M-I attribute is present in the message). On success
+ * the total packet length is stored in *p_msg_len.
+ */
+ pj_uint8_t *start = buf;
+ pj_stun_msgint_attr *amsgint = NULL;
+ pj_stun_fingerprint_attr *afingerprint = NULL;
+ unsigned printed = 0, body_len;
+ pj_status_t status;
+ unsigned i;
+
+
+ PJ_ASSERT_RETURN(msg && buf && buf_size, PJ_EINVAL);
+
+ /* No options are currently defined; callers must pass zero. */
+ PJ_UNUSED_ARG(options);
+ PJ_ASSERT_RETURN(options == 0, PJ_EINVAL);
+
+ /* Copy the message header part and convert the header fields to
+ * network byte order
+ */
+ if (buf_size < sizeof(pj_stun_msg_hdr))
+ return PJ_ETOOSMALL;
+
+ PUTVAL16H(buf, 0, msg->hdr.type);
+ PUTVAL16H(buf, 2, 0); /* length will be calculated later */
+ PUTVAL32H(buf, 4, msg->hdr.magic);
+ pj_memcpy(buf+8, msg->hdr.tsx_id, sizeof(msg->hdr.tsx_id));
+
+ buf += sizeof(pj_stun_msg_hdr);
+ buf_size -= sizeof(pj_stun_msg_hdr);
+
+ /* Encode each attribute to the message. This first pass stops as
+ * soon as MESSAGE-INTEGRITY or FINGERPRINT is found, because those
+ * two are computed over the bytes encoded so far and must be
+ * emitted last (M-I before FINGERPRINT).
+ */
+ for (i=0; i<msg->attr_count; ++i) {
+ const struct attr_desc *adesc;
+ const pj_stun_attr_hdr *attr_hdr = msg->attr[i];
+
+ if (attr_hdr->type == PJ_STUN_ATTR_MESSAGE_INTEGRITY) {
+ pj_assert(amsgint == NULL);
+ amsgint = (pj_stun_msgint_attr*) attr_hdr;
+
+ /* Stop when encountering MESSAGE-INTEGRITY */
+ break;
+
+ } else if (attr_hdr->type == PJ_STUN_ATTR_FINGERPRINT) {
+ afingerprint = (pj_stun_fingerprint_attr*) attr_hdr;
+ break;
+ }
+
+ adesc = find_attr_desc(attr_hdr->type);
+ if (adesc) {
+ status = adesc->encode_attr(attr_hdr, buf, buf_size, &msg->hdr,
+ &printed);
+ } else {
+ /* This may be a generic attribute */
+ const pj_stun_binary_attr *bin_attr = (const pj_stun_binary_attr*)
+ attr_hdr;
+ PJ_ASSERT_RETURN(bin_attr->magic == PJ_STUN_MAGIC, PJ_EBUG);
+ status = encode_binary_attr(bin_attr, buf, buf_size, &msg->hdr,
+ &printed);
+ }
+
+ if (status != PJ_SUCCESS)
+ return status;
+
+ buf += printed;
+ buf_size -= printed;
+ }
+
+ /* We may have stopped printing attribute because we found
+ * MESSAGE-INTEGRITY or FINGERPRINT. Scan the rest of the
+ * attributes.
+ */
+ for ( ++i; i<msg->attr_count; ++i) {
+ const pj_stun_attr_hdr *attr_hdr = msg->attr[i];
+
+ /* There mustn't any attribute after FINGERPRINT */
+ PJ_ASSERT_RETURN(afingerprint == NULL, PJNATH_ESTUNFINGERPOS);
+
+ if (attr_hdr->type == PJ_STUN_ATTR_MESSAGE_INTEGRITY) {
+ /* There mustn't be MESSAGE-INTEGRITY before */
+ PJ_ASSERT_RETURN(amsgint == NULL,
+ PJNATH_ESTUNMSGINTPOS);
+ amsgint = (pj_stun_msgint_attr*) attr_hdr;
+
+ } else if (attr_hdr->type == PJ_STUN_ATTR_FINGERPRINT) {
+ afingerprint = (pj_stun_fingerprint_attr*) attr_hdr;
+ }
+ }
+
+#if PJ_STUN_OLD_STYLE_MI_FINGERPRINT
+ /*
+ * This is the old style MESSAGE-INTEGRITY and FINGERPRINT
+ * calculation, used in rfc3489bis-06 and older.
+ */
+ /* We MUST update the message length in the header NOW before
+ * calculating MESSAGE-INTEGRITY and FINGERPRINT.
+ * Note that length is not including the 20 bytes header.
+ */
+ if (amsgint && afingerprint) {
+ body_len = (pj_uint16_t)((buf - start) - 20 + 24 + 8);
+ } else if (amsgint) {
+ body_len = (pj_uint16_t)((buf - start) - 20 + 24);
+ } else if (afingerprint) {
+ body_len = (pj_uint16_t)((buf - start) - 20 + 8);
+ } else {
+ body_len = (pj_uint16_t)((buf - start) - 20);
+ }
+#else
+ /* If MESSAGE-INTEGRITY is present, include the M-I attribute
+ * in message length before calculating M-I.
+ * (24 = 4 bytes attribute header + 20 bytes HMAC-SHA1 value.)
+ */
+ if (amsgint) {
+ body_len = (pj_uint16_t)((buf - start) - 20 + 24);
+ } else {
+ body_len = (pj_uint16_t)((buf - start) - 20);
+ }
+#endif /* PJ_STUN_OLD_STYLE_MI_FINGERPRINT */
+
+ /* hdr->length = pj_htons(length); */
+ PUTVAL16H(start, 2, (pj_uint16_t)body_len);
+
+ /* Calculate message integrity, if present */
+ if (amsgint != NULL) {
+ pj_hmac_sha1_context ctx;
+
+ /* Key MUST be specified */
+ PJ_ASSERT_RETURN(key, PJ_EINVALIDOP);
+
+ /* MESSAGE-INTEGRITY must be the last attribute in the message, or
+ * the last attribute before FINGERPRINT.
+ */
+ if (msg->attr_count>1 && i < msg->attr_count-2) {
+ /* Should not happen for message generated by us */
+ pj_assert(PJ_FALSE);
+ return PJNATH_ESTUNMSGINTPOS;
+
+ } else if (i == msg->attr_count-2) {
+ if (msg->attr[i+1]->type != PJ_STUN_ATTR_FINGERPRINT) {
+ /* Should not happen for message generated by us */
+ pj_assert(PJ_FALSE);
+ return PJNATH_ESTUNMSGINTPOS;
+ } else {
+ afingerprint = (pj_stun_fingerprint_attr*) msg->attr[i+1];
+ }
+ }
+
+ /* Calculate HMAC-SHA1 digest, add zero padding to input
+ * if necessary to make the input 64 bytes aligned.
+ */
+ pj_hmac_sha1_init(&ctx, (const pj_uint8_t*)key->ptr, key->slen);
+ pj_hmac_sha1_update(&ctx, (const pj_uint8_t*)start, buf-start);
+#if PJ_STUN_OLD_STYLE_MI_FINGERPRINT
+ // These are obsoleted in rfc3489bis-08
+ if ((buf-start) & 0x3F) {
+ pj_uint8_t zeroes[64];
+ pj_bzero(zeroes, sizeof(zeroes));
+ pj_hmac_sha1_update(&ctx, zeroes, 64-((buf-start) & 0x3F));
+ }
+#endif /* PJ_STUN_OLD_STYLE_MI_FINGERPRINT */
+ pj_hmac_sha1_final(&ctx, amsgint->hmac);
+
+ /* Put this attribute in the message */
+ status = encode_msgint_attr(amsgint, buf, buf_size,
+ &msg->hdr, &printed);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ buf += printed;
+ buf_size -= printed;
+ }
+
+ /* Calculate FINGERPRINT if present */
+ if (afingerprint != NULL) {
+
+#if !PJ_STUN_OLD_STYLE_MI_FINGERPRINT
+ /* Update message length to cover the 8-byte FINGERPRINT
+ * attribute before computing the CRC over the packet.
+ */
+ PUTVAL16H(start, 2,
+ (pj_uint16_t)(GETVAL16H(start, 2)+8));
+#endif
+
+ /* CRC-32 of the packet so far, XOR'ed with the STUN constant. */
+ afingerprint->value = pj_crc32_calc(start, buf-start);
+ afingerprint->value ^= STUN_XOR_FINGERPRINT;
+
+ /* Put this attribute in the message */
+ status = encode_uint_attr(afingerprint, buf, buf_size,
+ &msg->hdr, &printed);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ buf += printed;
+ buf_size -= printed;
+ }
+
+ /* Update message length in the in-memory structure too. */
+ msg->hdr.length = (pj_uint16_t) ((buf - start) - 20);
+
+ /* Return the length */
+ if (p_msg_len)
+ *p_msg_len = (buf - start);
+
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Find STUN attribute in the STUN message, starting from the specified
+ * index.
+ */
+PJ_DEF(pj_stun_attr_hdr*) pj_stun_msg_find_attr( const pj_stun_msg *msg,
+ int attr_type,
+ unsigned index)
+{
+ PJ_ASSERT_RETURN(msg, NULL);
+
+ /* Linear scan starting at 'index'; returns the first attribute of
+ * the requested type, or NULL if none is found.
+ */
+ for (; index < msg->attr_count; ++index) {
+ if (msg->attr[index]->type == attr_type)
+ return (pj_stun_attr_hdr*) msg->attr[index];
+ }
+
+ return NULL;
+}
+
+
+/*
+ * Clone a STUN attribute.
+ */
+PJ_DEF(pj_stun_attr_hdr*) pj_stun_attr_clone( pj_pool_t *pool,
+ const pj_stun_attr_hdr *attr)
+{
+ const struct attr_desc *adesc;
+
+ /* Get the attribute descriptor */
+ adesc = find_attr_desc(attr->type);
+ if (adesc) {
+ /* Known attribute: use its type-specific clone function. */
+ return (pj_stun_attr_hdr*) (*adesc->clone_attr)(pool, attr);
+ } else {
+ /* Clone generic attribute */
+ const pj_stun_binary_attr *bin_attr = (const pj_stun_binary_attr*)
+ attr;
+ /* The magic check appears twice on purpose: PJ_ASSERT_RETURN may
+ * be compiled out in release builds, so the explicit 'if' below
+ * still guards against cloning a non-binary attribute.
+ */
+ PJ_ASSERT_RETURN(bin_attr->magic == PJ_STUN_MAGIC, NULL);
+ if (bin_attr->magic == PJ_STUN_MAGIC) {
+ return (pj_stun_attr_hdr*) clone_binary_attr(pool, attr);
+ } else {
+ return NULL;
+ }
+ }
+}
+
+
diff --git a/pjnath/src/pjnath/stun_msg_dump.c b/pjnath/src/pjnath/stun_msg_dump.c
new file mode 100644
index 0000000..c7488c9
--- /dev/null
+++ b/pjnath/src/pjnath/stun_msg_dump.c
@@ -0,0 +1,298 @@
+/* $Id: stun_msg_dump.c 3553 2011-05-05 06:14:19Z nanang $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <pjnath/stun_msg.h>
+#include <pjnath/errno.h>
+#include <pj/assert.h>
+#include <pj/os.h>
+#include <pj/string.h>
+
+#if PJ_LOG_MAX_LEVEL > 0
+
+
+#define APPLY() if (len < 1 || len >= (end-p)) \
+ goto on_return; \
+ p += len
+
+static int print_binary(char *buffer, unsigned length,
+ const pj_uint8_t *data, unsigned data_len)
+{
+ /* Append ", data=<hex bytes>\n" to buffer. Returns the number of
+ * characters written, or -1 when the buffer is too small.
+ */
+ unsigned i;
+
+ /* NOTE(review): the function writes 7 + 2*data_len + 1 chars plus a
+ * trailing NUL (2*data_len + 9 bytes total), so accepting
+ * length == data_len*2 + 8 looks one byte short — confirm.
+ */
+ if (length < data_len * 2 + 8)
+ return -1;
+
+ pj_ansi_sprintf(buffer, ", data=");
+ buffer += 7;
+
+ for (i=0; i<data_len; ++i) {
+ pj_ansi_sprintf(buffer, "%02x", (*data) & 0xFF);
+ buffer += 2;
+ data++;
+ }
+
+ pj_ansi_sprintf(buffer, "\n");
+ buffer++;
+
+ return data_len * 2 + 8;
+}
+
+static int print_attr(char *buffer, unsigned length,
+ const pj_stun_attr_hdr *ahdr)
+{
+ /* Dump one attribute in human-readable form into buffer.
+ * Returns the number of characters written; on truncation the
+ * APPLY() macro jumps to on_return and the (invalid) snprintf
+ * result is returned to the caller.
+ */
+ char *p = buffer, *end = buffer + length;
+ const char *attr_name = pj_stun_get_attr_name(ahdr->type);
+ char attr_buf[32];
+ int len;
+
+ /* Unknown attribute names come back as "?"; show the raw type. */
+ if (*attr_name == '?') {
+ pj_ansi_snprintf(attr_buf, sizeof(attr_buf), "Attr 0x%x",
+ ahdr->type);
+ attr_name = attr_buf;
+ }
+
+ len = pj_ansi_snprintf(p, end-p,
+ " %s: length=%d",
+ attr_name,
+ (int)ahdr->length);
+ APPLY();
+
+
+ /* Value formatting is selected by attribute type. */
+ switch (ahdr->type) {
+ case PJ_STUN_ATTR_MAPPED_ADDR:
+ case PJ_STUN_ATTR_RESPONSE_ADDR:
+ case PJ_STUN_ATTR_SOURCE_ADDR:
+ case PJ_STUN_ATTR_CHANGED_ADDR:
+ case PJ_STUN_ATTR_REFLECTED_FROM:
+ case PJ_STUN_ATTR_XOR_PEER_ADDR:
+ case PJ_STUN_ATTR_XOR_RELAYED_ADDR:
+ case PJ_STUN_ATTR_XOR_MAPPED_ADDR:
+ case PJ_STUN_ATTR_XOR_REFLECTED_FROM:
+ case PJ_STUN_ATTR_ALTERNATE_SERVER:
+ {
+ const pj_stun_sockaddr_attr *attr;
+
+ attr = (const pj_stun_sockaddr_attr*)ahdr;
+
+ if (attr->sockaddr.addr.sa_family == pj_AF_INET()) {
+ len = pj_ansi_snprintf(p, end-p,
+ ", IPv4 addr=%s:%d\n",
+ pj_inet_ntoa(attr->sockaddr.ipv4.sin_addr),
+ pj_ntohs(attr->sockaddr.ipv4.sin_port));
+
+ } else if (attr->sockaddr.addr.sa_family == pj_AF_INET6()) {
+ len = pj_ansi_snprintf(p, end-p,
+ ", IPv6 addr present\n");
+ } else {
+ len = pj_ansi_snprintf(p, end-p,
+ ", INVALID ADDRESS FAMILY!\n");
+ }
+ APPLY();
+ }
+ break;
+
+ case PJ_STUN_ATTR_CHANNEL_NUMBER:
+ {
+ const pj_stun_uint_attr *attr;
+
+ attr = (const pj_stun_uint_attr*)ahdr;
+ len = pj_ansi_snprintf(p, end-p,
+ ", chnum=%u (0x%x)\n",
+ (int)PJ_STUN_GET_CH_NB(attr->value),
+ (int)PJ_STUN_GET_CH_NB(attr->value));
+ APPLY();
+ }
+ break;
+
+ case PJ_STUN_ATTR_CHANGE_REQUEST:
+ case PJ_STUN_ATTR_LIFETIME:
+ case PJ_STUN_ATTR_BANDWIDTH:
+ case PJ_STUN_ATTR_REQ_ADDR_TYPE:
+ case PJ_STUN_ATTR_EVEN_PORT:
+ case PJ_STUN_ATTR_REQ_TRANSPORT:
+ case PJ_STUN_ATTR_TIMER_VAL:
+ case PJ_STUN_ATTR_PRIORITY:
+ case PJ_STUN_ATTR_FINGERPRINT:
+ case PJ_STUN_ATTR_REFRESH_INTERVAL:
+ case PJ_STUN_ATTR_ICMP:
+ {
+ const pj_stun_uint_attr *attr;
+
+ attr = (const pj_stun_uint_attr*)ahdr;
+ len = pj_ansi_snprintf(p, end-p,
+ ", value=%u (0x%x)\n",
+ (pj_uint32_t)attr->value,
+ (pj_uint32_t)attr->value);
+ APPLY();
+ }
+ break;
+
+ case PJ_STUN_ATTR_USERNAME:
+ case PJ_STUN_ATTR_PASSWORD:
+ case PJ_STUN_ATTR_REALM:
+ case PJ_STUN_ATTR_NONCE:
+ case PJ_STUN_ATTR_SOFTWARE:
+ {
+ const pj_stun_string_attr *attr;
+
+ attr = (pj_stun_string_attr*)ahdr;
+ len = pj_ansi_snprintf(p, end-p,
+ ", value=\"%.*s\"\n",
+ (int)attr->value.slen,
+ attr->value.ptr);
+ APPLY();
+ }
+ break;
+
+ case PJ_STUN_ATTR_ERROR_CODE:
+ {
+ const pj_stun_errcode_attr *attr;
+
+ attr = (const pj_stun_errcode_attr*) ahdr;
+ len = pj_ansi_snprintf(p, end-p,
+ ", err_code=%d, reason=\"%.*s\"\n",
+ attr->err_code,
+ (int)attr->reason.slen,
+ attr->reason.ptr);
+ APPLY();
+ }
+ break;
+
+ case PJ_STUN_ATTR_UNKNOWN_ATTRIBUTES:
+ {
+ const pj_stun_unknown_attr *attr;
+ unsigned j;
+
+ attr = (const pj_stun_unknown_attr*) ahdr;
+
+ len = pj_ansi_snprintf(p, end-p,
+ ", unknown list:");
+ APPLY();
+
+ for (j=0; j<attr->attr_count; ++j) {
+ len = pj_ansi_snprintf(p, end-p,
+ " %d",
+ (int)attr->attrs[j]);
+ APPLY();
+ }
+ }
+ break;
+
+ case PJ_STUN_ATTR_MESSAGE_INTEGRITY:
+ {
+ const pj_stun_msgint_attr *attr;
+
+ attr = (const pj_stun_msgint_attr*) ahdr;
+ /* HMAC-SHA1 digest is always 20 bytes. */
+ len = print_binary(p, end-p, attr->hmac, 20);
+ APPLY();
+ }
+ break;
+
+ case PJ_STUN_ATTR_DATA:
+ {
+ const pj_stun_binary_attr *attr;
+
+ attr = (const pj_stun_binary_attr*) ahdr;
+ len = print_binary(p, end-p, attr->data, attr->length);
+ APPLY();
+ }
+ break;
+ case PJ_STUN_ATTR_ICE_CONTROLLED:
+ case PJ_STUN_ATTR_ICE_CONTROLLING:
+ case PJ_STUN_ATTR_RESERVATION_TOKEN:
+ {
+ const pj_stun_uint64_attr *attr;
+ pj_uint8_t data[8];
+ int i;
+
+ attr = (const pj_stun_uint64_attr*) ahdr;
+
+ /* Dump the 64-bit value with bytes reversed (big-endian view). */
+ for (i=0; i<8; ++i)
+ data[i] = ((const pj_uint8_t*)&attr->value)[7-i];
+
+ len = print_binary(p, end-p, data, 8);
+ APPLY();
+ }
+ break;
+ case PJ_STUN_ATTR_USE_CANDIDATE:
+ case PJ_STUN_ATTR_DONT_FRAGMENT:
+ default:
+ /* Flag-only or unhandled attribute: nothing more to show. */
+ len = pj_ansi_snprintf(p, end-p, "\n");
+ APPLY();
+ break;
+ }
+
+ return (p-buffer);
+
+on_return:
+ return len;
+}
+
+
+/*
+ * Dump STUN message to a printable string output.
+ */
+PJ_DEF(char*) pj_stun_msg_dump(const pj_stun_msg *msg,
+ char *buffer,
+ unsigned length,
+ unsigned *printed_len)
+{
+ /* Render the whole STUN message (header + all attributes) as a
+ * printable string into buffer. Output stops silently when buffer
+ * runs out. Returns buffer; *printed_len (optional) receives the
+ * number of characters written.
+ */
+ char *p, *end;
+ int len;
+ unsigned i;
+
+ PJ_ASSERT_RETURN(msg && buffer && length, NULL);
+
+ PJ_CHECK_STACK();
+
+ p = buffer;
+ end = buffer + length;
+
+ len = pj_ansi_snprintf(p, end-p, "STUN %s %s\n",
+ pj_stun_get_method_name(msg->hdr.type),
+ pj_stun_get_class_name(msg->hdr.type));
+ APPLY();
+
+ /* NOTE(review): tsx_id is a byte array accessed through pj_uint32_t*
+ * casts here; assumes adequate alignment of the header struct —
+ * confirm on strict-alignment targets.
+ */
+ len = pj_ansi_snprintf(p, end-p,
+ " Hdr: length=%d, magic=%08x, tsx_id=%08x%08x%08x\n"
+ " Attributes:\n",
+ msg->hdr.length,
+ msg->hdr.magic,
+ *(pj_uint32_t*)&msg->hdr.tsx_id[0],
+ *(pj_uint32_t*)&msg->hdr.tsx_id[4],
+ *(pj_uint32_t*)&msg->hdr.tsx_id[8]);
+ APPLY();
+
+ for (i=0; i<msg->attr_count; ++i) {
+ len = print_attr(p, end-p, msg->attr[i]);
+ APPLY();
+ }
+
+on_return:
+ /* Always NUL-terminate, even on truncation. */
+ *p = '\0';
+ if (printed_len)
+ *printed_len = (p-buffer);
+ return buffer;
+
+#undef APPLY
+}
+
+
+#endif /* PJ_LOG_MAX_LEVEL > 0 */
+
diff --git a/pjnath/src/pjnath/stun_session.c b/pjnath/src/pjnath/stun_session.c
new file mode 100644
index 0000000..45d5313
--- /dev/null
+++ b/pjnath/src/pjnath/stun_session.c
@@ -0,0 +1,1436 @@
+/* $Id: stun_session.c 3843 2011-10-24 14:13:35Z bennylp $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <pjnath/stun_session.h>
+#include <pjnath/errno.h>
+#include <pjlib.h>
+
+struct pj_stun_session
+{
+ pj_stun_config *cfg; /* Shared STUN endpoint config */
+ pj_pool_t *pool; /* Session's own memory pool */
+ pj_lock_t *lock; /* Serializes session access */
+ pj_bool_t delete_lock; /* Destroy lock with session? */
+ pj_stun_session_cb cb; /* Application callbacks */
+ void *user_data; /* Opaque app data */
+
+ pj_atomic_t *busy; /* >0 while inside a callback */
+ pj_bool_t destroy_request; /* Deferred destroy pending */
+
+ pj_bool_t use_fingerprint; /* Add FINGERPRINT to msgs */
+
+ pj_pool_t *rx_pool; /* Pool for incoming packets */
+
+#if PJ_LOG_MAX_LEVEL >= 5
+ char dump_buf[1000]; /* Scratch for msg dump logs */
+#endif
+ unsigned log_flag; /* Which directions to log */
+
+ pj_stun_auth_type auth_type; /* None/short/long term */
+ pj_stun_auth_cred cred; /* Credential for auth */
+ int auth_retry; /* 401 retry counter */
+ pj_str_t next_nonce; /* Nonce from last challenge */
+ pj_str_t server_realm; /* Realm from last challenge */
+
+ pj_str_t srv_name; /* Value for SOFTWARE attr */
+
+ pj_stun_tx_data pending_request_list; /* Outstanding requests */
+ pj_stun_tx_data cached_response_list; /* Responses kept for rtx */
+};
+
+#define SNAME(s_) ((s_)->pool->obj_name)
+
+#if PJ_LOG_MAX_LEVEL >= 5
+# define TRACE_(expr) PJ_LOG(5,expr)
+#else
+# define TRACE_(expr)
+#endif
+
+#define LOG_ERR_(sess,title,rc) pjnath_perror(sess->pool->obj_name,title,rc)
+
+#define TDATA_POOL_SIZE PJNATH_POOL_LEN_STUN_TDATA
+#define TDATA_POOL_INC PJNATH_POOL_INC_STUN_TDATA
+
+
+static void stun_tsx_on_complete(pj_stun_client_tsx *tsx,
+ pj_status_t status,
+ const pj_stun_msg *response,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len);
+static pj_status_t stun_tsx_on_send_msg(pj_stun_client_tsx *tsx,
+ const void *stun_pkt,
+ pj_size_t pkt_size);
+static void stun_tsx_on_destroy(pj_stun_client_tsx *tsx);
+
+/* Callback table handed to every client transaction we create. */
+static pj_stun_tsx_cb tsx_cb =
+{
+ &stun_tsx_on_complete,
+ &stun_tsx_on_send_msg,
+ &stun_tsx_on_destroy
+};
+
+
+/* Register an outgoing request in the session's pending list. */
+static pj_status_t tsx_add(pj_stun_session *sess,
+ pj_stun_tx_data *tdata)
+{
+ pj_list_push_front(&sess->pending_request_list, tdata);
+ return PJ_SUCCESS;
+}
+
+/* Unlink a request from whatever list it is in (sess unused). */
+static pj_status_t tsx_erase(pj_stun_session *sess,
+ pj_stun_tx_data *tdata)
+{
+ PJ_UNUSED_ARG(sess);
+ pj_list_erase(tdata);
+ return PJ_SUCCESS;
+}
+
+/* Find the pending request matching msg's magic + transaction ID,
+ * or NULL when there is no match.
+ */
+static pj_stun_tx_data* tsx_lookup(pj_stun_session *sess,
+ const pj_stun_msg *msg)
+{
+ pj_stun_tx_data *tdata;
+
+ tdata = sess->pending_request_list.next;
+ while (tdata != &sess->pending_request_list) {
+ pj_assert(sizeof(tdata->msg_key)==sizeof(msg->hdr.tsx_id));
+ if (tdata->msg_magic == msg->hdr.magic &&
+ pj_memcmp(tdata->msg_key, msg->hdr.tsx_id,
+ sizeof(msg->hdr.tsx_id))==0)
+ {
+ return tdata;
+ }
+ tdata = tdata->next;
+ }
+
+ return NULL;
+}
+
+/* Allocate a transmit-data object with its own pool; the pool is
+ * released when the tdata is destroyed.
+ */
+static pj_status_t create_tdata(pj_stun_session *sess,
+ pj_stun_tx_data **p_tdata)
+{
+ pj_pool_t *pool;
+ pj_stun_tx_data *tdata;
+
+ /* Create pool and initialize basic tdata attributes */
+ pool = pj_pool_create(sess->cfg->pf, "tdata%p",
+ TDATA_POOL_SIZE, TDATA_POOL_INC, NULL);
+ PJ_ASSERT_RETURN(pool, PJ_ENOMEM);
+
+ tdata = PJ_POOL_ZALLOC_T(pool, pj_stun_tx_data);
+ tdata->pool = pool;
+ tdata->sess = sess;
+
+ pj_list_init(tdata);
+
+ *p_tdata = tdata;
+
+ return PJ_SUCCESS;
+}
+
+/* Transaction destroy callback: remove the owning tdata from the
+ * pending list and free both the transaction and the tdata pool.
+ */
+static void stun_tsx_on_destroy(pj_stun_client_tsx *tsx)
+{
+ pj_stun_tx_data *tdata;
+
+ tdata = (pj_stun_tx_data*) pj_stun_client_tsx_get_data(tsx);
+ tsx_erase(tdata->sess, tdata);
+
+ pj_stun_client_tsx_destroy(tsx);
+ pj_pool_release(tdata->pool);
+}
+
+/* Destroy a tdata. With force, everything is torn down immediately;
+ * otherwise a live client transaction is only scheduled for delayed
+ * destruction (so late retransmitted responses can still be absorbed).
+ */
+static void destroy_tdata(pj_stun_tx_data *tdata, pj_bool_t force)
+{
+ /* Cancel the response-cache timer, if armed. */
+ if (tdata->res_timer.id != PJ_FALSE) {
+ pj_timer_heap_cancel(tdata->sess->cfg->timer_heap,
+ &tdata->res_timer);
+ tdata->res_timer.id = PJ_FALSE;
+ pj_list_erase(tdata);
+ }
+
+ if (force) {
+ if (tdata->client_tsx) {
+ tsx_erase(tdata->sess, tdata);
+ pj_stun_client_tsx_destroy(tdata->client_tsx);
+ }
+ pj_pool_release(tdata->pool);
+
+ } else {
+ if (tdata->client_tsx) {
+ /* Graceful: let the tsx linger briefly, then its destroy
+ * callback releases the pool.
+ */
+ pj_time_val delay = {2, 0};
+ pj_stun_client_tsx_schedule_destroy(tdata->client_tsx, &delay);
+
+ } else {
+ pj_pool_release(tdata->pool);
+ }
+ }
+}
+
+/*
+ * Destroy the transmit data.
+ */
+PJ_DEF(void) pj_stun_msg_destroy_tdata( pj_stun_session *sess,
+ pj_stun_tx_data *tdata)
+{
+ /* Public wrapper: always a graceful (non-forced) destroy. */
+ PJ_UNUSED_ARG(sess);
+ destroy_tdata(tdata, PJ_FALSE);
+}
+
+
+/* Timer callback to be called when it's time to destroy response cache */
+/* Timer callback to be called when it's time to destroy response cache */
+static void on_cache_timeout(pj_timer_heap_t *timer_heap,
+ struct pj_timer_entry *entry)
+{
+ pj_stun_tx_data *tdata;
+
+ PJ_UNUSED_ARG(timer_heap);
+
+ /* Mark the timer inactive before destroying the entry's owner. */
+ entry->id = PJ_FALSE;
+ tdata = (pj_stun_tx_data*) entry->user_data;
+
+ PJ_LOG(5,(SNAME(tdata->sess), "Response cache deleted"));
+
+ pj_list_erase(tdata);
+ pj_stun_msg_destroy_tdata(tdata->sess, tdata);
+}
+
+/* Decorate an outgoing message with session-level attributes:
+ * SOFTWARE, and (when auth_info provides credential material)
+ * USERNAME, REALM, NONCE, MESSAGE-INTEGRITY; optionally FINGERPRINT.
+ */
+static pj_status_t apply_msg_options(pj_stun_session *sess,
+ pj_pool_t *pool,
+ const pj_stun_req_cred_info *auth_info,
+ pj_stun_msg *msg)
+{
+ pj_status_t status = 0;
+ pj_str_t realm, username, nonce, auth_key;
+
+ /* If the agent is sending a request, it SHOULD add a SOFTWARE attribute
+ * to the request. The server SHOULD include a SOFTWARE attribute in all
+ * responses.
+ *
+ * If magic value is not PJ_STUN_MAGIC, only apply the attribute for
+ * responses.
+ */
+ if (sess->srv_name.slen &&
+ pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_SOFTWARE, 0)==NULL &&
+ (PJ_STUN_IS_RESPONSE(msg->hdr.type) ||
+ (PJ_STUN_IS_REQUEST(msg->hdr.type) && msg->hdr.magic==PJ_STUN_MAGIC)))
+ {
+ pj_stun_msg_add_string_attr(pool, msg, PJ_STUN_ATTR_SOFTWARE,
+ &sess->srv_name);
+ }
+
+ if (pj_stun_auth_valid_for_msg(msg) && auth_info) {
+ realm = auth_info->realm;
+ username = auth_info->username;
+ nonce = auth_info->nonce;
+ auth_key = auth_info->auth_key;
+ } else {
+ /* No credential: leave all auth-related attributes out. */
+ realm.slen = username.slen = nonce.slen = auth_key.slen = 0;
+ }
+
+ /* Create and add USERNAME attribute if needed */
+ if (username.slen && PJ_STUN_IS_REQUEST(msg->hdr.type)) {
+ status = pj_stun_msg_add_string_attr(pool, msg,
+ PJ_STUN_ATTR_USERNAME,
+ &username);
+ PJ_ASSERT_RETURN(status==PJ_SUCCESS, status);
+ }
+
+ /* Add REALM only when long term credential is used */
+ if (realm.slen && PJ_STUN_IS_REQUEST(msg->hdr.type)) {
+ status = pj_stun_msg_add_string_attr(pool, msg,
+ PJ_STUN_ATTR_REALM,
+ &realm);
+ PJ_ASSERT_RETURN(status==PJ_SUCCESS, status);
+ }
+
+ /* Add NONCE when desired */
+ if (nonce.slen &&
+ (PJ_STUN_IS_REQUEST(msg->hdr.type) ||
+ PJ_STUN_IS_ERROR_RESPONSE(msg->hdr.type)))
+ {
+ status = pj_stun_msg_add_string_attr(pool, msg,
+ PJ_STUN_ATTR_NONCE,
+ &nonce);
+ }
+
+ /* Add MESSAGE-INTEGRITY attribute; the HMAC itself is computed
+ * later during encoding, using auth_key.
+ */
+ if (username.slen && auth_key.slen) {
+ status = pj_stun_msg_add_msgint_attr(pool, msg);
+ PJ_ASSERT_RETURN(status==PJ_SUCCESS, status);
+ }
+
+
+ /* Add FINGERPRINT attribute if necessary */
+ if (sess->use_fingerprint) {
+ status = pj_stun_msg_add_uint_attr(pool, msg,
+ PJ_STUN_ATTR_FINGERPRINT, 0);
+ PJ_ASSERT_RETURN(status==PJ_SUCCESS, status);
+ }
+
+ return PJ_SUCCESS;
+}
+
+/* Handle a 401/438 long-term-credential challenge on a completed
+ * request: save the new nonce/realm and transparently retry the
+ * request with authentication (up to 3 retries). *notify_user is
+ * cleared when the retry is taken over internally, so the caller
+ * knows not to invoke on_request_complete.
+ */
+static pj_status_t handle_auth_challenge(pj_stun_session *sess,
+ const pj_stun_tx_data *request,
+ const pj_stun_msg *response,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len,
+ pj_bool_t *notify_user)
+{
+ const pj_stun_errcode_attr *ea;
+
+ *notify_user = PJ_TRUE;
+
+ if (response==NULL)
+ return PJ_SUCCESS;
+
+ if (sess->auth_type != PJ_STUN_AUTH_LONG_TERM)
+ return PJ_SUCCESS;
+
+ if (!PJ_STUN_IS_ERROR_RESPONSE(response->hdr.type)) {
+ /* Success response resets the retry counter. */
+ sess->auth_retry = 0;
+ return PJ_SUCCESS;
+ }
+
+ ea = (const pj_stun_errcode_attr*)
+ pj_stun_msg_find_attr(response, PJ_STUN_ATTR_ERROR_CODE, 0);
+ if (!ea) {
+ PJ_LOG(4,(SNAME(sess), "Invalid error response: no ERROR-CODE"
+ " attribute"));
+ *notify_user = PJ_FALSE;
+ return PJNATH_EINSTUNMSG;
+ }
+
+ if (ea->err_code == PJ_STUN_SC_UNAUTHORIZED ||
+ ea->err_code == PJ_STUN_SC_STALE_NONCE)
+ {
+ const pj_stun_nonce_attr *anonce;
+ const pj_stun_realm_attr *arealm;
+ pj_stun_tx_data *tdata;
+ unsigned i;
+ pj_status_t status;
+
+ anonce = (const pj_stun_nonce_attr*)
+ pj_stun_msg_find_attr(response, PJ_STUN_ATTR_NONCE, 0);
+ if (!anonce) {
+ PJ_LOG(4,(SNAME(sess), "Invalid response: missing NONCE"));
+ *notify_user = PJ_FALSE;
+ return PJNATH_EINSTUNMSG;
+ }
+
+ /* Bail out if we've supplied the correct nonce */
+ if (pj_strcmp(&anonce->value, &sess->next_nonce)==0) {
+ return PJ_SUCCESS;
+ }
+
+ /* Bail out if we've tried too many */
+ if (++sess->auth_retry > 3) {
+ PJ_LOG(4,(SNAME(sess), "Error: authentication failed (too "
+ "many retries)"));
+ return PJ_STATUS_FROM_STUN_CODE(401);
+ }
+
+ /* Save next_nonce */
+ pj_strdup(sess->pool, &sess->next_nonce, &anonce->value);
+
+ /* Copy the realm from the response */
+ arealm = (pj_stun_realm_attr*)
+ pj_stun_msg_find_attr(response, PJ_STUN_ATTR_REALM, 0);
+ if (arealm) {
+ pj_strdup(sess->pool, &sess->server_realm, &arealm->value);
+ /* Strip trailing NUL bytes from the copied realm. */
+ while (sess->server_realm.slen &&
+ !sess->server_realm.ptr[sess->server_realm.slen-1])
+ {
+ --sess->server_realm.slen;
+ }
+ }
+
+ /* Create new request */
+ status = pj_stun_session_create_req(sess, request->msg->hdr.type,
+ request->msg->hdr.magic,
+ NULL, &tdata);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ /* Duplicate all the attributes in the old request, except
+ * USERNAME, REALM, M-I, and NONCE, which will be filled in
+ * later.
+ */
+ for (i=0; i<request->msg->attr_count; ++i) {
+ const pj_stun_attr_hdr *asrc = request->msg->attr[i];
+
+ if (asrc->type == PJ_STUN_ATTR_USERNAME ||
+ asrc->type == PJ_STUN_ATTR_REALM ||
+ asrc->type == PJ_STUN_ATTR_MESSAGE_INTEGRITY ||
+ asrc->type == PJ_STUN_ATTR_NONCE)
+ {
+ continue;
+ }
+
+ tdata->msg->attr[tdata->msg->attr_count++] =
+ pj_stun_attr_clone(tdata->pool, asrc);
+ }
+
+ /* Will retry the request with authentication, no need to
+ * notify user.
+ */
+ *notify_user = PJ_FALSE;
+
+ PJ_LOG(4,(SNAME(sess), "Retrying request with new authentication"));
+
+ /* Retry the request */
+ status = pj_stun_session_send_msg(sess, request->token, PJ_TRUE,
+ request->retransmit, src_addr,
+ src_addr_len, tdata);
+
+ } else {
+ sess->auth_retry = 0;
+ }
+
+ return PJ_SUCCESS;
+}
+
+/* Client transaction completion callback: run the auth-challenge
+ * handler, notify the application (unless the retry was handled
+ * internally), then destroy the tdata. Honors a deferred session
+ * destroy requested from inside the callback.
+ */
+static void stun_tsx_on_complete(pj_stun_client_tsx *tsx,
+ pj_status_t status,
+ const pj_stun_msg *response,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len)
+{
+ pj_stun_session *sess;
+ pj_bool_t notify_user = PJ_TRUE;
+ pj_stun_tx_data *tdata;
+
+ tdata = (pj_stun_tx_data*) pj_stun_client_tsx_get_data(tsx);
+ sess = tdata->sess;
+
+ /* Lock the session and prevent user from destroying us in the callback */
+ pj_atomic_inc(sess->busy);
+ pj_lock_acquire(sess->lock);
+
+ /* Handle authentication challenge */
+ handle_auth_challenge(sess, tdata, response, src_addr,
+ src_addr_len, &notify_user);
+
+ if (notify_user && sess->cb.on_request_complete) {
+ (*sess->cb.on_request_complete)(sess, status, tdata->token, tdata,
+ response, src_addr, src_addr_len);
+ }
+
+ /* Destroy the transmit data. This will remove the transaction
+ * from the pending list too.
+ */
+ pj_stun_msg_destroy_tdata(sess, tdata);
+ tdata = NULL;
+
+ pj_lock_release(sess->lock);
+
+ /* If the app requested destroy while we were busy, do it now. */
+ if (pj_atomic_dec_and_get(sess->busy)==0 && sess->destroy_request) {
+ pj_stun_session_destroy(sess);
+ return;
+ }
+}
+
+/* Client transaction send callback: forward the encoded packet to
+ * the application's on_send_msg with the tdata's destination.
+ * Honors a deferred session destroy requested during the callback.
+ */
+static pj_status_t stun_tsx_on_send_msg(pj_stun_client_tsx *tsx,
+ const void *stun_pkt,
+ pj_size_t pkt_size)
+{
+ pj_stun_tx_data *tdata;
+ pj_stun_session *sess;
+ pj_status_t status;
+
+ tdata = (pj_stun_tx_data*) pj_stun_client_tsx_get_data(tsx);
+ sess = tdata->sess;
+
+ /* Lock the session and prevent user from destroying us in the callback */
+ pj_atomic_inc(sess->busy);
+ pj_lock_acquire(sess->lock);
+
+ status = sess->cb.on_send_msg(tdata->sess, tdata->token, stun_pkt,
+ pkt_size, tdata->dst_addr,
+ tdata->addr_len);
+ pj_lock_release(sess->lock);
+
+ if (pj_atomic_dec_and_get(sess->busy)==0 && sess->destroy_request) {
+ pj_stun_session_destroy(sess);
+ /* Tell the transaction its session is gone. */
+ return PJNATH_ESTUNDESTROYED;
+ } else {
+ return status;
+ }
+}
+
+/* **************************************************************************/
+
+PJ_DEF(pj_status_t) pj_stun_session_create( pj_stun_config *cfg,
+ const char *name,
+ const pj_stun_session_cb *cb,
+ pj_bool_t fingerprint,
+ pj_stun_session **p_sess)
+{
+ /* Create a STUN session: allocates its pool and rx pool, copies the
+ * callback table, sets a default SOFTWARE name ("pjnath-<version>"),
+ * and creates the recursive lock and busy counter.
+ */
+ pj_pool_t *pool;
+ pj_stun_session *sess;
+ pj_status_t status;
+
+ PJ_ASSERT_RETURN(cfg && cb && p_sess, PJ_EINVAL);
+
+ if (name==NULL)
+ name = "stuse%p";
+
+ pool = pj_pool_create(cfg->pf, name, PJNATH_POOL_LEN_STUN_SESS,
+ PJNATH_POOL_INC_STUN_SESS, NULL);
+ PJ_ASSERT_RETURN(pool, PJ_ENOMEM);
+
+ sess = PJ_POOL_ZALLOC_T(pool, pj_stun_session);
+ sess->cfg = cfg;
+ sess->pool = pool;
+ pj_memcpy(&sess->cb, cb, sizeof(*cb));
+ sess->use_fingerprint = fingerprint;
+ sess->log_flag = 0xFFFF; /* log everything by default */
+
+ sess->srv_name.ptr = (char*) pj_pool_alloc(pool, 32);
+ sess->srv_name.slen = pj_ansi_snprintf(sess->srv_name.ptr, 32,
+ "pjnath-%s", pj_get_version());
+
+ sess->rx_pool = pj_pool_create(sess->cfg->pf, name,
+ PJNATH_POOL_LEN_STUN_TDATA,
+ PJNATH_POOL_INC_STUN_TDATA, NULL);
+
+ pj_list_init(&sess->pending_request_list);
+ pj_list_init(&sess->cached_response_list);
+
+ status = pj_lock_create_recursive_mutex(pool, name, &sess->lock);
+ if (status != PJ_SUCCESS) {
+ pj_pool_release(pool);
+ return status;
+ }
+ sess->delete_lock = PJ_TRUE;
+
+ status = pj_atomic_create(pool, 0, &sess->busy);
+ if (status != PJ_SUCCESS) {
+ pj_lock_destroy(sess->lock);
+ pj_pool_release(pool);
+ return status;
+ }
+
+ *p_sess = sess;
+
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(pj_status_t) pj_stun_session_destroy(pj_stun_session *sess)
+{
+ /* Destroy the session and all pending/cached tdata. If a callback
+ * is currently executing (busy > 0), only mark destroy_request and
+ * return PJ_EPENDING; the callback unwinder finishes the destroy.
+ */
+ PJ_ASSERT_RETURN(sess, PJ_EINVAL);
+
+ pj_lock_acquire(sess->lock);
+
+ /* Can't destroy if we're in a callback */
+ sess->destroy_request = PJ_TRUE;
+ if (pj_atomic_get(sess->busy)) {
+ pj_lock_release(sess->lock);
+ return PJ_EPENDING;
+ }
+
+ while (!pj_list_empty(&sess->pending_request_list)) {
+ pj_stun_tx_data *tdata = sess->pending_request_list.next;
+ destroy_tdata(tdata, PJ_TRUE);
+ }
+
+ while (!pj_list_empty(&sess->cached_response_list)) {
+ pj_stun_tx_data *tdata = sess->cached_response_list.next;
+ destroy_tdata(tdata, PJ_TRUE);
+ }
+ pj_lock_release(sess->lock);
+
+ if (sess->delete_lock) {
+ pj_lock_destroy(sess->lock);
+ }
+
+ if (sess->rx_pool) {
+ pj_pool_release(sess->rx_pool);
+ sess->rx_pool = NULL;
+ }
+
+ /* Releasing the session pool frees the session itself. */
+ pj_pool_release(sess->pool);
+
+ return PJ_SUCCESS;
+}
+
+
+/* Attach opaque application data to the session (thread-safe). */
+PJ_DEF(pj_status_t) pj_stun_session_set_user_data( pj_stun_session *sess,
+ void *user_data)
+{
+ PJ_ASSERT_RETURN(sess, PJ_EINVAL);
+ pj_lock_acquire(sess->lock);
+ sess->user_data = user_data;
+ pj_lock_release(sess->lock);
+ return PJ_SUCCESS;
+}
+
+/* Retrieve the opaque application data previously set. */
+PJ_DEF(void*) pj_stun_session_get_user_data(pj_stun_session *sess)
+{
+ PJ_ASSERT_RETURN(sess, NULL);
+ return sess->user_data;
+}
+
+/* Replace the session's lock object. The previous lock is destroyed
+ * unconditionally after the swap; auto_del controls whether the NEW
+ * lock will be destroyed together with the session.
+ */
+PJ_DEF(pj_status_t) pj_stun_session_set_lock( pj_stun_session *sess,
+ pj_lock_t *lock,
+ pj_bool_t auto_del)
+{
+ pj_lock_t *old_lock = sess->lock;
+ pj_bool_t old_del;
+
+ PJ_ASSERT_RETURN(sess && lock, PJ_EINVAL);
+
+ pj_lock_acquire(old_lock);
+ sess->lock = lock;
+ old_del = sess->delete_lock;
+ sess->delete_lock = auto_del;
+ pj_lock_release(old_lock);
+
+ if (old_lock)
+ pj_lock_destroy(old_lock);
+
+ return PJ_SUCCESS;
+}
+
+/*
+ * Set the SOFTWARE name to be advertised in outgoing messages.
+ * Passing NULL or an empty string clears the name.
+ */
+PJ_DEF(pj_status_t) pj_stun_session_set_software_name(pj_stun_session *sess,
+						      const pj_str_t *sw)
+{
+    PJ_ASSERT_RETURN(sess, PJ_EINVAL);
+
+    if (sw == NULL || sw->slen == 0)
+	sess->srv_name.slen = 0;
+    else
+	pj_strdup(sess->pool, &sess->srv_name, sw);
+
+    return PJ_SUCCESS;
+}
+
+/*
+ * Install the credential used to authenticate messages. When cred is
+ * NULL, authentication is disabled (auth type forced to NONE) and any
+ * previously stored credential is wiped.
+ */
+PJ_DEF(pj_status_t) pj_stun_session_set_credential(pj_stun_session *sess,
+						   pj_stun_auth_type auth_type,
+						   const pj_stun_auth_cred *cred)
+{
+    PJ_ASSERT_RETURN(sess, PJ_EINVAL);
+
+    if (cred == NULL) {
+	/* Clearing the credential overrides the requested auth type */
+	sess->auth_type = PJ_STUN_AUTH_NONE;
+	pj_bzero(&sess->cred, sizeof(sess->cred));
+	return PJ_SUCCESS;
+    }
+
+    sess->auth_type = auth_type;
+    pj_stun_auth_cred_dup(sess->pool, &sess->cred, cred);
+
+    return PJ_SUCCESS;
+}
+
+/* Select which message directions (TX/RX request/response/indication)
+ * are dumped to the log; flags are the PJ_STUN_SESS_LOG_* bits tested
+ * by dump_tx_msg()/dump_rx_msg().
+ */
+PJ_DEF(void) pj_stun_session_set_log( pj_stun_session *sess,
+				      unsigned flags)
+{
+    PJ_ASSERT_ON_FAIL(sess, return);
+    sess->log_flag = flags;
+}
+
+/*
+ * Enable/disable the FINGERPRINT attribute in outgoing messages.
+ * Returns the previous setting.
+ */
+PJ_DEF(pj_bool_t) pj_stun_session_use_fingerprint(pj_stun_session *sess,
+						  pj_bool_t use)
+{
+    pj_bool_t prev;
+
+    PJ_ASSERT_RETURN(sess, PJ_FALSE);
+
+    prev = sess->use_fingerprint;
+    sess->use_fingerprint = use;
+
+    return prev;
+}
+
+/* Fill tdata->auth_info (realm, username, nonce and the derived
+ * authentication key) from the session credential, for use when an
+ * outgoing request is encoded. For dynamic credentials the values are
+ * obtained through the application's get_cred() callback.
+ */
+static pj_status_t get_auth(pj_stun_session *sess,
+			    pj_stun_tx_data *tdata)
+{
+    if (sess->cred.type == PJ_STUN_AUTH_CRED_STATIC) {
+	/* Use the realm learned from the server rather than the one in
+	 * the static credential.
+	 */
+	//tdata->auth_info.realm = sess->cred.data.static_cred.realm;
+	tdata->auth_info.realm = sess->server_realm;
+	tdata->auth_info.username = sess->cred.data.static_cred.username;
+	tdata->auth_info.nonce = sess->cred.data.static_cred.nonce;
+
+	pj_stun_create_key(tdata->pool, &tdata->auth_info.auth_key,
+			   &tdata->auth_info.realm,
+			   &tdata->auth_info.username,
+			   sess->cred.data.static_cred.data_type,
+			   &sess->cred.data.static_cred.data);
+
+    } else if (sess->cred.type == PJ_STUN_AUTH_CRED_DYNAMIC) {
+	pj_str_t password;
+	void *user_data = sess->cred.data.dyn_cred.user_data;
+	pj_stun_passwd_type data_type = PJ_STUN_PASSWD_PLAIN;
+	pj_status_t rc;
+
+	/* Ask the application for the credential of this message */
+	rc = (*sess->cred.data.dyn_cred.get_cred)(tdata->msg, user_data,
+						  tdata->pool,
+						  &tdata->auth_info.realm,
+						  &tdata->auth_info.username,
+						  &tdata->auth_info.nonce,
+						  &data_type, &password);
+	if (rc != PJ_SUCCESS)
+	    return rc;
+
+	pj_stun_create_key(tdata->pool, &tdata->auth_info.auth_key,
+			   &tdata->auth_info.realm, &tdata->auth_info.username,
+			   data_type, &password);
+
+    } else {
+	pj_assert(!"Unknown credential type");
+	return PJ_EBUG;
+    }
+
+    return PJ_SUCCESS;
+}
+
+/*
+ * Create an outgoing STUN request tdata. The request's transaction ID
+ * is saved as the tdata key for matching incoming responses later.
+ * Authentication info is attached according to the session auth type:
+ * always for short-term credential, and for long-term credential only
+ * once a nonce has been learned from a previous server response.
+ */
+PJ_DEF(pj_status_t) pj_stun_session_create_req(pj_stun_session *sess,
+					       int method,
+					       pj_uint32_t magic,
+					       const pj_uint8_t tsx_id[12],
+					       pj_stun_tx_data **p_tdata)
+{
+    pj_stun_tx_data *tdata = NULL;
+    pj_status_t status;
+
+    PJ_ASSERT_RETURN(sess && p_tdata, PJ_EINVAL);
+
+    status = create_tdata(sess, &tdata);
+    if (status != PJ_SUCCESS)
+	return status;
+
+    /* Create STUN message */
+    status = pj_stun_msg_create(tdata->pool, method, magic,
+				tsx_id, &tdata->msg);
+    if (status != PJ_SUCCESS) {
+	pj_pool_release(tdata->pool);
+	return status;
+    }
+
+    /* copy the request's transaction ID as the transaction key. */
+    pj_assert(sizeof(tdata->msg_key)==sizeof(tdata->msg->hdr.tsx_id));
+    tdata->msg_magic = tdata->msg->hdr.magic;
+    pj_memcpy(tdata->msg_key, tdata->msg->hdr.tsx_id,
+	      sizeof(tdata->msg->hdr.tsx_id));
+
+
+    /* Get authentication information for the request */
+    if (sess->auth_type == PJ_STUN_AUTH_NONE) {
+	/* No authentication */
+
+    } else if (sess->auth_type == PJ_STUN_AUTH_SHORT_TERM) {
+	/* MUST put authentication in request */
+	status = get_auth(sess, tdata);
+	if (status != PJ_SUCCESS) {
+	    pj_pool_release(tdata->pool);
+	    return status;
+	}
+
+    } else if (sess->auth_type == PJ_STUN_AUTH_LONG_TERM) {
+	/* Only put authentication information if we've received
+	 * response from server.
+	 */
+	if (sess->next_nonce.slen != 0) {
+	    status = get_auth(sess, tdata);
+	    if (status != PJ_SUCCESS) {
+		pj_pool_release(tdata->pool);
+		return status;
+	    }
+	    /* Override with the nonce/realm announced by the server */
+	    tdata->auth_info.nonce = sess->next_nonce;
+	    tdata->auth_info.realm = sess->server_realm;
+	}
+
+    } else {
+	pj_assert(!"Invalid authentication type");
+	pj_pool_release(tdata->pool);
+	return PJ_EBUG;
+    }
+
+    *p_tdata = tdata;
+    return PJ_SUCCESS;
+}
+
+/*
+ * Create a transmit buffer holding a STUN indication message of the
+ * given type (the indication class bit is set here).
+ */
+PJ_DEF(pj_status_t) pj_stun_session_create_ind(pj_stun_session *sess,
+					       int msg_type,
+					       pj_stun_tx_data **p_tdata)
+{
+    pj_stun_tx_data *tdata = NULL;
+    pj_status_t rc;
+
+    PJ_ASSERT_RETURN(sess && p_tdata, PJ_EINVAL);
+
+    rc = create_tdata(sess, &tdata);
+    if (rc != PJ_SUCCESS)
+	return rc;
+
+    /* Build the message with the indication class bit forced on, the
+     * standard magic value, and no caller-supplied transaction ID.
+     */
+    rc = pj_stun_msg_create(tdata->pool, msg_type | PJ_STUN_INDICATION_BIT,
+			    PJ_STUN_MAGIC, NULL, &tdata->msg);
+    if (rc != PJ_SUCCESS) {
+	pj_pool_release(tdata->pool);
+	return rc;
+    }
+
+    *p_tdata = tdata;
+    return PJ_SUCCESS;
+}
+
+/*
+ * Create a STUN response message for the request carried in rdata.
+ * err_code zero produces a success response, non-zero an error
+ * response with the optional reason text.
+ */
+PJ_DEF(pj_status_t) pj_stun_session_create_res( pj_stun_session *sess,
+						const pj_stun_rx_data *rdata,
+						unsigned err_code,
+						const pj_str_t *err_msg,
+						pj_stun_tx_data **p_tdata)
+{
+    pj_stun_tx_data *tdata = NULL;
+    pj_status_t rc;
+
+    rc = create_tdata(sess, &tdata);
+    if (rc != PJ_SUCCESS)
+	return rc;
+
+    /* Build the response message from the request */
+    rc = pj_stun_msg_create_response(tdata->pool, rdata->msg,
+				     err_code, err_msg, &tdata->msg);
+    if (rc != PJ_SUCCESS) {
+	pj_pool_release(tdata->pool);
+	return rc;
+    }
+
+    /* Key the tdata with the request's magic and transaction ID so
+     * retransmitted requests can be matched against it.
+     */
+    pj_assert(sizeof(tdata->msg_key)==sizeof(rdata->msg->hdr.tsx_id));
+    tdata->msg_magic = rdata->msg->hdr.magic;
+    pj_memcpy(tdata->msg_key, rdata->msg->hdr.tsx_id,
+	      sizeof(rdata->msg->hdr.tsx_id));
+
+    /* Carry over the credential info found in the request */
+    pj_stun_req_cred_info_dup(tdata->pool, &tdata->auth_info, &rdata->info);
+
+    *p_tdata = tdata;
+
+    return PJ_SUCCESS;
+}
+
+
+/* Print outgoing message to log, subject to the TX logging bits in
+ * sess->log_flag (see pj_stun_session_set_log()). pkt_size is used for
+ * the log text only; the dump itself is produced by pj_stun_msg_dump()
+ * into the session's dump buffer.
+ */
+static void dump_tx_msg(pj_stun_session *sess, const pj_stun_msg *msg,
+			unsigned pkt_size, const pj_sockaddr_t *addr)
+{
+    char dst_name[PJ_INET6_ADDRSTRLEN+10];
+
+    /* Skip when logging is disabled for this message class */
+    if ((PJ_STUN_IS_REQUEST(msg->hdr.type) &&
+	 (sess->log_flag & PJ_STUN_SESS_LOG_TX_REQ)==0) ||
+	(PJ_STUN_IS_RESPONSE(msg->hdr.type) &&
+	 (sess->log_flag & PJ_STUN_SESS_LOG_TX_RES)==0) ||
+	(PJ_STUN_IS_INDICATION(msg->hdr.type) &&
+	 (sess->log_flag & PJ_STUN_SESS_LOG_TX_IND)==0))
+    {
+	return;
+    }
+
+    pj_sockaddr_print(addr, dst_name, sizeof(dst_name), 3);
+
+    PJ_LOG(5,(SNAME(sess),
+	      "TX %d bytes STUN message to %s:\n"
+	      "--- begin STUN message ---\n"
+	      "%s"
+	      "--- end of STUN message ---\n",
+	      pkt_size, dst_name,
+	      pj_stun_msg_dump(msg, sess->dump_buf, sizeof(sess->dump_buf),
+			       NULL)));
+
+}
+
+
+/*
+ * Encode and transmit a previously created message (tdata).
+ *
+ * For requests, a STUN client transaction is created to drive
+ * retransmissions and the tdata is added to the pending request list;
+ * completion is reported later via on_request_complete(). Responses
+ * and indications are pushed straight to the on_send_msg() callback,
+ * and a response may additionally be cached (cache_res) so that
+ * retransmitted requests are answered from the cache until the cache
+ * timer fires.
+ *
+ * On failure the tdata is destroyed. If the application requested
+ * session destruction from inside a callback invoked here, the
+ * session is destroyed before returning and PJNATH_ESTUNDESTROYED is
+ * returned.
+ */
+PJ_DEF(pj_status_t) pj_stun_session_send_msg( pj_stun_session *sess,
+					      void *token,
+					      pj_bool_t cache_res,
+					      pj_bool_t retransmit,
+					      const pj_sockaddr_t *server,
+					      unsigned addr_len,
+					      pj_stun_tx_data *tdata)
+{
+    pj_status_t status;
+
+    PJ_ASSERT_RETURN(sess && addr_len && server && tdata, PJ_EINVAL);
+
+    pj_log_push_indent();
+
+    /* Allocate packet */
+    tdata->max_len = PJ_STUN_MAX_PKT_LEN;
+    tdata->pkt = pj_pool_alloc(tdata->pool, tdata->max_len);
+
+    tdata->token = token;
+    tdata->retransmit = retransmit;
+
+    /* Lock the session and prevent user from destroying us in the callback */
+    pj_atomic_inc(sess->busy);
+    pj_lock_acquire(sess->lock);
+
+    /* Apply options (credential attributes, SOFTWARE, fingerprint, ..) */
+    status = apply_msg_options(sess, tdata->pool, &tdata->auth_info,
+			       tdata->msg);
+    if (status != PJ_SUCCESS) {
+	pj_stun_msg_destroy_tdata(sess, tdata);
+	LOG_ERR_(sess, "Error applying options", status);
+	goto on_return;
+    }
+
+    /* Encode message */
+    status = pj_stun_msg_encode(tdata->msg, (pj_uint8_t*)tdata->pkt,
+				tdata->max_len, 0,
+				&tdata->auth_info.auth_key,
+				&tdata->pkt_size);
+    if (status != PJ_SUCCESS) {
+	pj_stun_msg_destroy_tdata(sess, tdata);
+	LOG_ERR_(sess, "STUN encode() error", status);
+	goto on_return;
+    }
+
+    /* Dump packet */
+    dump_tx_msg(sess, tdata->msg, tdata->pkt_size, server);
+
+    /* If this is a STUN request message, then send the request with
+     * a new STUN client transaction.
+     */
+    if (PJ_STUN_IS_REQUEST(tdata->msg->hdr.type)) {
+
+	/* Create STUN client transaction */
+	status = pj_stun_client_tsx_create(sess->cfg, tdata->pool,
+					   &tsx_cb, &tdata->client_tsx);
+	if (status != PJ_SUCCESS) {
+	    /* BUG FIX: this used to be PJ_ASSERT_RETURN(), which on
+	     * failure returned without releasing the session lock,
+	     * without decrementing the busy counter and without
+	     * destroying tdata, leaving the session permanently
+	     * locked. Route the error through the common exit path.
+	     */
+	    pj_stun_msg_destroy_tdata(sess, tdata);
+	    LOG_ERR_(sess, "Error creating STUN client transaction",
+		     status);
+	    goto on_return;
+	}
+	pj_stun_client_tsx_set_data(tdata->client_tsx, (void*)tdata);
+
+	/* Save the remote address */
+	tdata->addr_len = addr_len;
+	tdata->dst_addr = server;
+
+	/* Send the request! */
+	status = pj_stun_client_tsx_send_msg(tdata->client_tsx, retransmit,
+					     tdata->pkt, tdata->pkt_size);
+	if (status != PJ_SUCCESS && status != PJ_EPENDING) {
+	    pj_stun_msg_destroy_tdata(sess, tdata);
+	    LOG_ERR_(sess, "Error sending STUN request", status);
+	    goto on_return;
+	}
+
+	/* Add to pending request list */
+	tsx_add(sess, tdata);
+
+    } else {
+	if (cache_res &&
+	    (PJ_STUN_IS_SUCCESS_RESPONSE(tdata->msg->hdr.type) ||
+	     PJ_STUN_IS_ERROR_RESPONSE(tdata->msg->hdr.type)))
+	{
+	    /* Requested to keep the response in the cache; schedule the
+	     * timer that will evict it after res_cache_msec.
+	     */
+	    pj_time_val timeout;
+
+	    pj_memset(&tdata->res_timer, 0, sizeof(tdata->res_timer));
+	    pj_timer_entry_init(&tdata->res_timer, PJ_TRUE, tdata,
+				&on_cache_timeout);
+
+	    timeout.sec = sess->cfg->res_cache_msec / 1000;
+	    timeout.msec = sess->cfg->res_cache_msec % 1000;
+
+	    status = pj_timer_heap_schedule(sess->cfg->timer_heap,
+					    &tdata->res_timer,
+					    &timeout);
+	    if (status != PJ_SUCCESS) {
+		tdata->res_timer.id = PJ_FALSE;
+		pj_stun_msg_destroy_tdata(sess, tdata);
+		LOG_ERR_(sess, "Error scheduling response timer", status);
+		goto on_return;
+	    }
+
+	    pj_list_push_back(&sess->cached_response_list, tdata);
+	}
+
+	/* Otherwise for non-request message, send directly to transport. */
+	status = sess->cb.on_send_msg(sess, token, tdata->pkt,
+				      tdata->pkt_size, server, addr_len);
+
+	if (status != PJ_SUCCESS && status != PJ_EPENDING) {
+	    pj_stun_msg_destroy_tdata(sess, tdata);
+	    LOG_ERR_(sess, "Error sending STUN request", status);
+	    goto on_return;
+	}
+
+	/* Destroy only when response is not cached (the cache timer
+	 * owns the tdata while it is in the cache).
+	 */
+	if (tdata->res_timer.id == 0) {
+	    pj_stun_msg_destroy_tdata(sess, tdata);
+	}
+    }
+
+on_return:
+    pj_lock_release(sess->lock);
+
+    pj_log_pop_indent();
+
+    /* Check if application has called destroy() in the callback */
+    if (pj_atomic_dec_and_get(sess->busy)==0 && sess->destroy_request) {
+	pj_stun_session_destroy(sess);
+	return PJNATH_ESTUNDESTROYED;
+    }
+
+    return status;
+}
+
+
+/*
+ * Convenience wrapper: build a response for the request in rdata and
+ * transmit it immediately (optionally caching it for retransmitted
+ * requests).
+ */
+PJ_DEF(pj_status_t) pj_stun_session_respond( pj_stun_session *sess,
+					     const pj_stun_rx_data *rdata,
+					     unsigned code,
+					     const char *errmsg,
+					     void *token,
+					     pj_bool_t cache,
+					     const pj_sockaddr_t *dst_addr,
+					     unsigned addr_len)
+{
+    pj_stun_tx_data *tdata;
+    pj_str_t reason;
+    const pj_str_t *p_reason = NULL;
+    pj_status_t rc;
+
+    if (errmsg)
+	p_reason = pj_cstr(&reason, errmsg);
+
+    rc = pj_stun_session_create_res(sess, rdata, code, p_reason, &tdata);
+    if (rc != PJ_SUCCESS)
+	return rc;
+
+    /* Responses are never retransmitted by us (retransmit==PJ_FALSE) */
+    return pj_stun_session_send_msg(sess, token, cache, PJ_FALSE,
+				    dst_addr, addr_len, tdata);
+}
+
+
+/*
+ * Cancel outgoing STUN transaction. When notify is true, the
+ * on_request_complete() callback is first invoked with notify_status
+ * (which must be a failure code); the tdata and its client transaction
+ * are then destroyed. If the application requested session destruction
+ * from within the callback, the session is destroyed here and
+ * PJNATH_ESTUNDESTROYED is returned.
+ */
+PJ_DEF(pj_status_t) pj_stun_session_cancel_req( pj_stun_session *sess,
+						pj_stun_tx_data *tdata,
+						pj_bool_t notify,
+						pj_status_t notify_status)
+{
+    PJ_ASSERT_RETURN(sess && tdata, PJ_EINVAL);
+    PJ_ASSERT_RETURN(!notify || notify_status!=PJ_SUCCESS, PJ_EINVAL);
+    PJ_ASSERT_RETURN(PJ_STUN_IS_REQUEST(tdata->msg->hdr.type), PJ_EINVAL);
+
+    /* Lock the session and prevent user from destroying us in the callback */
+    pj_atomic_inc(sess->busy);
+    pj_lock_acquire(sess->lock);
+
+    if (notify) {
+	(sess->cb.on_request_complete)(sess, notify_status, tdata->token,
+				       tdata, NULL, NULL, 0);
+    }
+
+    /* Just destroy tdata. This will destroy the transaction as well */
+    pj_stun_msg_destroy_tdata(sess, tdata);
+
+    pj_lock_release(sess->lock);
+
+    /* Perform deferred destruction if it was requested in the callback */
+    if (pj_atomic_dec_and_get(sess->busy)==0 && sess->destroy_request) {
+	pj_stun_session_destroy(sess);
+	return PJNATH_ESTUNDESTROYED;
+    }
+
+    return PJ_SUCCESS;
+}
+
+/*
+ * Explicitly request retransmission of the request. The actual resend
+ * is delegated to the client transaction owned by tdata. May destroy
+ * the session (returning PJNATH_ESTUNDESTROYED) if destruction was
+ * requested from a callback while we were busy.
+ */
+PJ_DEF(pj_status_t) pj_stun_session_retransmit_req(pj_stun_session *sess,
+						   pj_stun_tx_data *tdata)
+{
+    pj_status_t status;
+
+    PJ_ASSERT_RETURN(sess && tdata, PJ_EINVAL);
+    PJ_ASSERT_RETURN(PJ_STUN_IS_REQUEST(tdata->msg->hdr.type), PJ_EINVAL);
+
+    /* Lock the session and prevent user from destroying us in the callback */
+    pj_atomic_inc(sess->busy);
+    pj_lock_acquire(sess->lock);
+
+    status = pj_stun_client_tsx_retransmit(tdata->client_tsx);
+
+    pj_lock_release(sess->lock);
+
+    /* Perform deferred destruction if it was requested in the callback */
+    if (pj_atomic_dec_and_get(sess->busy)==0 && sess->destroy_request) {
+	pj_stun_session_destroy(sess);
+	return PJNATH_ESTUNDESTROYED;
+    }
+
+    return status;
+}
+
+
+/* Encode a response message into a buffer taken from 'pool' and hand
+ * it to the application's on_send_msg() callback. For a retransmission
+ * the message attributes are already complete, so apply_msg_options()
+ * is skipped to avoid modifying the cached message.
+ */
+static pj_status_t send_response(pj_stun_session *sess, void *token,
+				 pj_pool_t *pool, pj_stun_msg *response,
+				 const pj_stun_req_cred_info *auth_info,
+				 pj_bool_t retransmission,
+				 const pj_sockaddr_t *addr, unsigned addr_len)
+{
+    pj_uint8_t *out_pkt;
+    pj_size_t out_max_len, out_len;
+    pj_status_t status;
+
+    /* Apply options */
+    if (!retransmission) {
+	status = apply_msg_options(sess, pool, auth_info, response);
+	if (status != PJ_SUCCESS)
+	    return status;
+    }
+
+    /* Alloc packet buffer */
+    out_max_len = PJ_STUN_MAX_PKT_LEN;
+    out_pkt = (pj_uint8_t*) pj_pool_alloc(pool, out_max_len);
+
+    /* Encode, signing with the key from the request's credential info */
+    status = pj_stun_msg_encode(response, out_pkt, out_max_len, 0,
+				&auth_info->auth_key, &out_len);
+    if (status != PJ_SUCCESS) {
+	LOG_ERR_(sess, "Error encoding message", status);
+	return status;
+    }
+
+    /* Print log */
+    dump_tx_msg(sess, response, out_len, addr);
+
+    /* Send packet */
+    status = sess->cb.on_send_msg(sess, token, out_pkt, out_len,
+				  addr, addr_len);
+
+    return status;
+}
+
+/* Authenticate incoming message. On authentication failure, the error
+ * response generated by pj_stun_authenticate_request() is sent back to
+ * the source address and the failure status is returned; rdata->info
+ * is filled with the credential info found in the message.
+ */
+static pj_status_t authenticate_req(pj_stun_session *sess,
+				    void *token,
+				    const pj_uint8_t *pkt,
+				    unsigned pkt_len,
+				    pj_stun_rx_data *rdata,
+				    pj_pool_t *tmp_pool,
+				    const pj_sockaddr_t *src_addr,
+				    unsigned src_addr_len)
+{
+    pj_stun_msg *response;
+    pj_status_t status;
+
+    /* Nothing to check when auth is disabled.
+     * NOTE(review): the error-response test looks vacuous here since
+     * this function is only invoked for requests — confirm.
+     */
+    if (PJ_STUN_IS_ERROR_RESPONSE(rdata->msg->hdr.type) ||
+	sess->auth_type == PJ_STUN_AUTH_NONE)
+    {
+	return PJ_SUCCESS;
+    }
+
+    status = pj_stun_authenticate_request(pkt, pkt_len, rdata->msg,
+					  &sess->cred, tmp_pool, &rdata->info,
+					  &response);
+    if (status != PJ_SUCCESS && response != NULL) {
+	PJ_LOG(5,(SNAME(sess), "Message authentication failed"));
+	send_response(sess, token, tmp_pool, response, &rdata->info,
+		      PJ_FALSE, src_addr, src_addr_len);
+    }
+
+    return status;
+}
+
+
+/* Handle incoming response: match it against a pending client
+ * transaction by transaction ID, optionally verify its message
+ * integrity with the key used for the request, and pass it to the
+ * transaction (which in turn triggers the session callbacks).
+ * Responses with no matching transaction are silently dropped.
+ */
+static pj_status_t on_incoming_response(pj_stun_session *sess,
+					unsigned options,
+					const pj_uint8_t *pkt,
+					unsigned pkt_len,
+					pj_stun_msg *msg,
+					const pj_sockaddr_t *src_addr,
+					unsigned src_addr_len)
+{
+    pj_stun_tx_data *tdata;
+    pj_status_t status;
+
+    /* Lookup pending client transaction */
+    tdata = tsx_lookup(sess, msg);
+    if (tdata == NULL) {
+	PJ_LOG(5,(SNAME(sess),
+		  "Transaction not found, response silently discarded"));
+	return PJ_SUCCESS;
+    }
+
+    if (sess->auth_type == PJ_STUN_AUTH_NONE)
+	options |= PJ_STUN_NO_AUTHENTICATE;
+
+    /* Authenticate the message, unless PJ_STUN_NO_AUTHENTICATE
+     * is specified in the option.
+     */
+    if ((options & PJ_STUN_NO_AUTHENTICATE) == 0 &&
+	tdata->auth_info.auth_key.slen != 0 &&
+	pj_stun_auth_valid_for_msg(msg))
+    {
+	status = pj_stun_authenticate_response(pkt, pkt_len, msg,
+					       &tdata->auth_info.auth_key);
+	if (status != PJ_SUCCESS) {
+	    PJ_LOG(5,(SNAME(sess),
+		      "Response authentication failed"));
+	    return status;
+	}
+    }
+
+    /* Pass the response to the transaction.
+     * If the message is accepted, transaction callback will be called,
+     * and this will call the session callback too.
+     */
+    status = pj_stun_client_tsx_on_rx_msg(tdata->client_tsx, msg,
+					  src_addr, src_addr_len);
+    if (status != PJ_SUCCESS) {
+	return status;
+    }
+
+    return PJ_SUCCESS;
+}
+
+
+/* For an incoming request, look up the response cache for the same
+ * transaction (magic + type + tsx ID) and, when found, retransmit the
+ * cached response. Returns PJ_SUCCESS when a cached response was sent,
+ * PJ_ENOTFOUND when the request is new.
+ */
+static pj_status_t check_cached_response(pj_stun_session *sess,
+					 pj_pool_t *tmp_pool,
+					 const pj_stun_msg *msg,
+					 const pj_sockaddr_t *src_addr,
+					 unsigned src_addr_len)
+{
+    pj_stun_tx_data *t;
+
+    /* Walk the cache list looking for a matching entry */
+    for (t = sess->cached_response_list.next;
+	 t != &sess->cached_response_list;
+	 t = t->next)
+    {
+	if (t->msg_magic == msg->hdr.magic &&
+	    t->msg->hdr.type == msg->hdr.type &&
+	    pj_memcmp(t->msg_key, msg->hdr.tsx_id,
+		      sizeof(msg->hdr.tsx_id))==0)
+	{
+	    /* Found it: answer the retransmission from the cache */
+	    PJ_LOG(5,(SNAME(sess),
+		      "Request retransmission, sending cached response"));
+
+	    send_response(sess, t->token, tmp_pool, t->msg, &t->auth_info,
+			  PJ_TRUE, src_addr, src_addr_len);
+	    return PJ_SUCCESS;
+	}
+    }
+
+    return PJ_ENOTFOUND;
+}
+
+/* Handle incoming request: authenticate it (unless disabled), then
+ * dispatch to the application's on_rx_request() callback. When no
+ * callback is installed, a 400 (Bad Request) response is generated
+ * and sent back directly.
+ */
+static pj_status_t on_incoming_request(pj_stun_session *sess,
+				       unsigned options,
+				       void *token,
+				       pj_pool_t *tmp_pool,
+				       const pj_uint8_t *in_pkt,
+				       unsigned in_pkt_len,
+				       pj_stun_msg *msg,
+				       const pj_sockaddr_t *src_addr,
+				       unsigned src_addr_len)
+{
+    pj_stun_rx_data rdata;
+    pj_status_t status;
+
+    /* Init rdata */
+    rdata.msg = msg;
+    pj_bzero(&rdata.info, sizeof(rdata.info));
+
+    if (sess->auth_type == PJ_STUN_AUTH_NONE)
+	options |= PJ_STUN_NO_AUTHENTICATE;
+
+    /* Authenticate the message, unless PJ_STUN_NO_AUTHENTICATE
+     * is specified in the option.
+     */
+    if ((options & PJ_STUN_NO_AUTHENTICATE) == 0) {
+	status = authenticate_req(sess, token, (const pj_uint8_t*) in_pkt,
+				  in_pkt_len,&rdata, tmp_pool, src_addr,
+				  src_addr_len);
+	if (status != PJ_SUCCESS) {
+	    return status;
+	}
+    }
+
+    /* Distribute to handler, or respond with Bad Request */
+    if (sess->cb.on_rx_request) {
+	status = (*sess->cb.on_rx_request)(sess, in_pkt, in_pkt_len, &rdata,
+					   token, src_addr, src_addr_len);
+    } else {
+	pj_str_t err_text;
+	pj_stun_msg *response;
+
+	err_text = pj_str("Callback is not set to handle request");
+	status = pj_stun_msg_create_response(tmp_pool, msg,
+					     PJ_STUN_SC_BAD_REQUEST,
+					     &err_text, &response);
+	if (status == PJ_SUCCESS && response) {
+	    status = send_response(sess, token, tmp_pool, response,
+				   NULL, PJ_FALSE, src_addr, src_addr_len);
+	}
+    }
+
+    return status;
+}
+
+
+/* Hand an incoming STUN indication to the application's
+ * on_rx_indication() callback; indications are silently ignored when
+ * no callback is installed.
+ */
+static pj_status_t on_incoming_indication(pj_stun_session *sess,
+					  void *token,
+					  pj_pool_t *tmp_pool,
+					  const pj_uint8_t *in_pkt,
+					  unsigned in_pkt_len,
+					  const pj_stun_msg *msg,
+					  const pj_sockaddr_t *src_addr,
+					  unsigned src_addr_len)
+{
+    PJ_UNUSED_ARG(tmp_pool);
+
+    /* No handler installed: nothing to do */
+    if (sess->cb.on_rx_indication == NULL)
+	return PJ_SUCCESS;
+
+    return (*sess->cb.on_rx_indication)(sess, in_pkt, in_pkt_len, msg,
+					token, src_addr, src_addr_len);
+}
+
+
+/* Print incoming message to log, subject to the RX logging bits in
+ * sess->log_flag (mirror of dump_tx_msg() for the receive direction).
+ */
+static void dump_rx_msg(pj_stun_session *sess, const pj_stun_msg *msg,
+			unsigned pkt_size, const pj_sockaddr_t *addr)
+{
+    char src_info[PJ_INET6_ADDRSTRLEN+10];
+
+    /* Skip when logging is disabled for this message class */
+    if ((PJ_STUN_IS_REQUEST(msg->hdr.type) &&
+	 (sess->log_flag & PJ_STUN_SESS_LOG_RX_REQ)==0) ||
+	(PJ_STUN_IS_RESPONSE(msg->hdr.type) &&
+	 (sess->log_flag & PJ_STUN_SESS_LOG_RX_RES)==0) ||
+	(PJ_STUN_IS_INDICATION(msg->hdr.type) &&
+	 (sess->log_flag & PJ_STUN_SESS_LOG_RX_IND)==0))
+    {
+	return;
+    }
+
+    pj_sockaddr_print(addr, src_info, sizeof(src_info), 3);
+
+    PJ_LOG(5,(SNAME(sess),
+	      "RX %d bytes STUN message from %s:\n"
+	      "--- begin STUN message ---\n"
+	      "%s"
+	      "--- end of STUN message ---\n",
+	      pkt_size, src_info,
+	      pj_stun_msg_dump(msg, sess->dump_buf, sizeof(sess->dump_buf),
+			       NULL)));
+
+}
+
+/* Incoming packet entry point. Parses the packet (from the session's
+ * reusable rx_pool, which is reset on every call), short-circuits
+ * retransmitted requests via the response cache, then dispatches by
+ * message class to the response/request/indication handlers. The busy
+ * counter defers any destroy() issued from a callback until this
+ * function unwinds, in which case PJNATH_ESTUNDESTROYED is returned.
+ */
+PJ_DEF(pj_status_t) pj_stun_session_on_rx_pkt(pj_stun_session *sess,
+					      const void *packet,
+					      pj_size_t pkt_size,
+					      unsigned options,
+					      void *token,
+					      pj_size_t *parsed_len,
+					      const pj_sockaddr_t *src_addr,
+					      unsigned src_addr_len)
+{
+    pj_stun_msg *msg, *response;
+    pj_status_t status;
+
+    PJ_ASSERT_RETURN(sess && packet && pkt_size, PJ_EINVAL);
+
+    pj_log_push_indent();
+
+    /* Lock the session and prevent user from destroying us in the callback */
+    pj_atomic_inc(sess->busy);
+    pj_lock_acquire(sess->lock);
+
+    /* Reset pool */
+    pj_pool_reset(sess->rx_pool);
+
+    /* Try to parse the message. On parse error, 'response' is expected
+     * to be set by pj_stun_msg_decode() when an error response should
+     * be sent back to the sender.
+     */
+    status = pj_stun_msg_decode(sess->rx_pool, (const pj_uint8_t*)packet,
+				pkt_size, options,
+				&msg, parsed_len, &response);
+    if (status != PJ_SUCCESS) {
+	LOG_ERR_(sess, "STUN msg_decode() error", status);
+	if (response) {
+	    send_response(sess, token, sess->rx_pool, response, NULL,
+			  PJ_FALSE, src_addr, src_addr_len);
+	}
+	goto on_return;
+    }
+
+    dump_rx_msg(sess, msg, pkt_size, src_addr);
+
+    /* For requests, check if we have cached response */
+    status = check_cached_response(sess, sess->rx_pool, msg,
+				   src_addr, src_addr_len);
+    if (status == PJ_SUCCESS) {
+	goto on_return;
+    }
+
+    /* Handle message according to its class */
+    if (PJ_STUN_IS_SUCCESS_RESPONSE(msg->hdr.type) ||
+	PJ_STUN_IS_ERROR_RESPONSE(msg->hdr.type))
+    {
+	status = on_incoming_response(sess, options,
+				      (const pj_uint8_t*) packet, pkt_size,
+				      msg, src_addr, src_addr_len);
+
+    } else if (PJ_STUN_IS_REQUEST(msg->hdr.type)) {
+
+	status = on_incoming_request(sess, options, token, sess->rx_pool,
+				     (const pj_uint8_t*) packet, pkt_size,
+				     msg, src_addr, src_addr_len);
+
+    } else if (PJ_STUN_IS_INDICATION(msg->hdr.type)) {
+
+	status = on_incoming_indication(sess, token, sess->rx_pool,
+					(const pj_uint8_t*) packet, pkt_size,
+					msg, src_addr, src_addr_len);
+
+    } else {
+	pj_assert(!"Unexpected!");
+	status = PJ_EBUG;
+    }
+
+on_return:
+    pj_lock_release(sess->lock);
+
+    pj_log_pop_indent();
+
+    /* If we've received destroy request while we're on the callback,
+     * destroy the session now.
+     */
+    if (pj_atomic_dec_and_get(sess->busy)==0 && sess->destroy_request) {
+	pj_stun_session_destroy(sess);
+	return PJNATH_ESTUNDESTROYED;
+    }
+
+    return status;
+}
+
diff --git a/pjnath/src/pjnath/stun_sock.c b/pjnath/src/pjnath/stun_sock.c
new file mode 100644
index 0000000..ff7dc16
--- /dev/null
+++ b/pjnath/src/pjnath/stun_sock.c
@@ -0,0 +1,856 @@
+/* $Id: stun_sock.c 3999 2012-03-30 07:10:13Z bennylp $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <pjnath/stun_sock.h>
+#include <pjnath/errno.h>
+#include <pjnath/stun_transaction.h>
+#include <pjnath/stun_session.h>
+#include <pjlib-util/srv_resolver.h>
+#include <pj/activesock.h>
+#include <pj/addr_resolv.h>
+#include <pj/array.h>
+#include <pj/assert.h>
+#include <pj/ip_helper.h>
+#include <pj/log.h>
+#include <pj/pool.h>
+#include <pj/rand.h>
+
+
+/* STUN transport descriptor: a UDP socket plus the STUN session used
+ * to resolve and keep alive the server-reflexive (mapped) address.
+ */
+struct pj_stun_sock
+{
+    char		*obj_name;	/* Log identification	    */
+    pj_pool_t		*pool;		/* Pool			    */
+    void		*user_data;	/* Application user data    */
+
+    int			 af;		/* Address family	    */
+    pj_stun_config	 stun_cfg;	/* STUN config (ioqueue etc)*/
+    pj_stun_sock_cb	 cb;		/* Application callbacks    */
+
+    int			 ka_interval;	/* Keep alive interval (sec)*/
+    pj_timer_entry	 ka_timer;	/* Keep alive timer.	    */
+
+    pj_sockaddr		 srv_addr;	/* Resolved server addr	    */
+    pj_sockaddr		 mapped_addr;	/* Our public address	    */
+
+    pj_dns_srv_async_query *q;		/* Pending DNS query	    */
+    pj_sock_t		 sock_fd;	/* Socket descriptor	    */
+    pj_activesock_t	*active_sock;	/* Active socket object	    */
+    pj_ioqueue_op_key_t	 send_key;	/* Default send key for app */
+    pj_ioqueue_op_key_t	 int_send_key;	/* Send key for internal    */
+
+    pj_uint16_t		 tsx_id[6];	/* 80-bit prefix + counter used
+					   as tsx ID of internally sent
+					   Binding requests, to match
+					   STUN msg		    */
+    pj_stun_session	*stun_sess;	/* STUN session		    */
+
+};
+
+/*
+ * Prototypes for static functions
+ */
+
+/* This callback is called by the STUN session to send packet */
+static pj_status_t sess_on_send_msg(pj_stun_session *sess,
+				    void *token,
+				    const void *pkt,
+				    pj_size_t pkt_size,
+				    const pj_sockaddr_t *dst_addr,
+				    unsigned addr_len);
+
+/* This callback is called by the STUN session when outgoing transaction
+ * is complete
+ */
+static void sess_on_request_complete(pj_stun_session *sess,
+				     pj_status_t status,
+				     void *token,
+				     pj_stun_tx_data *tdata,
+				     const pj_stun_msg *response,
+				     const pj_sockaddr_t *src_addr,
+				     unsigned src_addr_len);
+/* DNS resolver callback, resumes pj_stun_sock_start() processing */
+static void dns_srv_resolver_cb(void *user_data,
+				pj_status_t status,
+				const pj_dns_srv_record *rec);
+
+/* Start sending STUN Binding request */
+static pj_status_t get_mapped_addr(pj_stun_sock *stun_sock);
+
+/* Callback from active socket when incoming packet is received */
+static pj_bool_t on_data_recvfrom(pj_activesock_t *asock,
+				  void *data,
+				  pj_size_t size,
+				  const pj_sockaddr_t *src_addr,
+				  int addr_len,
+				  pj_status_t status);
+
+/* Callback from active socket about send status */
+static pj_bool_t on_data_sent(pj_activesock_t *asock,
+			      pj_ioqueue_op_key_t *send_key,
+			      pj_ssize_t sent);
+
+/* Schedule keep-alive timer */
+static void start_ka_timer(pj_stun_sock *stun_sock);
+
+/* Keep-alive timer callback */
+static void ka_timer_cb(pj_timer_heap_t *th, pj_timer_entry *te);
+
+/* Token marking messages generated internally by this transport, as
+ * opposed to messages submitted by the application.
+ */
+#define INTERNAL_MSG_TOKEN  (void*)1
+
+
+/*
+ * Retrieve the name representing the specified operation, for use in
+ * log messages and the on_status() callback. Out-of-range values map
+ * to a placeholder string instead of indexing past the table.
+ */
+PJ_DEF(const char*) pj_stun_sock_op_name(pj_stun_sock_op op)
+{
+    /* static const: built once, not re-initialized on every call */
+    static const char *const names[] = {
+	"?",
+	"DNS resolution",
+	"STUN Binding request",
+	"Keep-alive",
+	"Mapped addr. changed"
+    };
+
+    return op < PJ_ARRAY_SIZE(names) ? names[op] : "???";
+}
+/* BUG FIX: removed the stray ';' that followed the closing brace of
+ * this function definition (invalid empty declaration at file scope).
+ */
+
+
+/*
+ * Initialize the STUN transport setting with its default values:
+ * everything zeroed, then the non-zero defaults filled in.
+ */
+PJ_DEF(void) pj_stun_sock_cfg_default(pj_stun_sock_cfg *cfg)
+{
+    pj_bzero(cfg, sizeof(*cfg));
+
+    cfg->max_pkt_size     = PJ_STUN_SOCK_PKT_LEN;
+    cfg->async_cnt        = 1;
+    cfg->ka_interval      = PJ_STUN_KEEP_ALIVE_SEC;
+    cfg->qos_type         = PJ_QOS_TYPE_BEST_EFFORT;
+    cfg->qos_ignore_error = PJ_TRUE;
+}
+
+
+/* Check that configuration setting is valid: a usable packet size and
+ * at least one asynchronous read operation.
+ */
+static pj_bool_t pj_stun_sock_cfg_is_valid(const pj_stun_sock_cfg *cfg)
+{
+    if (cfg->max_pkt_size <= 1)
+	return PJ_FALSE;
+    if (cfg->async_cnt < 1)
+	return PJ_FALSE;
+
+    return PJ_TRUE;
+}
+
+/*
+ * Create the STUN transport using the specified configuration: create
+ * and bind the UDP socket (optionally applying QoS), wrap it in an
+ * active socket with pending recvfrom operations, create the STUN
+ * session, and seed the transaction ID prefix used to recognize our
+ * own Binding requests. On any failure everything allocated so far is
+ * torn down via pj_stun_sock_destroy().
+ */
+PJ_DEF(pj_status_t) pj_stun_sock_create( pj_stun_config *stun_cfg,
+					 const char *name,
+					 int af,
+					 const pj_stun_sock_cb *cb,
+					 const pj_stun_sock_cfg *cfg,
+					 void *user_data,
+					 pj_stun_sock **p_stun_sock)
+{
+    pj_pool_t *pool;
+    pj_stun_sock *stun_sock;
+    pj_stun_sock_cfg default_cfg;
+    unsigned i;
+    pj_status_t status;
+
+    PJ_ASSERT_RETURN(stun_cfg && cb && p_stun_sock, PJ_EINVAL);
+    PJ_ASSERT_RETURN(af==pj_AF_INET()||af==pj_AF_INET6(), PJ_EAFNOTSUP);
+    PJ_ASSERT_RETURN(!cfg || pj_stun_sock_cfg_is_valid(cfg), PJ_EINVAL);
+    PJ_ASSERT_RETURN(cb->on_status, PJ_EINVAL);
+
+    status = pj_stun_config_check_valid(stun_cfg);
+    if (status != PJ_SUCCESS)
+	return status;
+
+    if (name == NULL)
+	name = "stuntp%p";
+
+    /* Use defaults when the caller supplied no configuration */
+    if (cfg == NULL) {
+	pj_stun_sock_cfg_default(&default_cfg);
+	cfg = &default_cfg;
+    }
+
+
+    /* Create structure.
+     * NOTE(review): pj_pool_create() result is not checked for NULL
+     * before use — confirm the pool factory policy (e.g. panic on OOM).
+     */
+    pool = pj_pool_create(stun_cfg->pf, name, 256, 512, NULL);
+    stun_sock = PJ_POOL_ZALLOC_T(pool, pj_stun_sock);
+    stun_sock->pool = pool;
+    stun_sock->obj_name = pool->obj_name;
+    stun_sock->user_data = user_data;
+    stun_sock->af = af;
+    stun_sock->sock_fd = PJ_INVALID_SOCKET;
+    pj_memcpy(&stun_sock->stun_cfg, stun_cfg, sizeof(*stun_cfg));
+    pj_memcpy(&stun_sock->cb, cb, sizeof(*cb));
+
+    stun_sock->ka_interval = cfg->ka_interval;
+    if (stun_sock->ka_interval == 0)
+	stun_sock->ka_interval = PJ_STUN_KEEP_ALIVE_SEC;
+
+    /* Create socket and bind socket */
+    status = pj_sock_socket(af, pj_SOCK_DGRAM(), 0, &stun_sock->sock_fd);
+    if (status != PJ_SUCCESS)
+	goto on_error;
+
+    /* Apply QoS, if specified */
+    status = pj_sock_apply_qos2(stun_sock->sock_fd, cfg->qos_type,
+				&cfg->qos_params, 2, stun_sock->obj_name,
+				NULL);
+    if (status != PJ_SUCCESS && !cfg->qos_ignore_error)
+	goto on_error;
+
+    /* Bind socket: either to the caller-specified address, or to an
+     * any-address/any-port of the requested address family.
+     */
+    if (pj_sockaddr_has_addr(&cfg->bound_addr)) {
+	status = pj_sock_bind(stun_sock->sock_fd, &cfg->bound_addr,
+			      pj_sockaddr_get_len(&cfg->bound_addr));
+    } else {
+	pj_sockaddr bound_addr;
+
+	pj_sockaddr_init(af, &bound_addr, NULL, 0);
+	status = pj_sock_bind(stun_sock->sock_fd, &bound_addr,
+			      pj_sockaddr_get_len(&bound_addr));
+    }
+
+    if (status != PJ_SUCCESS)
+	goto on_error;
+
+    /* Create more useful information string about this transport */
+#if 0
+    {
+	pj_sockaddr bound_addr;
+	int addr_len = sizeof(bound_addr);
+
+	status = pj_sock_getsockname(stun_sock->sock_fd, &bound_addr,
+				     &addr_len);
+	if (status != PJ_SUCCESS)
+	    goto on_error;
+
+	stun_sock->info = pj_pool_alloc(pool, PJ_INET6_ADDRSTRLEN+10);
+	pj_sockaddr_print(&bound_addr, stun_sock->info,
+			  PJ_INET6_ADDRSTRLEN, 3);
+    }
+#endif
+
+    /* Init active socket configuration */
+    {
+	pj_activesock_cfg activesock_cfg;
+	pj_activesock_cb activesock_cb;
+
+	pj_activesock_cfg_default(&activesock_cfg);
+	activesock_cfg.async_cnt = cfg->async_cnt;
+	activesock_cfg.concurrency = 0;
+
+	/* Create the active socket */
+	pj_bzero(&activesock_cb, sizeof(activesock_cb));
+	activesock_cb.on_data_recvfrom = &on_data_recvfrom;
+	activesock_cb.on_data_sent = &on_data_sent;
+	status = pj_activesock_create(pool, stun_sock->sock_fd,
+				      pj_SOCK_DGRAM(),
+				      &activesock_cfg, stun_cfg->ioqueue,
+				      &activesock_cb, stun_sock,
+				      &stun_sock->active_sock);
+	if (status != PJ_SUCCESS)
+	    goto on_error;
+
+	/* Start asynchronous read operations */
+	status = pj_activesock_start_recvfrom(stun_sock->active_sock, pool,
+					      cfg->max_pkt_size, 0);
+	if (status != PJ_SUCCESS)
+	    goto on_error;
+
+	/* Init send keys */
+	pj_ioqueue_op_key_init(&stun_sock->send_key,
+			       sizeof(stun_sock->send_key));
+	pj_ioqueue_op_key_init(&stun_sock->int_send_key,
+			       sizeof(stun_sock->int_send_key));
+    }
+
+    /* Create STUN session */
+    {
+	pj_stun_session_cb sess_cb;
+
+	pj_bzero(&sess_cb, sizeof(sess_cb));
+	sess_cb.on_request_complete = &sess_on_request_complete;
+	sess_cb.on_send_msg = &sess_on_send_msg;
+	status = pj_stun_session_create(&stun_sock->stun_cfg,
+					stun_sock->obj_name,
+					&sess_cb, PJ_FALSE,
+					&stun_sock->stun_sess);
+	if (status != PJ_SUCCESS)
+	    goto on_error;
+    }
+
+    /* Associate us with the STUN session */
+    pj_stun_session_set_user_data(stun_sock->stun_sess, stun_sock);
+
+    /* Initialize random numbers to be used as STUN transaction ID for
+     * outgoing Binding request. We use the 80bit number to distinguish
+     * STUN messages we sent with STUN messages that the application sends.
+     * The last 16bit value in the array is a counter.
+     */
+    for (i=0; i<PJ_ARRAY_SIZE(stun_sock->tsx_id); ++i) {
+	stun_sock->tsx_id[i] = (pj_uint16_t) pj_rand();
+    }
+    stun_sock->tsx_id[5] = 0;
+
+
+    /* Init timer entry */
+    stun_sock->ka_timer.cb = &ka_timer_cb;
+    stun_sock->ka_timer.user_data = stun_sock;
+
+    /* Done */
+    *p_stun_sock = stun_sock;
+    return PJ_SUCCESS;
+
+on_error:
+    pj_stun_sock_destroy(stun_sock);
+    return status;
+}
+
+/* Start socket: determine the STUN server address and kick off the
+ * Binding request. Three paths:
+ *  - 'domain' parses as a literal IP of our address family: use it
+ *    directly with default_port;
+ *  - otherwise, when a resolver is given: issue an asynchronous DNS
+ *    SRV query (with A/AAAA fallback); processing resumes in
+ *    dns_srv_resolver_cb();
+ *  - otherwise: synchronous pj_getaddrinfo() lookup.
+ * In the synchronous paths the Binding request is started here via
+ * get_mapped_addr().
+ */
+PJ_DEF(pj_status_t) pj_stun_sock_start( pj_stun_sock *stun_sock,
+				        const pj_str_t *domain,
+				        pj_uint16_t default_port,
+				        pj_dns_resolver *resolver)
+{
+    pj_status_t status;
+
+    PJ_ASSERT_RETURN(stun_sock && domain && default_port, PJ_EINVAL);
+
+    /* Check whether the domain contains IP address */
+    stun_sock->srv_addr.addr.sa_family = (pj_uint16_t)stun_sock->af;
+    status = pj_inet_pton(stun_sock->af, domain,
+			  pj_sockaddr_get_addr(&stun_sock->srv_addr));
+    if (status != PJ_SUCCESS) {
+	/* Not a literal IP; clear the partially set family */
+	stun_sock->srv_addr.addr.sa_family = (pj_uint16_t)0;
+    }
+
+    /* If resolver is set, try to resolve with DNS SRV first. It
+     * will fallback to DNS A/AAAA when no SRV record is found.
+     */
+    if (status != PJ_SUCCESS && resolver) {
+	const pj_str_t res_name = pj_str("_stun._udp.");
+	unsigned opt;
+
+	pj_assert(stun_sock->q == NULL);
+
+	opt = PJ_DNS_SRV_FALLBACK_A;
+	if (stun_sock->af == pj_AF_INET6()) {
+	    opt |= (PJ_DNS_SRV_RESOLVE_AAAA | PJ_DNS_SRV_FALLBACK_AAAA);
+	}
+
+	status = pj_dns_srv_resolve(domain, &res_name, default_port,
+				    stun_sock->pool, resolver, opt,
+				    stun_sock, &dns_srv_resolver_cb,
+				    &stun_sock->q);
+
+	/* Processing will resume when the DNS SRV callback is called */
+	return status;
+
+    } else {
+
+	if (status != PJ_SUCCESS) {
+	    /* No resolver: blocking A/AAAA lookup, first result wins */
+	    pj_addrinfo ai;
+	    unsigned cnt = 1;
+
+	    status = pj_getaddrinfo(stun_sock->af, domain, &cnt, &ai);
+	    if (status != PJ_SUCCESS)
+		return status;
+
+	    pj_sockaddr_cp(&stun_sock->srv_addr, &ai.ai_addr);
+	}
+
+	pj_sockaddr_set_port(&stun_sock->srv_addr, (pj_uint16_t)default_port);
+
+	/* Start sending Binding request */
+	return get_mapped_addr(stun_sock);
+    }
+}
+
+/* Destroy */
+PJ_DEF(pj_status_t) pj_stun_sock_destroy(pj_stun_sock *stun_sock)
+{
+ if (stun_sock->q) {
+ pj_dns_srv_cancel_query(stun_sock->q, PJ_FALSE);
+ stun_sock->q = NULL;
+ }
+
+ /* Destroy the active socket first just in case we'll get
+ * stray callback.
+ */
+ if (stun_sock->active_sock != NULL) {
+ pj_activesock_close(stun_sock->active_sock);
+ stun_sock->active_sock = NULL;
+ stun_sock->sock_fd = PJ_INVALID_SOCKET;
+ } else if (stun_sock->sock_fd != PJ_INVALID_SOCKET) {
+ pj_sock_close(stun_sock->sock_fd);
+ stun_sock->sock_fd = PJ_INVALID_SOCKET;
+ }
+
+ if (stun_sock->ka_timer.id != 0) {
+ pj_timer_heap_cancel(stun_sock->stun_cfg.timer_heap,
+ &stun_sock->ka_timer);
+ stun_sock->ka_timer.id = 0;
+ }
+
+ if (stun_sock->stun_sess) {
+ pj_stun_session_destroy(stun_sock->stun_sess);
+ stun_sock->stun_sess = NULL;
+ }
+
+ if (stun_sock->pool) {
+ pj_pool_t *pool = stun_sock->pool;
+ stun_sock->pool = NULL;
+ pj_pool_release(pool);
+ }
+
+ return PJ_SUCCESS;
+}
+
+/* Associate user data */
+PJ_DEF(pj_status_t) pj_stun_sock_set_user_data( pj_stun_sock *stun_sock,
+ void *user_data)
+{
+ PJ_ASSERT_RETURN(stun_sock, PJ_EINVAL);
+ stun_sock->user_data = user_data;
+ return PJ_SUCCESS;
+}
+
+
+/* Get user data */
+PJ_DEF(void*) pj_stun_sock_get_user_data(pj_stun_sock *stun_sock)
+{
+ PJ_ASSERT_RETURN(stun_sock, NULL);
+ return stun_sock->user_data;
+}
+
+/* Notify application that session has failed */
+static pj_bool_t sess_fail(pj_stun_sock *stun_sock,
+ pj_stun_sock_op op,
+ pj_status_t status)
+{
+ pj_bool_t ret;
+
+ PJ_PERROR(4,(stun_sock->obj_name, status,
+ "Session failed because %s failed",
+ pj_stun_sock_op_name(op)));
+
+ ret = (*stun_sock->cb.on_status)(stun_sock, op, status);
+
+ return ret;
+}
+
+/* DNS resolver callback */
+static void dns_srv_resolver_cb(void *user_data,
+ pj_status_t status,
+ const pj_dns_srv_record *rec)
+{
+ pj_stun_sock *stun_sock = (pj_stun_sock*) user_data;
+
+ /* Clear query */
+ stun_sock->q = NULL;
+
+ /* Handle error */
+ if (status != PJ_SUCCESS) {
+ sess_fail(stun_sock, PJ_STUN_SOCK_DNS_OP, status);
+ return;
+ }
+
+ pj_assert(rec->count);
+ pj_assert(rec->entry[0].server.addr_count);
+
+ PJ_TODO(SUPPORT_IPV6_IN_RESOLVER);
+ pj_assert(stun_sock->af == pj_AF_INET());
+
+ /* Set the address */
+ pj_sockaddr_in_init(&stun_sock->srv_addr.ipv4, NULL,
+ rec->entry[0].port);
+ stun_sock->srv_addr.ipv4.sin_addr = rec->entry[0].server.addr[0];
+
+ /* Start sending Binding request */
+ get_mapped_addr(stun_sock);
+}
+
+
+/* Start sending STUN Binding request */
+static pj_status_t get_mapped_addr(pj_stun_sock *stun_sock)
+{
+ pj_stun_tx_data *tdata;
+ pj_status_t status;
+
+ /* Increment request counter and create STUN Binding request */
+ ++stun_sock->tsx_id[5];
+ status = pj_stun_session_create_req(stun_sock->stun_sess,
+ PJ_STUN_BINDING_REQUEST,
+ PJ_STUN_MAGIC,
+ (const pj_uint8_t*)stun_sock->tsx_id,
+ &tdata);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ /* Send request */
+ status=pj_stun_session_send_msg(stun_sock->stun_sess, INTERNAL_MSG_TOKEN,
+ PJ_FALSE, PJ_TRUE, &stun_sock->srv_addr,
+ pj_sockaddr_get_len(&stun_sock->srv_addr),
+ tdata);
+ if (status != PJ_SUCCESS && status != PJ_EPENDING)
+ goto on_error;
+
+ return PJ_SUCCESS;
+
+on_error:
+ sess_fail(stun_sock, PJ_STUN_SOCK_BINDING_OP, status);
+ return status;
+}
+
/* Get info: fill "info" with the server address, the externally mapped
 * address, the locally bound address, and the list of local address
 * aliases (with the default host IP placed in the first slot).
 */
PJ_DEF(pj_status_t) pj_stun_sock_get_info( pj_stun_sock *stun_sock,
					   pj_stun_sock_info *info)
{
    int addr_len;
    pj_status_t status;

    PJ_ASSERT_RETURN(stun_sock && info, PJ_EINVAL);

    /* Copy STUN server address and mapped address */
    pj_memcpy(&info->srv_addr, &stun_sock->srv_addr,
	      sizeof(pj_sockaddr));
    pj_memcpy(&info->mapped_addr, &stun_sock->mapped_addr,
	      sizeof(pj_sockaddr));

    /* Retrieve bound address */
    addr_len = sizeof(info->bound_addr);
    status = pj_sock_getsockname(stun_sock->sock_fd, &info->bound_addr,
				 &addr_len);
    if (status != PJ_SUCCESS)
	return status;

    /* If socket is bound to a specific interface, then only put that
     * interface in the alias list. Otherwise query all the interfaces
     * in the host.
     */
    if (pj_sockaddr_has_addr(&info->bound_addr)) {
	info->alias_cnt = 1;
	pj_sockaddr_cp(&info->aliases[0], &info->bound_addr);
    } else {
	pj_sockaddr def_addr;
	/* Keep the bound port: every alias reports the same local port */
	pj_uint16_t port = pj_sockaddr_get_port(&info->bound_addr);
	unsigned i;

	/* Get the default address */
	status = pj_gethostip(stun_sock->af, &def_addr);
	if (status != PJ_SUCCESS)
	    return status;

	pj_sockaddr_set_port(&def_addr, port);

	/* Enum all IP interfaces in the host */
	info->alias_cnt = PJ_ARRAY_SIZE(info->aliases);
	status = pj_enum_ip_interface(stun_sock->af, &info->alias_cnt,
				      info->aliases);
	if (status != PJ_SUCCESS)
	    return status;

	/* Set the port number for each address.
	 */
	for (i=0; i<info->alias_cnt; ++i) {
	    pj_sockaddr_set_port(&info->aliases[i], port);
	}

	/* Put the default IP in the first slot: find it in the list and
	 * swap it with whatever currently occupies index 0.
	 */
	for (i=0; i<info->alias_cnt; ++i) {
	    if (pj_sockaddr_cmp(&info->aliases[i], &def_addr)==0) {
		if (i!=0) {
		    pj_sockaddr_cp(&info->aliases[i], &info->aliases[0]);
		    pj_sockaddr_cp(&info->aliases[0], &def_addr);
		}
		break;
	    }
	}
    }

    return PJ_SUCCESS;
}
+
+/* Send application data */
+PJ_DEF(pj_status_t) pj_stun_sock_sendto( pj_stun_sock *stun_sock,
+ pj_ioqueue_op_key_t *send_key,
+ const void *pkt,
+ unsigned pkt_len,
+ unsigned flag,
+ const pj_sockaddr_t *dst_addr,
+ unsigned addr_len)
+{
+ pj_ssize_t size;
+ PJ_ASSERT_RETURN(stun_sock && pkt && dst_addr && addr_len, PJ_EINVAL);
+
+ if (send_key==NULL)
+ send_key = &stun_sock->send_key;
+
+ size = pkt_len;
+ return pj_activesock_sendto(stun_sock->active_sock, send_key,
+ pkt, &size, flag, dst_addr, addr_len);
+}
+
+/* This callback is called by the STUN session to send packet */
+static pj_status_t sess_on_send_msg(pj_stun_session *sess,
+ void *token,
+ const void *pkt,
+ pj_size_t pkt_size,
+ const pj_sockaddr_t *dst_addr,
+ unsigned addr_len)
+{
+ pj_stun_sock *stun_sock;
+ pj_ssize_t size;
+
+ stun_sock = (pj_stun_sock *) pj_stun_session_get_user_data(sess);
+
+ pj_assert(token==INTERNAL_MSG_TOKEN);
+ PJ_UNUSED_ARG(token);
+
+ size = pkt_size;
+ return pj_activesock_sendto(stun_sock->active_sock,
+ &stun_sock->int_send_key,
+ pkt, &size, 0, dst_addr, addr_len);
+}
+
/* This callback is called by the STUN session when outgoing transaction
 * is complete (either the initial Binding request or a keep-alive).
 * It extracts the mapped address from the response, notifies the
 * application, and restarts the keep-alive timer unless vetoed.
 */
static void sess_on_request_complete(pj_stun_session *sess,
				     pj_status_t status,
				     void *token,
				     pj_stun_tx_data *tdata,
				     const pj_stun_msg *response,
				     const pj_sockaddr_t *src_addr,
				     unsigned src_addr_len)
{
    pj_stun_sock *stun_sock;
    const pj_stun_sockaddr_attr *mapped_attr;
    pj_stun_sock_op op;
    pj_bool_t mapped_changed;
    /* Whether to restart the keep-alive timer; the application can veto
     * this by returning PJ_FALSE from on_status() (e.g. after it has
     * destroyed the socket inside the callback).
     */
    pj_bool_t resched = PJ_TRUE;

    stun_sock = (pj_stun_sock *) pj_stun_session_get_user_data(sess);

    PJ_UNUSED_ARG(tdata);
    PJ_UNUSED_ARG(token);
    PJ_UNUSED_ARG(src_addr);
    PJ_UNUSED_ARG(src_addr_len);

    /* Check if this is a keep-alive or the first Binding request: the
     * mapped address is only known once the first request has succeeded.
     */
    if (pj_sockaddr_has_addr(&stun_sock->mapped_addr))
	op = PJ_STUN_SOCK_KEEP_ALIVE_OP;
    else
	op = PJ_STUN_SOCK_BINDING_OP;

    /* Handle failure */
    if (status != PJ_SUCCESS) {
	resched = sess_fail(stun_sock, op, status);
	goto on_return;
    }

    /* Get XOR-MAPPED-ADDRESS, or MAPPED-ADDRESS when XOR-MAPPED-ADDRESS
     * doesn't exist.
     */
    mapped_attr = (const pj_stun_sockaddr_attr*)
		  pj_stun_msg_find_attr(response, PJ_STUN_ATTR_XOR_MAPPED_ADDR,
					0);
    if (mapped_attr==NULL) {
	mapped_attr = (const pj_stun_sockaddr_attr*)
		      pj_stun_msg_find_attr(response, PJ_STUN_ATTR_MAPPED_ADDR,
					    0);
    }

    if (mapped_attr == NULL) {
	resched = sess_fail(stun_sock, op, PJNATH_ESTUNNOMAPPEDADDR);
	goto on_return;
    }

    /* Determine if mapped address has changed, and save the new mapped
     * address and call callback if so
     */
    mapped_changed = !pj_sockaddr_has_addr(&stun_sock->mapped_addr) ||
		     pj_sockaddr_cmp(&stun_sock->mapped_addr,
				     &mapped_attr->sockaddr) != 0;
    if (mapped_changed) {
	/* Log the new mapped address */
	{
	    char addrinfo[PJ_INET6_ADDRSTRLEN+10];
	    PJ_LOG(4,(stun_sock->obj_name,
		      "STUN mapped address found/changed: %s",
		      pj_sockaddr_print(&mapped_attr->sockaddr,
					addrinfo, sizeof(addrinfo), 3)));
	}

	pj_sockaddr_cp(&stun_sock->mapped_addr, &mapped_attr->sockaddr);

	/* A keep-alive that changed the mapping is reported to the app
	 * as a MAPPED_ADDR_CHANGE event instead.
	 */
	if (op==PJ_STUN_SOCK_KEEP_ALIVE_OP)
	    op = PJ_STUN_SOCK_MAPPED_ADDR_CHANGE;
    }

    /* Notify user */
    resched = (*stun_sock->cb.on_status)(stun_sock, op, PJ_SUCCESS);

on_return:
    /* Start/restart keep-alive timer, unless the callback told us not to */
    if (resched)
	start_ka_timer(stun_sock);
}
+
+/* Schedule keep-alive timer */
+static void start_ka_timer(pj_stun_sock *stun_sock)
+{
+ if (stun_sock->ka_timer.id != 0) {
+ pj_timer_heap_cancel(stun_sock->stun_cfg.timer_heap,
+ &stun_sock->ka_timer);
+ stun_sock->ka_timer.id = 0;
+ }
+
+ pj_assert(stun_sock->ka_interval != 0);
+ if (stun_sock->ka_interval > 0) {
+ pj_time_val delay;
+
+ delay.sec = stun_sock->ka_interval;
+ delay.msec = 0;
+
+ if (pj_timer_heap_schedule(stun_sock->stun_cfg.timer_heap,
+ &stun_sock->ka_timer,
+ &delay) == PJ_SUCCESS)
+ {
+ stun_sock->ka_timer.id = PJ_TRUE;
+ }
+ }
+}
+
+/* Keep-alive timer callback */
+static void ka_timer_cb(pj_timer_heap_t *th, pj_timer_entry *te)
+{
+ pj_stun_sock *stun_sock;
+
+ stun_sock = (pj_stun_sock *) te->user_data;
+
+ PJ_UNUSED_ARG(th);
+
+ /* Time to send STUN Binding request */
+ if (get_mapped_addr(stun_sock) != PJ_SUCCESS)
+ return;
+
+ /* Next keep-alive timer will be scheduled once the request
+ * is complete.
+ */
+}
+
/* Callback from active socket when incoming packet is received.
 * Demultiplexes traffic: our own STUN Binding responses are fed to the
 * internal STUN session, anything else goes to the application via
 * on_rx_data(). Returns PJ_TRUE to continue reading, PJ_FALSE when the
 * socket has been destroyed from within a callback.
 */
static pj_bool_t on_data_recvfrom(pj_activesock_t *asock,
				  void *data,
				  pj_size_t size,
				  const pj_sockaddr_t *src_addr,
				  int addr_len,
				  pj_status_t status)
{
    pj_stun_sock *stun_sock;
    pj_stun_msg_hdr *hdr;
    pj_uint16_t type;

    stun_sock = (pj_stun_sock*) pj_activesock_get_user_data(asock);

    /* Log socket error (and keep reading) */
    if (status != PJ_SUCCESS) {
	PJ_PERROR(2,(stun_sock->obj_name, status, "recvfrom() error"));
	return PJ_TRUE;
    }

    /* Check that this is STUN message */
    status = pj_stun_msg_check((const pj_uint8_t*)data, size,
    			       PJ_STUN_IS_DATAGRAM | PJ_STUN_CHECK_PACKET);
    if (status != PJ_SUCCESS) {
	/* Not STUN -- give it to application */
	goto process_app_data;
    }

    /* Treat packet as STUN header and copy the STUN message type.
     * We don't want to access the type directly from the header
     * since it may not be properly aligned.
     */
    hdr = (pj_stun_msg_hdr*) data;
    pj_memcpy(&type, &hdr->type, 2);
    type = pj_ntohs(type);

    /* If the packet is a STUN Binding response and part of the
     * transaction ID matches our internal ID, then this is
     * our internal STUN message (Binding request or keep alive).
     * Give it to our STUN session.
     */
    if (!PJ_STUN_IS_RESPONSE(type) ||
	PJ_STUN_GET_METHOD(type) != PJ_STUN_BINDING_METHOD ||
	pj_memcmp(hdr->tsx_id, stun_sock->tsx_id, 10) != 0)
    {
	/* Not STUN Binding response, or STUN transaction ID mismatch.
	 * This is not our message too -- give it to application.
	 */
	goto process_app_data;
    }

    /* This is our STUN Binding response. Give it to the STUN session.
     * PJNATH_ESTUNDESTROYED means the socket was destroyed from within
     * a callback, so we must stop reading.
     */
    status = pj_stun_session_on_rx_pkt(stun_sock->stun_sess, data, size,
				       PJ_STUN_IS_DATAGRAM, NULL, NULL,
				       src_addr, addr_len);
    return status!=PJNATH_ESTUNDESTROYED ? PJ_TRUE : PJ_FALSE;

process_app_data:
    /* Hand the packet to the application; its return value decides
     * whether we keep reading.
     */
    if (stun_sock->cb.on_rx_data) {
	pj_bool_t ret;

	ret = (*stun_sock->cb.on_rx_data)(stun_sock, data, size,
					  src_addr, addr_len);
	return ret;
    }

    return PJ_TRUE;
}
+
+/* Callback from active socket about send status */
+static pj_bool_t on_data_sent(pj_activesock_t *asock,
+ pj_ioqueue_op_key_t *send_key,
+ pj_ssize_t sent)
+{
+ pj_stun_sock *stun_sock;
+
+ stun_sock = (pj_stun_sock*) pj_activesock_get_user_data(asock);
+
+ /* Don't report to callback if this is internal message */
+ if (send_key == &stun_sock->int_send_key) {
+ return PJ_TRUE;
+ }
+
+ /* Report to callback */
+ if (stun_sock->cb.on_data_sent) {
+ pj_bool_t ret;
+
+ /* If app gives NULL send_key in sendto() function, then give
+ * NULL in the callback too
+ */
+ if (send_key == &stun_sock->send_key)
+ send_key = NULL;
+
+ /* Call callback */
+ ret = (*stun_sock->cb.on_data_sent)(stun_sock, send_key, sent);
+
+ return ret;
+ }
+
+ return PJ_TRUE;
+}
+
diff --git a/pjnath/src/pjnath/stun_transaction.c b/pjnath/src/pjnath/stun_transaction.c
new file mode 100644
index 0000000..d714ecf
--- /dev/null
+++ b/pjnath/src/pjnath/stun_transaction.c
@@ -0,0 +1,448 @@
+/* $Id: stun_transaction.c 3753 2011-09-18 14:59:56Z bennylp $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <pjnath/stun_transaction.h>
+#include <pjnath/errno.h>
+#include <pj/assert.h>
+#include <pj/log.h>
+#include <pj/pool.h>
+#include <pj/string.h>
+#include <pj/timer.h>
+
+
+#define TIMER_ACTIVE 1
+
+
/* STUN client transaction descriptor: tracks one outstanding request,
 * its retransmission schedule, completion state, and an optional
 * deferred-destroy timer.
 */
struct pj_stun_client_tsx
{
    char		obj_name[PJ_MAX_OBJ_NAME]; /* Name used in logs.     */
    pj_stun_tsx_cb	cb;		    /* Callbacks (send/complete/...). */
    void		*user_data;	    /* Opaque application data.	      */

    pj_bool_t		complete;	    /* Response received/timed out.   */

    pj_bool_t		require_retransmit; /* Retransmit (UDP) vs single
					     * send with timeout (TCP).	      */
    unsigned		rto_msec;	    /* Initial retransmission timeout.*/
    pj_timer_entry	retransmit_timer;   /* Retransmit/timeout timer.      */
    unsigned		transmit_count;	    /* Number of (re)transmissions.   */
    pj_time_val		retransmit_time;    /* Current retransmit delay.      */
    pj_timer_heap_t	*timer_heap;	    /* Heap where timers are scheduled.*/

    pj_timer_entry	destroy_timer;	    /* Deferred destruction timer.    */

    void		*last_pkt;	    /* Last encoded packet; the buffer
					     * is owned by the caller of
					     * pj_stun_client_tsx_send_msg(). */
    unsigned		last_pkt_size;	    /* Size of last_pkt in bytes.     */
};
+
+
+static void retransmit_timer_callback(pj_timer_heap_t *timer_heap,
+ pj_timer_entry *timer);
+static void destroy_timer_callback(pj_timer_heap_t *timer_heap,
+ pj_timer_entry *timer);
+
+#define stun_perror(tsx,msg,rc) pjnath_perror(tsx->obj_name, msg, rc)
+
+/*
+ * Create a STUN client transaction.
+ */
+PJ_DEF(pj_status_t) pj_stun_client_tsx_create(pj_stun_config *cfg,
+ pj_pool_t *pool,
+ const pj_stun_tsx_cb *cb,
+ pj_stun_client_tsx **p_tsx)
+{
+ pj_stun_client_tsx *tsx;
+
+ PJ_ASSERT_RETURN(cfg && cb && p_tsx, PJ_EINVAL);
+ PJ_ASSERT_RETURN(cb->on_send_msg, PJ_EINVAL);
+
+ tsx = PJ_POOL_ZALLOC_T(pool, pj_stun_client_tsx);
+ tsx->rto_msec = cfg->rto_msec;
+ tsx->timer_heap = cfg->timer_heap;
+ pj_memcpy(&tsx->cb, cb, sizeof(*cb));
+
+ tsx->retransmit_timer.cb = &retransmit_timer_callback;
+ tsx->retransmit_timer.user_data = tsx;
+
+ tsx->destroy_timer.cb = &destroy_timer_callback;
+ tsx->destroy_timer.user_data = tsx;
+
+ pj_ansi_snprintf(tsx->obj_name, sizeof(tsx->obj_name), "stuntsx%p", tsx);
+
+ *p_tsx = tsx;
+
+ PJ_LOG(5,(tsx->obj_name, "STUN client transaction created"));
+ return PJ_SUCCESS;
+}
+
+
+PJ_DEF(pj_status_t) pj_stun_client_tsx_schedule_destroy(
+ pj_stun_client_tsx *tsx,
+ const pj_time_val *delay)
+{
+ pj_status_t status;
+
+ PJ_ASSERT_RETURN(tsx && delay, PJ_EINVAL);
+ PJ_ASSERT_RETURN(tsx->cb.on_destroy, PJ_EINVAL);
+
+ /* Cancel previously registered timer */
+ if (tsx->destroy_timer.id != 0) {
+ pj_timer_heap_cancel(tsx->timer_heap, &tsx->destroy_timer);
+ tsx->destroy_timer.id = 0;
+ }
+
+ /* Stop retransmission, just in case */
+ if (tsx->retransmit_timer.id != 0) {
+ pj_timer_heap_cancel(tsx->timer_heap, &tsx->retransmit_timer);
+ tsx->retransmit_timer.id = 0;
+ }
+
+ status = pj_timer_heap_schedule(tsx->timer_heap,
+ &tsx->destroy_timer, delay);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ tsx->destroy_timer.id = TIMER_ACTIVE;
+ tsx->cb.on_complete = NULL;
+
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Destroy transaction immediately.
+ */
+PJ_DEF(pj_status_t) pj_stun_client_tsx_destroy(pj_stun_client_tsx *tsx)
+{
+ PJ_ASSERT_RETURN(tsx, PJ_EINVAL);
+
+ if (tsx->retransmit_timer.id != 0) {
+ pj_timer_heap_cancel(tsx->timer_heap, &tsx->retransmit_timer);
+ tsx->retransmit_timer.id = 0;
+ }
+ if (tsx->destroy_timer.id != 0) {
+ pj_timer_heap_cancel(tsx->timer_heap, &tsx->destroy_timer);
+ tsx->destroy_timer.id = 0;
+ }
+
+ PJ_LOG(5,(tsx->obj_name, "STUN client transaction destroyed"));
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Check if transaction has completed.
+ */
+PJ_DEF(pj_bool_t) pj_stun_client_tsx_is_complete(pj_stun_client_tsx *tsx)
+{
+ PJ_ASSERT_RETURN(tsx, PJ_FALSE);
+ return tsx->complete;
+}
+
+
+/*
+ * Set user data.
+ */
+PJ_DEF(pj_status_t) pj_stun_client_tsx_set_data(pj_stun_client_tsx *tsx,
+ void *data)
+{
+ PJ_ASSERT_RETURN(tsx, PJ_EINVAL);
+ tsx->user_data = data;
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Get the user data
+ */
+PJ_DEF(void*) pj_stun_client_tsx_get_data(pj_stun_client_tsx *tsx)
+{
+ PJ_ASSERT_RETURN(tsx, NULL);
+ return tsx->user_data;
+}
+
+
+/*
+ * Transmit message.
+ */
+static pj_status_t tsx_transmit_msg(pj_stun_client_tsx *tsx)
+{
+ pj_status_t status;
+
+ PJ_ASSERT_RETURN(tsx->retransmit_timer.id == 0 ||
+ !tsx->require_retransmit, PJ_EBUSY);
+
+ if (tsx->require_retransmit) {
+ /* Calculate retransmit/timeout delay */
+ if (tsx->transmit_count == 0) {
+ tsx->retransmit_time.sec = 0;
+ tsx->retransmit_time.msec = tsx->rto_msec;
+
+ } else if (tsx->transmit_count < PJ_STUN_MAX_TRANSMIT_COUNT-1) {
+ unsigned msec;
+
+ msec = PJ_TIME_VAL_MSEC(tsx->retransmit_time);
+ msec <<= 1;
+ tsx->retransmit_time.sec = msec / 1000;
+ tsx->retransmit_time.msec = msec % 1000;
+
+ } else {
+ tsx->retransmit_time.sec = PJ_STUN_TIMEOUT_VALUE / 1000;
+ tsx->retransmit_time.msec = PJ_STUN_TIMEOUT_VALUE % 1000;
+ }
+
+ /* Schedule timer first because when send_msg() failed we can
+ * cancel it (as opposed to when schedule_timer() failed we cannot
+ * cancel transmission).
+ */;
+ status = pj_timer_heap_schedule(tsx->timer_heap,
+ &tsx->retransmit_timer,
+ &tsx->retransmit_time);
+ if (status != PJ_SUCCESS) {
+ tsx->retransmit_timer.id = 0;
+ return status;
+ }
+ tsx->retransmit_timer.id = TIMER_ACTIVE;
+ }
+
+
+ tsx->transmit_count++;
+
+ PJ_LOG(5,(tsx->obj_name, "STUN sending message (transmit count=%d)",
+ tsx->transmit_count));
+ pj_log_push_indent();
+
+ /* Send message */
+ status = tsx->cb.on_send_msg(tsx, tsx->last_pkt, tsx->last_pkt_size);
+
+ if (status == PJNATH_ESTUNDESTROYED) {
+ /* We've been destroyed, don't access the object. */
+ } else if (status != PJ_SUCCESS) {
+ if (tsx->retransmit_timer.id != 0) {
+ pj_timer_heap_cancel(tsx->timer_heap,
+ &tsx->retransmit_timer);
+ tsx->retransmit_timer.id = 0;
+ }
+ stun_perror(tsx, "STUN error sending message", status);
+ }
+
+ pj_log_pop_indent();
+ return status;
+}
+
+
+/*
+ * Send outgoing message and start STUN transaction.
+ */
+PJ_DEF(pj_status_t) pj_stun_client_tsx_send_msg(pj_stun_client_tsx *tsx,
+ pj_bool_t retransmit,
+ void *pkt,
+ unsigned pkt_len)
+{
+ pj_status_t status;
+
+ PJ_ASSERT_RETURN(tsx && pkt && pkt_len, PJ_EINVAL);
+ PJ_ASSERT_RETURN(tsx->retransmit_timer.id == 0, PJ_EBUSY);
+
+ /* Encode message */
+ tsx->last_pkt = pkt;
+ tsx->last_pkt_size = pkt_len;
+
+ /* Update STUN retransmit flag */
+ tsx->require_retransmit = retransmit;
+
+ /* For TCP, schedule timeout timer after PJ_STUN_TIMEOUT_VALUE.
+ * Since we don't have timeout timer, simulate this by using
+ * retransmit timer.
+ */
+ if (!retransmit) {
+ unsigned timeout;
+
+ pj_assert(tsx->retransmit_timer.id == 0);
+ tsx->transmit_count = PJ_STUN_MAX_TRANSMIT_COUNT;
+
+ timeout = tsx->rto_msec * 16;
+ tsx->retransmit_time.sec = timeout / 1000;
+ tsx->retransmit_time.msec = timeout % 1000;
+
+ /* Schedule timer first because when send_msg() failed we can
+ * cancel it (as opposed to when schedule_timer() failed we cannot
+ * cancel transmission).
+ */;
+ status = pj_timer_heap_schedule(tsx->timer_heap,
+ &tsx->retransmit_timer,
+ &tsx->retransmit_time);
+ if (status != PJ_SUCCESS) {
+ tsx->retransmit_timer.id = 0;
+ return status;
+ }
+ tsx->retransmit_timer.id = TIMER_ACTIVE;
+ }
+
+ /* Send the message */
+ status = tsx_transmit_msg(tsx);
+ if (status != PJ_SUCCESS) {
+ if (tsx->retransmit_timer.id != 0) {
+ pj_timer_heap_cancel(tsx->timer_heap,
+ &tsx->retransmit_timer);
+ tsx->retransmit_timer.id = 0;
+ }
+ return status;
+ }
+
+ return PJ_SUCCESS;
+}
+
+
/* Retransmit timer callback: either retransmit the request or, once
 * PJ_STUN_MAX_TRANSMIT_COUNT is reached, fail the transaction with
 * PJNATH_ESTUNTIMEDOUT. The on_complete callback may destroy the
 * transaction, so the object must not be touched after invoking it.
 */
static void retransmit_timer_callback(pj_timer_heap_t *timer_heap,
				      pj_timer_entry *timer)
{
    pj_stun_client_tsx *tsx = (pj_stun_client_tsx *) timer->user_data;
    pj_status_t status;

    PJ_UNUSED_ARG(timer_heap);

    if (tsx->transmit_count >= PJ_STUN_MAX_TRANSMIT_COUNT) {
	/* Retransmission count exceeded. Transaction has failed */
	tsx->retransmit_timer.id = 0;
	PJ_LOG(4,(tsx->obj_name, "STUN timeout waiting for response"));
	pj_log_push_indent();
	if (!tsx->complete) {
	    tsx->complete = PJ_TRUE;
	    if (tsx->cb.on_complete) {
		tsx->cb.on_complete(tsx, PJNATH_ESTUNTIMEDOUT, NULL, NULL, 0);
	    }
	}
	/* We might have been destroyed, don't try to access the object */
	pj_log_pop_indent();
	return;
    }

    /* Clear the timer id before retransmitting; tsx_transmit_msg()
     * will reschedule it.
     */
    tsx->retransmit_timer.id = 0;
    status = tsx_transmit_msg(tsx);
    if (status == PJNATH_ESTUNDESTROYED) {
	/* We've been destroyed, don't try to access the object */
    } else if (status != PJ_SUCCESS) {
	/* Sending failed: report the error as transaction completion */
	tsx->retransmit_timer.id = 0;
	if (!tsx->complete) {
	    tsx->complete = PJ_TRUE;
	    if (tsx->cb.on_complete) {
		tsx->cb.on_complete(tsx, status, NULL, NULL, 0);
	    }
	}
	/* We might have been destroyed, don't try to access the object */
    }
}
+
+/*
+ * Request to retransmit the request.
+ */
+PJ_DEF(pj_status_t) pj_stun_client_tsx_retransmit(pj_stun_client_tsx *tsx)
+{
+ if (tsx->destroy_timer.id != 0) {
+ return PJ_SUCCESS;
+ }
+
+ if (tsx->retransmit_timer.id != 0) {
+ pj_timer_heap_cancel(tsx->timer_heap, &tsx->retransmit_timer);
+ tsx->retransmit_timer.id = 0;
+ }
+
+ return tsx_transmit_msg(tsx);
+}
+
+/* Timer callback to destroy transaction */
+static void destroy_timer_callback(pj_timer_heap_t *timer_heap,
+ pj_timer_entry *timer)
+{
+ pj_stun_client_tsx *tsx = (pj_stun_client_tsx *) timer->user_data;
+
+ PJ_UNUSED_ARG(timer_heap);
+
+ tsx->destroy_timer.id = PJ_FALSE;
+ tsx->cb.on_destroy(tsx);
+ /* Don't access transaction after this */
+}
+
+
/*
 * Notify the STUN transaction about the arrival of a STUN response
 * (the caller has already matched the transaction ID). Cancels
 * retransmission, translates an ERROR-CODE attribute into a status,
 * and invokes on_complete() exactly once.
 */
PJ_DEF(pj_status_t) pj_stun_client_tsx_on_rx_msg(pj_stun_client_tsx *tsx,
						 const pj_stun_msg *msg,
						 const pj_sockaddr_t *src_addr,
						 unsigned src_addr_len)
{
    pj_stun_errcode_attr *err_attr;
    pj_status_t status;

    /* Must be STUN response message */
    if (!PJ_STUN_IS_SUCCESS_RESPONSE(msg->hdr.type) &&
	!PJ_STUN_IS_ERROR_RESPONSE(msg->hdr.type))
    {
	PJ_LOG(4,(tsx->obj_name,
		  "STUN rx_msg() error: not response message"));
	return PJNATH_EINSTUNMSGTYPE;
    }


    /* We have a response with matching transaction ID.
     * We can cancel retransmit timer now.
     */
    if (tsx->retransmit_timer.id) {
	pj_timer_heap_cancel(tsx->timer_heap, &tsx->retransmit_timer);
	tsx->retransmit_timer.id = 0;
    }

    /* Find STUN error code attribute */
    err_attr = (pj_stun_errcode_attr*)
	       pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_ERROR_CODE, 0);

    if (err_attr && err_attr->err_code <= 200) {
	/* draft-ietf-behave-rfc3489bis-05.txt Section 8.3.2:
	 * Any response between 100 and 299 MUST result in the cessation
	 * of request retransmissions, but otherwise is discarded.
	 *
	 * NOTE(review): the comparison uses <= 200 while the cited text
	 * says 100-299 -- confirm which bound is intended.
	 */
	PJ_LOG(4,(tsx->obj_name,
		  "STUN rx_msg() error: received provisional %d code (%.*s)",
		  err_attr->err_code,
		  (int)err_attr->reason.slen,
		  err_attr->reason.ptr));
	return PJ_SUCCESS;
    }

    /* Map the ERROR-CODE (if any) to a pjnath status value */
    if (err_attr == NULL) {
	status = PJ_SUCCESS;
    } else {
	status = PJ_STATUS_FROM_STUN_CODE(err_attr->err_code);
    }

    /* Call callback at most once; on_complete() may destroy us */
    if (!tsx->complete) {
	tsx->complete = PJ_TRUE;
	if (tsx->cb.on_complete) {
	    tsx->cb.on_complete(tsx, status, msg, src_addr, src_addr_len);
	}
	/* We might have been destroyed, don't try to access the object */
    }

    return PJ_SUCCESS;

}
+
diff --git a/pjnath/src/pjnath/turn_session.c b/pjnath/src/pjnath/turn_session.c
new file mode 100644
index 0000000..cbe8f5c
--- /dev/null
+++ b/pjnath/src/pjnath/turn_session.c
@@ -0,0 +1,2040 @@
+/* $Id: turn_session.c 3844 2011-10-24 15:03:43Z bennylp $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <pjnath/turn_session.h>
+#include <pjnath/errno.h>
+#include <pjlib-util/srv_resolver.h>
+#include <pj/addr_resolv.h>
+#include <pj/assert.h>
+#include <pj/errno.h>
+#include <pj/hash.h>
+#include <pj/lock.h>
+#include <pj/log.h>
+#include <pj/os.h>
+#include <pj/pool.h>
+#include <pj/rand.h>
+#include <pj/sock.h>
+
+#define PJ_TURN_CHANNEL_MIN 0x4000
+#define PJ_TURN_CHANNEL_MAX 0x7FFF /* inclusive */
+#define PJ_TURN_CHANNEL_HTABLE_SIZE 8
+#define PJ_TURN_PERM_HTABLE_SIZE 8
+
/* Printable names returned by pj_turn_state_name(). The order must
 * stay in sync with the pj_turn_state_t enum declaration
 * (NOTE(review): enum declared in the header -- confirm ordering there).
 */
static const char *state_names[] =
{
    "Null",
    "Resolving",
    "Resolved",
    "Allocating",
    "Ready",
    "Deallocating",
    "Deallocated",
    "Destroying"
};
+
/* Identifiers stored in sess->timer.id to distinguish what the single
 * session timer is currently scheduled for.
 */
enum timer_id_t
{
    TIMER_NONE,		/* Timer is not scheduled.		      */
    TIMER_KEEP_ALIVE,	/* Periodic allocation keep-alive/refresh.    */
    TIMER_DESTROY	/* Deferred session destruction.	      */
};
+
/* This structure describes a channel binding. A channel binding is indexed
 * by the channel number or by the IP address and port number of the peer.
 */
struct ch_t
{
    /* The channel number */
    pj_uint16_t	    num;

    /* PJ_TRUE if we've received successful response to ChannelBind request
     * for this channel.
     */
    pj_bool_t	    bound;

    /* The peer IP address and port */
    pj_sockaddr	    addr;

    /* The channel binding expiration */
    pj_time_val	    expiry;
};
+
+
/* This structure describes a permission. A permission is identified by its
 * IP address only (the port number plays no role and MUST be zero).
 */
struct perm_t
{
    /* Cache of hash value to speed-up lookup */
    pj_uint32_t	    hval;

    /* The permission IP address. The port number MUST be zero */
    pj_sockaddr	    addr;

    /* Number of peers that uses this permission. */
    unsigned	    peer_cnt;

    /* Automatically renew this permission once it expires? */
    pj_bool_t	    renew;

    /* The permission expiration */
    pj_time_val	    expiry;

    /* Arbitrary/random pointer value (token) to map this perm with the
     * request to create it. It is used to invalidate this perm when the
     * request fails.
     */
    void	    *req_token;
};
+
+
/* The TURN client session structure: one TURN allocation on one server,
 * including server resolution state, channel bindings, and permissions.
 */
struct pj_turn_session
{
    pj_pool_t		*pool;		/* Pool owning this session.	      */
    const char		*obj_name;	/* Name used in logs.		      */
    pj_turn_session_cb	cb;		/* Application callbacks.	      */
    void		*user_data;	/* Opaque application data.	      */
    pj_stun_config	stun_cfg;	/* Copy of the STUN configuration.    */

    pj_lock_t		*lock;		/* Serializes access to the session.  */
    int			busy;		/* Re-entrancy/busy counter.	      */

    pj_turn_state_t	state;		/* Current state (see state_names).   */
    pj_status_t		last_status;	/* Last error status.		      */
    pj_bool_t		pending_destroy;  /* Destroy requested while busy.    */
    pj_bool_t		destroy_notified; /* on_state destroy already sent.   */

    pj_stun_session	*stun;		/* Underlying STUN session.	      */

    unsigned		lifetime;	/* Allocation lifetime (seconds).     */
    int			ka_interval;	/* Keep-alive interval (seconds).     */
    pj_time_val		expiry;		/* When the allocation expires.	      */

    pj_timer_heap_t	*timer_heap;	/* Heap for the session timer.	      */
    pj_timer_entry	timer;		/* Single timer; id is timer_id_t.    */

    pj_dns_srv_async_query *dns_async;	/* Pending DNS SRV query, if any.     */
    pj_uint16_t		default_port;	/* Port used when SRV yields none.    */

    pj_uint16_t		af;		/* Address family of the session.     */
    pj_turn_tp_type	conn_type;	/* Transport type to the server.      */
    pj_uint16_t		srv_addr_cnt;	/* Number of resolved server addrs.   */
    pj_sockaddr		*srv_addr_list;	/* Array of resolved server addrs.    */
    pj_sockaddr		*srv_addr;	/* Currently selected server addr.    */

    pj_bool_t		pending_alloc;	/* Allocate requested before resolve. */
    pj_turn_alloc_param	alloc_param;	/* Parameters for the Allocate req.   */

    pj_sockaddr		mapped_addr;	/* Our server-reflexive address.      */
    pj_sockaddr		relay_addr;	/* The allocated relay address.	      */

    pj_hash_table_t	*ch_table;	/* Channel bindings (struct ch_t).    */
    pj_hash_table_t	*perm_table;	/* Permissions (struct perm_t).	      */

    pj_uint32_t		send_ind_tsx_id[3]; /* Tsx ID for Send indications.   */
    /* tx_pkt must be 16bit aligned */
    pj_uint8_t		tx_pkt[PJ_TURN_MAX_PKT_LEN];

    pj_uint16_t		next_ch;	/* Next channel number to assign.     */
};
+
+
+/*
+ * Prototypes.
+ */
+static void sess_shutdown(pj_turn_session *sess,
+ pj_status_t status);
+static void do_destroy(pj_turn_session *sess);
+static void send_refresh(pj_turn_session *sess, int lifetime);
+static pj_status_t stun_on_send_msg(pj_stun_session *sess,
+ void *token,
+ const void *pkt,
+ pj_size_t pkt_size,
+ const pj_sockaddr_t *dst_addr,
+ unsigned addr_len);
+static void stun_on_request_complete(pj_stun_session *sess,
+ pj_status_t status,
+ void *token,
+ pj_stun_tx_data *tdata,
+ const pj_stun_msg *response,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len);
+static pj_status_t stun_on_rx_indication(pj_stun_session *sess,
+ const pj_uint8_t *pkt,
+ unsigned pkt_len,
+ const pj_stun_msg *msg,
+ void *token,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len);
+static void dns_srv_resolver_cb(void *user_data,
+ pj_status_t status,
+ const pj_dns_srv_record *rec);
+static struct ch_t *lookup_ch_by_addr(pj_turn_session *sess,
+ const pj_sockaddr_t *addr,
+ unsigned addr_len,
+ pj_bool_t update,
+ pj_bool_t bind_channel);
+static struct ch_t *lookup_ch_by_chnum(pj_turn_session *sess,
+ pj_uint16_t chnum);
+static struct perm_t *lookup_perm(pj_turn_session *sess,
+ const pj_sockaddr_t *addr,
+ unsigned addr_len,
+ pj_bool_t update);
+static void invalidate_perm(pj_turn_session *sess,
+ struct perm_t *perm);
+static void on_timer_event(pj_timer_heap_t *th, pj_timer_entry *e);
+
+
+/*
+ * Create default pj_turn_alloc_param.
+ */
+PJ_DEF(void) pj_turn_alloc_param_default(pj_turn_alloc_param *prm)
+{
+ pj_bzero(prm, sizeof(*prm));
+}
+
+/*
+ * Duplicate pj_turn_alloc_param.
+ */
+PJ_DEF(void) pj_turn_alloc_param_copy( pj_pool_t *pool,
+ pj_turn_alloc_param *dst,
+ const pj_turn_alloc_param *src)
+{
+ PJ_UNUSED_ARG(pool);
+ pj_memcpy(dst, src, sizeof(*dst));
+}
+
+/*
+ * Get TURN state name.
+ */
+PJ_DEF(const char*) pj_turn_state_name(pj_turn_state_t state)
+{
+ return state_names[state];
+}
+
/*
 * Create TURN client session. Allocates the pool, hash tables, lock,
 * and the internal STUN session; on any failure all partially created
 * resources are released via do_destroy().
 */
PJ_DEF(pj_status_t) pj_turn_session_create( const pj_stun_config *cfg,
					    const char *name,
					    int af,
					    pj_turn_tp_type conn_type,
					    const pj_turn_session_cb *cb,
					    unsigned options,
					    void *user_data,
					    pj_turn_session **p_sess)
{
    pj_pool_t *pool;
    pj_turn_session *sess;
    pj_stun_session_cb stun_cb;
    pj_lock_t *null_lock;
    pj_status_t status;

    PJ_ASSERT_RETURN(cfg && cfg->pf && cb && p_sess, PJ_EINVAL);
    PJ_ASSERT_RETURN(cb->on_send_pkt, PJ_EINVAL);

    /* "options" is accepted for future use but currently ignored */
    PJ_UNUSED_ARG(options);

    if (name == NULL)
	name = "turn%p";

    /* Allocate and create TURN session */
    pool = pj_pool_create(cfg->pf, name, PJNATH_POOL_LEN_TURN_SESS,
			  PJNATH_POOL_INC_TURN_SESS, NULL);
    sess = PJ_POOL_ZALLOC_T(pool, pj_turn_session);
    sess->pool = pool;
    sess->obj_name = pool->obj_name;
    sess->timer_heap = cfg->timer_heap;
    sess->af = (pj_uint16_t)af;
    sess->conn_type = conn_type;
    sess->ka_interval = PJ_TURN_KEEP_ALIVE_SEC;
    sess->user_data = user_data;
    sess->next_ch = PJ_TURN_CHANNEL_MIN;

    /* Copy STUN config */
    pj_memcpy(&sess->stun_cfg, cfg, sizeof(pj_stun_config));

    /* Copy callback */
    pj_memcpy(&sess->cb, cb, sizeof(*cb));

    /* Peer hash table */
    sess->ch_table = pj_hash_create(pool, PJ_TURN_CHANNEL_HTABLE_SIZE);

    /* Permission hash table */
    sess->perm_table = pj_hash_create(pool, PJ_TURN_PERM_HTABLE_SIZE);

    /* Session lock */
    status = pj_lock_create_recursive_mutex(pool, sess->obj_name,
					    &sess->lock);
    if (status != PJ_SUCCESS) {
	do_destroy(sess);
	return status;
    }

    /* Timer */
    pj_timer_entry_init(&sess->timer, TIMER_NONE, sess, &on_timer_event);

    /* Create STUN session */
    pj_bzero(&stun_cb, sizeof(stun_cb));
    stun_cb.on_send_msg = &stun_on_send_msg;
    stun_cb.on_request_complete = &stun_on_request_complete;
    stun_cb.on_rx_indication = &stun_on_rx_indication;
    status = pj_stun_session_create(&sess->stun_cfg, sess->obj_name, &stun_cb,
				    PJ_FALSE, &sess->stun);
    if (status != PJ_SUCCESS) {
	do_destroy(sess);
	return status;
    }

    /* Attach ourself to STUN session */
    pj_stun_session_set_user_data(sess->stun, sess);

    /* Replace mutex in STUN session with a NULL mutex, since access to
     * STUN session is serialized.
     */
    status = pj_lock_create_null_mutex(pool, name, &null_lock);
    if (status != PJ_SUCCESS) {
	do_destroy(sess);
	return status;
    }
    pj_stun_session_set_lock(sess->stun, null_lock, PJ_TRUE);

    /* Done */

    PJ_LOG(4,(sess->obj_name, "TURN client session created"));

    *p_sess = sess;
    return PJ_SUCCESS;
}
+
+
+/* Destroy */
+static void do_destroy(pj_turn_session *sess)
+{
+ /* Final teardown: cancel timer, destroy STUN session, then the lock
+ * and, last of all, the pool that holds the session object itself.
+ */
+
+ /* Lock session */
+ if (sess->lock) {
+ pj_lock_acquire(sess->lock);
+ }
+
+ /* Cancel pending timer, if any */
+ if (sess->timer.id != TIMER_NONE) {
+ pj_timer_heap_cancel(sess->timer_heap, &sess->timer);
+ sess->timer.id = TIMER_NONE;
+ }
+
+ /* Destroy STUN session */
+ if (sess->stun) {
+ pj_stun_session_destroy(sess->stun);
+ sess->stun = NULL;
+ }
+
+ /* Destroy lock */
+ if (sess->lock) {
+ pj_lock_release(sess->lock);
+ pj_lock_destroy(sess->lock);
+ sess->lock = NULL;
+ }
+
+ /* Destroy pool. sess was allocated from this pool, so nothing may
+ * touch *sess after pj_pool_release() below.
+ */
+ if (sess->pool) {
+ pj_pool_t *pool = sess->pool;
+
+ PJ_LOG(4,(sess->obj_name, "TURN client session destroyed"));
+
+ sess->pool = NULL;
+ pj_pool_release(pool);
+ }
+}
+
+
+/* Set session state */
+static void set_state(pj_turn_session *sess, enum pj_turn_state_t state)
+{
+    pj_turn_state_t prev = sess->state;
+
+    /* Ignore no-op transitions. */
+    if (prev == state)
+        return;
+
+    PJ_LOG(4,(sess->obj_name, "State changed %s --> %s",
+              state_names[prev], state_names[state]));
+    sess->state = state;
+
+    /* Let the application observe the transition, if it asked to. */
+    if (sess->cb.on_state)
+        (*sess->cb.on_state)(sess, prev, state);
+}
+
+/*
+ * Notify application and shutdown the TURN session.
+ */
+static void sess_shutdown(pj_turn_session *sess,
+ pj_status_t status)
+{
+ pj_bool_t can_destroy = PJ_TRUE;
+
+ PJ_LOG(4,(sess->obj_name, "Request to shutdown in state %s, cause:%d",
+ state_names[sess->state], status));
+
+ /* Keep only the first failure cause. */
+ if (sess->last_status == PJ_SUCCESS && status != PJ_SUCCESS)
+ sess->last_status = status;
+
+ switch (sess->state) {
+ case PJ_TURN_STATE_NULL:
+ break;
+ case PJ_TURN_STATE_RESOLVING:
+ if (sess->dns_async != NULL) {
+ pj_dns_srv_cancel_query(sess->dns_async, PJ_FALSE);
+ sess->dns_async = NULL;
+ }
+ break;
+ case PJ_TURN_STATE_RESOLVED:
+ break;
+ case PJ_TURN_STATE_ALLOCATING:
+ /* We need to wait until allocation complete */
+ sess->pending_destroy = PJ_TRUE;
+ can_destroy = PJ_FALSE;
+ break;
+ case PJ_TURN_STATE_READY:
+ /* Send REFRESH with LIFETIME=0; the session is destroyed later,
+ * when the Refresh response (or failure) arrives.
+ */
+ can_destroy = PJ_FALSE;
+ send_refresh(sess, 0);
+ break;
+ case PJ_TURN_STATE_DEALLOCATING:
+ can_destroy = PJ_FALSE;
+ /* This may recursively call this function again with
+ * state==PJ_TURN_STATE_DEALLOCATED.
+ */
+ send_refresh(sess, 0);
+ break;
+ case PJ_TURN_STATE_DEALLOCATED:
+ case PJ_TURN_STATE_DESTROYING:
+ break;
+ }
+
+ if (can_destroy) {
+ /* Schedule destroy via a zero-delay timer so the actual teardown
+ * happens outside the current call chain.
+ */
+ pj_time_val delay = {0, 0};
+
+ set_state(sess, PJ_TURN_STATE_DESTROYING);
+
+ if (sess->timer.id != TIMER_NONE) {
+ pj_timer_heap_cancel(sess->timer_heap, &sess->timer);
+ sess->timer.id = TIMER_NONE;
+ }
+
+ sess->timer.id = TIMER_DESTROY;
+ pj_timer_heap_schedule(sess->timer_heap, &sess->timer, &delay);
+ }
+}
+
+
+/*
+ * Public API to destroy TURN client session.
+ */
+PJ_DEF(pj_status_t) pj_turn_session_shutdown(pj_turn_session *sess)
+{
+    PJ_ASSERT_RETURN(sess, PJ_EINVAL);
+
+    /* Run the graceful shutdown sequence under the session lock. */
+    pj_lock_acquire(sess->lock);
+    sess_shutdown(sess, PJ_SUCCESS);
+    pj_lock_release(sess->lock);
+
+    return PJ_SUCCESS;
+}
+
+
+/**
+ * Forcefully destroy the TURN session.
+ */
+PJ_DEF(pj_status_t) pj_turn_session_destroy( pj_turn_session *sess,
+                                             pj_status_t last_err)
+{
+    PJ_ASSERT_RETURN(sess, PJ_EINVAL);
+
+    /* Record the caller-supplied cause, but only if no earlier error
+     * has been recorded already.
+     */
+    if (sess->last_status == PJ_SUCCESS && last_err != PJ_SUCCESS)
+        sess->last_status = last_err;
+
+    /* Force the state machine straight to DEALLOCATED and shut down. */
+    set_state(sess, PJ_TURN_STATE_DEALLOCATED);
+    sess_shutdown(sess, PJ_SUCCESS);
+    return PJ_SUCCESS;
+}
+
+
+/*
+ * Get TURN session info.
+ */
+PJ_DEF(pj_status_t) pj_turn_session_get_info( pj_turn_session *sess,
+                                              pj_turn_session_info *info)
+{
+    pj_time_val now;
+
+    PJ_ASSERT_RETURN(sess && info, PJ_EINVAL);
+
+    pj_gettimeofday(&now);
+
+    /* Basic session state. */
+    info->state = sess->state;
+    info->conn_type = sess->conn_type;
+    info->last_status = sess->last_status;
+
+    /* Seconds until the allocation expires, relative to now. */
+    info->lifetime = sess->expiry.sec - now.sec;
+
+    /* Server address, if one has been selected already. */
+    if (sess->srv_addr == NULL)
+        pj_bzero(&info->server, sizeof(info->server));
+    else
+        pj_memcpy(&info->server, sess->srv_addr, sizeof(info->server));
+
+    /* Mapped and relayed transport addresses. */
+    pj_memcpy(&info->mapped_addr, &sess->mapped_addr,
+              sizeof(sess->mapped_addr));
+    pj_memcpy(&info->relay_addr, &sess->relay_addr,
+              sizeof(sess->relay_addr));
+
+    return PJ_SUCCESS;
+}
+
+
+/*
+ * Re-assign user data.
+ */
+PJ_DEF(pj_status_t) pj_turn_session_set_user_data( pj_turn_session *sess,
+                                                   void *user_data)
+{
+    /* Guard against a NULL session, for consistency with the other
+     * public setters (e.g. pj_turn_session_set_credential()).
+     */
+    PJ_ASSERT_RETURN(sess, PJ_EINVAL);
+
+    sess->user_data = user_data;
+    return PJ_SUCCESS;
+}
+
+
+/**
+ * Retrieve user data.
+ */
+PJ_DEF(void*) pj_turn_session_get_user_data(pj_turn_session *sess)
+{
+ /* Returns the pointer supplied at create time or via
+ * pj_turn_session_set_user_data().
+ */
+ return sess->user_data;
+}
+
+
+/*
+ * Configure message logging. By default all flags are enabled.
+ *
+ * @param sess The TURN client session.
+ * @param flags Bitmask combination of #pj_stun_sess_msg_log_flag
+ */
+PJ_DEF(void) pj_turn_session_set_log( pj_turn_session *sess,
+ unsigned flags)
+{
+ /* Simply forward the flags to the underlying STUN session.
+ * NOTE(review): no NULL check on sess/sess->stun here.
+ */
+ pj_stun_session_set_log(sess->stun, flags);
+}
+
+
+/*
+ * Set software name
+ */
+PJ_DEF(pj_status_t) pj_turn_session_set_software_name( pj_turn_session *sess,
+                                                       const pj_str_t *sw)
+{
+    pj_status_t rc;
+
+    /* Forward to the underlying STUN session, under the session lock. */
+    pj_lock_acquire(sess->lock);
+    rc = pj_stun_session_set_software_name(sess->stun, sw);
+    pj_lock_release(sess->lock);
+
+    return rc;
+}
+
+
+/**
+ * Set the server or domain name of the server.
+ */
+/*
+ * Resolve "domain" either as a literal IP, via DNS SRV (when a resolver
+ * is given), or via plain DNS A lookup. On success the session moves to
+ * RESOLVING (async) or RESOLVED (sync) state.
+ *
+ * Bug fix: the default-port validation in the synchronous branch used
+ * PJ_ASSERT_RETURN(), which returned while the session lock (acquired
+ * above) was still held. It now jumps to the common exit so the lock
+ * is always released.
+ */
+PJ_DEF(pj_status_t) pj_turn_session_set_server( pj_turn_session *sess,
+                                                const pj_str_t *domain,
+                                                int default_port,
+                                                pj_dns_resolver *resolver)
+{
+    pj_sockaddr tmp_addr;
+    pj_bool_t is_ip_addr;
+    pj_status_t status;
+
+    PJ_ASSERT_RETURN(sess && domain, PJ_EINVAL);
+    PJ_ASSERT_RETURN(sess->state == PJ_TURN_STATE_NULL, PJ_EINVALIDOP);
+
+    pj_lock_acquire(sess->lock);
+
+    /* See if "domain" contains just IP address */
+    tmp_addr.addr.sa_family = sess->af;
+    status = pj_inet_pton(sess->af, domain,
+                          pj_sockaddr_get_addr(&tmp_addr));
+    is_ip_addr = (status == PJ_SUCCESS);
+
+    if (!is_ip_addr && resolver) {
+        /* Resolve with DNS SRV resolution, and fallback to DNS A resolution
+         * if default_port is specified.
+         */
+        unsigned opt = 0;
+        pj_str_t res_name;
+
+        switch (sess->conn_type) {
+        case PJ_TURN_TP_UDP:
+            res_name = pj_str("_turn._udp.");
+            break;
+        case PJ_TURN_TP_TCP:
+            res_name = pj_str("_turn._tcp.");
+            break;
+        case PJ_TURN_TP_TLS:
+            res_name = pj_str("_turns._tcp.");
+            break;
+        default:
+            status = PJNATH_ETURNINTP;
+            goto on_return;
+        }
+
+        /* Fallback to DNS A only if default port is specified */
+        if (default_port>0 && default_port<65536) {
+            opt = PJ_DNS_SRV_FALLBACK_A;
+            sess->default_port = (pj_uint16_t)default_port;
+        }
+
+        PJ_LOG(5,(sess->obj_name, "Resolving %.*s%.*s with DNS SRV",
+                  (int)res_name.slen, res_name.ptr,
+                  (int)domain->slen, domain->ptr));
+        set_state(sess, PJ_TURN_STATE_RESOLVING);
+
+        /* User may have destroyed us in the callback */
+        if (sess->state != PJ_TURN_STATE_RESOLVING) {
+            status = PJ_ECANCELLED;
+            goto on_return;
+        }
+
+        status = pj_dns_srv_resolve(domain, &res_name, default_port,
+                                    sess->pool, resolver, opt, sess,
+                                    &dns_srv_resolver_cb, &sess->dns_async);
+        if (status != PJ_SUCCESS) {
+            set_state(sess, PJ_TURN_STATE_NULL);
+            goto on_return;
+        }
+
+    } else {
+        /* Resolver is not specified, resolve with standard gethostbyname().
+         * The default_port MUST be specified in this case.
+         */
+        pj_addrinfo *ai;
+        unsigned i, cnt;
+
+        /* Default port must be specified. Exit through on_return so the
+         * session lock is released on this error path.
+         */
+        if (default_port <= 0 || default_port >= 65536) {
+            pj_assert(!"default_port must be specified");
+            status = PJ_EINVAL;
+            goto on_return;
+        }
+        sess->default_port = (pj_uint16_t)default_port;
+
+        cnt = PJ_TURN_MAX_DNS_SRV_CNT;
+        ai = (pj_addrinfo*)
+             pj_pool_calloc(sess->pool, cnt, sizeof(pj_addrinfo));
+
+        PJ_LOG(5,(sess->obj_name, "Resolving %.*s with DNS A",
+                  (int)domain->slen, domain->ptr));
+        set_state(sess, PJ_TURN_STATE_RESOLVING);
+
+        /* User may have destroyed us in the callback */
+        if (sess->state != PJ_TURN_STATE_RESOLVING) {
+            status = PJ_ECANCELLED;
+            goto on_return;
+        }
+
+        status = pj_getaddrinfo(sess->af, domain, &cnt, ai);
+        if (status != PJ_SUCCESS)
+            goto on_return;
+
+        /* Build the candidate server list, forcing the session's address
+         * family and the default port on every entry.
+         */
+        sess->srv_addr_cnt = (pj_uint16_t)cnt;
+        sess->srv_addr_list = (pj_sockaddr*)
+                              pj_pool_calloc(sess->pool, cnt,
+                                             sizeof(pj_sockaddr));
+        for (i=0; i<cnt; ++i) {
+            pj_sockaddr *addr = &sess->srv_addr_list[i];
+            pj_memcpy(addr, &ai[i].ai_addr, sizeof(pj_sockaddr));
+            addr->addr.sa_family = sess->af;
+            addr->ipv4.sin_port = pj_htons(sess->default_port);
+        }
+
+        sess->srv_addr = &sess->srv_addr_list[0];
+        set_state(sess, PJ_TURN_STATE_RESOLVED);
+    }
+
+on_return:
+    pj_lock_release(sess->lock);
+    return status;
+}
+
+
+/**
+ * Set credential to be used by the session.
+ */
+PJ_DEF(pj_status_t) pj_turn_session_set_credential(pj_turn_session *sess,
+                                                   const pj_stun_auth_cred *cred)
+{
+    PJ_ASSERT_RETURN(sess && cred, PJ_EINVAL);
+    PJ_ASSERT_RETURN(sess->stun, PJ_EINVALIDOP);
+
+    /* TURN uses the long-term credential mechanism. */
+    pj_lock_acquire(sess->lock);
+    pj_stun_session_set_credential(sess->stun, PJ_STUN_AUTH_LONG_TERM, cred);
+    pj_lock_release(sess->lock);
+
+    return PJ_SUCCESS;
+}
+
+
+/**
+ * Create TURN allocation.
+ */
+PJ_DEF(pj_status_t) pj_turn_session_alloc(pj_turn_session *sess,
+ const pj_turn_alloc_param *param)
+{
+ pj_stun_tx_data *tdata;
+ pj_bool_t retransmit;
+ pj_status_t status;
+
+ PJ_ASSERT_RETURN(sess, PJ_EINVAL);
+ PJ_ASSERT_RETURN(sess->state>PJ_TURN_STATE_NULL &&
+ sess->state<=PJ_TURN_STATE_RESOLVED,
+ PJ_EINVALIDOP);
+
+ pj_lock_acquire(sess->lock);
+
+ /* Self-copy guard: skip the copy if the caller passed our own
+ * alloc_param back in.
+ */
+ if (param && param != &sess->alloc_param)
+ pj_turn_alloc_param_copy(sess->pool, &sess->alloc_param, param);
+
+ if (sess->state < PJ_TURN_STATE_RESOLVED) {
+ /* Server not resolved yet; remember the request so it can be
+ * performed once resolution completes (presumably by the DNS
+ * callback checking pending_alloc -- not visible here).
+ */
+ sess->pending_alloc = PJ_TRUE;
+
+ PJ_LOG(4,(sess->obj_name, "Pending ALLOCATE in state %s",
+ state_names[sess->state]));
+
+ pj_lock_release(sess->lock);
+ return PJ_SUCCESS;
+
+ }
+
+ /* Ready to allocate */
+ pj_assert(sess->state == PJ_TURN_STATE_RESOLVED);
+
+ /* Create a bare request */
+ status = pj_stun_session_create_req(sess->stun, PJ_STUN_ALLOCATE_REQUEST,
+ PJ_STUN_MAGIC, NULL, &tdata);
+ if (status != PJ_SUCCESS) {
+ pj_lock_release(sess->lock);
+ return status;
+ }
+
+ /* MUST include REQUESTED-TRANSPORT attribute */
+ pj_stun_msg_add_uint_attr(tdata->pool, tdata->msg,
+ PJ_STUN_ATTR_REQ_TRANSPORT,
+ PJ_STUN_SET_RT_PROTO(PJ_TURN_TP_UDP));
+
+ /* Include BANDWIDTH if requested */
+ if (sess->alloc_param.bandwidth > 0) {
+ pj_stun_msg_add_uint_attr(tdata->pool, tdata->msg,
+ PJ_STUN_ATTR_BANDWIDTH,
+ sess->alloc_param.bandwidth);
+ }
+
+ /* Include LIFETIME if requested */
+ if (sess->alloc_param.lifetime > 0) {
+ pj_stun_msg_add_uint_attr(tdata->pool, tdata->msg,
+ PJ_STUN_ATTR_LIFETIME,
+ sess->alloc_param.lifetime);
+ }
+
+ /* Server address must be set */
+ pj_assert(sess->srv_addr != NULL);
+
+ /* Send request. Only UDP needs STUN-level retransmission; reliable
+ * transports (TCP/TLS) do not.
+ */
+ set_state(sess, PJ_TURN_STATE_ALLOCATING);
+ retransmit = (sess->conn_type == PJ_TURN_TP_UDP);
+ status = pj_stun_session_send_msg(sess->stun, NULL, PJ_FALSE,
+ retransmit, sess->srv_addr,
+ pj_sockaddr_get_len(sess->srv_addr),
+ tdata);
+ if (status != PJ_SUCCESS) {
+ /* Set state back to RESOLVED. We don't want to destroy session now,
+ * let the application do it if it wants to.
+ */
+ set_state(sess, PJ_TURN_STATE_RESOLVED);
+ }
+
+ pj_lock_release(sess->lock);
+ return status;
+}
+
+
+/*
+ * Install or renew permissions
+ */
+PJ_DEF(pj_status_t) pj_turn_session_set_perm( pj_turn_session *sess,
+ unsigned addr_cnt,
+ const pj_sockaddr addr[],
+ unsigned options)
+{
+ pj_stun_tx_data *tdata;
+ pj_hash_iterator_t it_buf, *it;
+ void *req_token;
+ unsigned i, attr_added=0;
+ pj_status_t status;
+
+ PJ_ASSERT_RETURN(sess && addr_cnt && addr, PJ_EINVAL);
+
+ pj_lock_acquire(sess->lock);
+
+ /* Create a bare CreatePermission request */
+ status = pj_stun_session_create_req(sess->stun,
+ PJ_STUN_CREATE_PERM_REQUEST,
+ PJ_STUN_MAGIC, NULL, &tdata);
+ if (status != PJ_SUCCESS) {
+ pj_lock_release(sess->lock);
+ return status;
+ }
+
+ /* Create request token to map the request to the perm structures
+ * which the request belongs.
+ * NOTE(review): the token is a random value; a collision with a
+ * token from an outstanding request would group perms incorrectly.
+ */
+ req_token = (void*)(long)pj_rand();
+
+ /* Process the addresses */
+ for (i=0; i<addr_cnt; ++i) {
+ struct perm_t *perm;
+
+ /* Lookup the perm structure and create if it doesn't exist */
+ perm = lookup_perm(sess, &addr[i], pj_sockaddr_get_len(&addr[i]),
+ PJ_TRUE);
+ /* Bit 0 of 'options' marks this permission for auto-renewal. */
+ perm->renew = (options & 0x01);
+
+ /* Only add to the request if the request doesn't contain this
+ * address yet (duplicates in addr[] share the same token).
+ */
+ if (perm->req_token != req_token) {
+ perm->req_token = req_token;
+
+ /* Add XOR-PEER-ADDRESS */
+ status = pj_stun_msg_add_sockaddr_attr(tdata->pool, tdata->msg,
+ PJ_STUN_ATTR_XOR_PEER_ADDR,
+ PJ_TRUE,
+ &addr[i],
+ sizeof(addr[i]));
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ ++attr_added;
+ }
+ }
+
+ pj_assert(attr_added != 0);
+
+ /* Send the request; req_token travels with it so the completion
+ * callback can find the affected perm entries.
+ */
+ status = pj_stun_session_send_msg(sess->stun, req_token, PJ_FALSE,
+ (sess->conn_type==PJ_TURN_TP_UDP),
+ sess->srv_addr,
+ pj_sockaddr_get_len(sess->srv_addr),
+ tdata);
+ if (status != PJ_SUCCESS) {
+ /* tdata is already destroyed */
+ tdata = NULL;
+ goto on_error;
+ }
+
+ pj_lock_release(sess->lock);
+ return PJ_SUCCESS;
+
+on_error:
+ /* destroy tdata */
+ if (tdata) {
+ pj_stun_msg_destroy_tdata(sess->stun, tdata);
+ }
+ /* invalidate perm structures associated with this request */
+ it = pj_hash_first(sess->perm_table, &it_buf);
+ while (it) {
+ struct perm_t *perm = (struct perm_t*)
+ pj_hash_this(sess->perm_table, it);
+ it = pj_hash_next(sess->perm_table, it);
+ if (perm->req_token == req_token)
+ invalidate_perm(sess, perm);
+ }
+ pj_lock_release(sess->lock);
+ return status;
+}
+
+/*
+ * Send REFRESH
+ */
+static void send_refresh(pj_turn_session *sess, int lifetime)
+{
+ pj_stun_tx_data *tdata;
+ pj_status_t status;
+
+ /* NOTE(review): sess_shutdown() also calls this while the session is
+ * in DEALLOCATING state; the assertion below only allows READY, so
+ * that call becomes a no-op in release builds -- verify intended.
+ */
+ PJ_ASSERT_ON_FAIL(sess->state==PJ_TURN_STATE_READY, return);
+
+ /* Create a bare REFRESH request */
+ status = pj_stun_session_create_req(sess->stun, PJ_STUN_REFRESH_REQUEST,
+ PJ_STUN_MAGIC, NULL, &tdata);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ /* Add LIFETIME (a negative value means "omit the attribute"). */
+ if (lifetime >= 0) {
+ pj_stun_msg_add_uint_attr(tdata->pool, tdata->msg,
+ PJ_STUN_ATTR_LIFETIME, lifetime);
+ }
+
+ /* Send request. LIFETIME=0 is a deallocation request. */
+ if (lifetime == 0) {
+ set_state(sess, PJ_TURN_STATE_DEALLOCATING);
+ }
+
+ status = pj_stun_session_send_msg(sess->stun, NULL, PJ_FALSE,
+ (sess->conn_type==PJ_TURN_TP_UDP),
+ sess->srv_addr,
+ pj_sockaddr_get_len(sess->srv_addr),
+ tdata);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ return;
+
+on_error:
+ /* When a deallocation request cannot even be sent, give up and
+ * shut the session down directly.
+ */
+ if (lifetime == 0) {
+ set_state(sess, PJ_TURN_STATE_DEALLOCATED);
+ sess_shutdown(sess, status);
+ }
+}
+
+
+/**
+ * Relay data to the specified peer through the session.
+ */
+PJ_DEF(pj_status_t) pj_turn_session_sendto( pj_turn_session *sess,
+ const pj_uint8_t *pkt,
+ unsigned pkt_len,
+ const pj_sockaddr_t *addr,
+ unsigned addr_len)
+{
+ struct ch_t *ch;
+ struct perm_t *perm;
+ pj_status_t status;
+
+ PJ_ASSERT_RETURN(sess && pkt && pkt_len && addr && addr_len,
+ PJ_EINVAL);
+
+ /* Return error if we're not ready.
+ * NOTE(review): this reads sess->state before acquiring the lock --
+ * presumably an acceptable racy fast-path check; verify.
+ */
+ if (sess->state != PJ_TURN_STATE_READY) {
+ return PJ_EIGNORED;
+ }
+
+ /* Lock session now */
+ pj_lock_acquire(sess->lock);
+
+ /* Lookup permission first */
+ perm = lookup_perm(sess, addr, pj_sockaddr_get_len(addr), PJ_FALSE);
+ if (perm == NULL) {
+ /* Permission doesn't exist, install it first */
+ char ipstr[PJ_INET6_ADDRSTRLEN+2];
+
+ PJ_LOG(4,(sess->obj_name,
+ "sendto(): IP %s has no permission, requesting it first..",
+ pj_sockaddr_print(addr, ipstr, sizeof(ipstr), 2)));
+
+ status = pj_turn_session_set_perm(sess, 1, (const pj_sockaddr*)addr,
+ 0);
+ if (status != PJ_SUCCESS) {
+ pj_lock_release(sess->lock);
+ return status;
+ }
+ }
+
+ /* See if the peer is bound to a channel number; if so the compact
+ * ChannelData framing can be used instead of a Send Indication.
+ */
+ ch = lookup_ch_by_addr(sess, addr, pj_sockaddr_get_len(addr),
+ PJ_FALSE, PJ_FALSE);
+ if (ch && ch->num != PJ_TURN_INVALID_CHANNEL && ch->bound) {
+ unsigned total_len;
+
+ /* Peer is assigned a channel number, we can use ChannelData */
+ pj_turn_channel_data *cd = (pj_turn_channel_data*)sess->tx_pkt;
+
+ pj_assert(sizeof(*cd)==4);
+
+ /* Calculate total length, including paddings (round the header
+ * plus payload up to a 4-byte boundary).
+ */
+ total_len = (pkt_len + sizeof(*cd) + 3) & (~3);
+ if (total_len > sizeof(sess->tx_pkt)) {
+ status = PJ_ETOOBIG;
+ goto on_return;
+ }
+
+ cd->ch_number = pj_htons((pj_uint16_t)ch->num);
+ cd->length = pj_htons((pj_uint16_t)pkt_len);
+ pj_memcpy(cd+1, pkt, pkt_len);
+
+ pj_assert(sess->srv_addr != NULL);
+
+ status = sess->cb.on_send_pkt(sess, sess->tx_pkt, total_len,
+ sess->srv_addr,
+ pj_sockaddr_get_len(sess->srv_addr));
+
+ } else {
+ /* Use Send Indication. */
+ pj_stun_sockaddr_attr peer_attr;
+ pj_stun_binary_attr data_attr;
+ pj_stun_msg send_ind;
+ pj_size_t send_ind_len;
+
+ /* Increment counter so each indication gets a distinct tsx id. */
+ ++sess->send_ind_tsx_id[2];
+
+ /* Create blank SEND-INDICATION */
+ status = pj_stun_msg_init(&send_ind, PJ_STUN_SEND_INDICATION,
+ PJ_STUN_MAGIC,
+ (const pj_uint8_t*)sess->send_ind_tsx_id);
+ if (status != PJ_SUCCESS)
+ goto on_return;
+
+ /* Add XOR-PEER-ADDRESS */
+ pj_stun_sockaddr_attr_init(&peer_attr, PJ_STUN_ATTR_XOR_PEER_ADDR,
+ PJ_TRUE, addr, addr_len);
+ pj_stun_msg_add_attr(&send_ind, (pj_stun_attr_hdr*)&peer_attr);
+
+ /* Add DATA attribute (points at the caller's buffer; no copy). */
+ pj_stun_binary_attr_init(&data_attr, NULL, PJ_STUN_ATTR_DATA, NULL, 0);
+ data_attr.data = (pj_uint8_t*)pkt;
+ data_attr.length = pkt_len;
+ pj_stun_msg_add_attr(&send_ind, (pj_stun_attr_hdr*)&data_attr);
+
+ /* Encode the message */
+ status = pj_stun_msg_encode(&send_ind, sess->tx_pkt,
+ sizeof(sess->tx_pkt), 0,
+ NULL, &send_ind_len);
+ if (status != PJ_SUCCESS)
+ goto on_return;
+
+ /* Send the Send Indication */
+ status = sess->cb.on_send_pkt(sess, sess->tx_pkt, send_ind_len,
+ sess->srv_addr,
+ pj_sockaddr_get_len(sess->srv_addr));
+ }
+
+on_return:
+ pj_lock_release(sess->lock);
+ return status;
+}
+
+
+/**
+ * Bind a peer address to a channel number.
+ */
+PJ_DEF(pj_status_t) pj_turn_session_bind_channel(pj_turn_session *sess,
+ const pj_sockaddr_t *peer_adr,
+ unsigned addr_len)
+{
+ struct ch_t *ch;
+ pj_stun_tx_data *tdata;
+ pj_uint16_t ch_num;
+ pj_status_t status;
+
+ PJ_ASSERT_RETURN(sess && peer_adr && addr_len, PJ_EINVAL);
+ PJ_ASSERT_RETURN(sess->state == PJ_TURN_STATE_READY, PJ_EINVALIDOP);
+
+ pj_lock_acquire(sess->lock);
+
+ /* Create blank ChannelBind request */
+ status = pj_stun_session_create_req(sess->stun,
+ PJ_STUN_CHANNEL_BIND_REQUEST,
+ PJ_STUN_MAGIC, NULL, &tdata);
+ if (status != PJ_SUCCESS)
+ goto on_return;
+
+ /* Lookup if this peer has already been assigned a number (created
+ * on demand because 'update' is PJ_TRUE).
+ */
+ ch = lookup_ch_by_addr(sess, peer_adr, pj_sockaddr_get_len(peer_adr),
+ PJ_TRUE, PJ_FALSE);
+ pj_assert(ch);
+
+ if (ch->num != PJ_TURN_INVALID_CHANNEL) {
+ /* Channel is already bound. This is a refresh request. */
+ ch_num = ch->num;
+ } else {
+ PJ_ASSERT_ON_FAIL(sess->next_ch <= PJ_TURN_CHANNEL_MAX,
+ {status=PJ_ETOOMANY; goto on_return;});
+ ch->num = ch_num = sess->next_ch++;
+ }
+
+ /* Add CHANNEL-NUMBER attribute */
+ pj_stun_msg_add_uint_attr(tdata->pool, tdata->msg,
+ PJ_STUN_ATTR_CHANNEL_NUMBER,
+ PJ_STUN_SET_CH_NB(ch_num));
+
+ /* Add XOR-PEER-ADDRESS attribute */
+ pj_stun_msg_add_sockaddr_attr(tdata->pool, tdata->msg,
+ PJ_STUN_ATTR_XOR_PEER_ADDR, PJ_TRUE,
+ peer_adr, addr_len);
+
+ /* Send the request, associate peer data structure with tdata
+ * for future reference when we receive the ChannelBind response.
+ * ch->bound is only set once the success response arrives.
+ */
+ status = pj_stun_session_send_msg(sess->stun, ch, PJ_FALSE,
+ (sess->conn_type==PJ_TURN_TP_UDP),
+ sess->srv_addr,
+ pj_sockaddr_get_len(sess->srv_addr),
+ tdata);
+
+on_return:
+ pj_lock_release(sess->lock);
+ return status;
+}
+
+
+/**
+ * Notify TURN client session upon receiving a packet from server.
+ * The packet maybe a STUN packet or ChannelData packet.
+ */
+/*
+ * Demultiplex a packet received from the TURN server: STUN messages go
+ * to the STUN session, ChannelData frames are decoded here and handed
+ * to the application's on_rx_data() callback.
+ *
+ * Bug fix: the "packet shorter than the 4-byte ChannelData header"
+ * path returned directly, leaving the session lock (acquired at the
+ * top) held forever. It now exits through on_return.
+ */
+PJ_DEF(pj_status_t) pj_turn_session_on_rx_pkt(pj_turn_session *sess,
+                                              void *pkt,
+                                              pj_size_t pkt_len,
+                                              pj_size_t *parsed_len)
+{
+    pj_bool_t is_stun;
+    pj_status_t status;
+    pj_bool_t is_datagram;
+
+    /* Packet could be ChannelData or STUN message (response or
+     * indication).
+     */
+
+    /* Start locking the session */
+    pj_lock_acquire(sess->lock);
+
+    is_datagram = (sess->conn_type==PJ_TURN_TP_UDP);
+
+    /* Quickly check if this is STUN message (two most significant bits
+     * of the first octet are zero).
+     */
+    is_stun = ((((pj_uint8_t*)pkt)[0] & 0xC0) == 0);
+
+    if (is_stun) {
+        /* This looks like STUN, give it to the STUN session */
+        unsigned options;
+
+        options = PJ_STUN_CHECK_PACKET | PJ_STUN_NO_FINGERPRINT_CHECK;
+        if (is_datagram)
+            options |= PJ_STUN_IS_DATAGRAM;
+        status=pj_stun_session_on_rx_pkt(sess->stun, pkt, pkt_len,
+                                         options, NULL, parsed_len,
+                                         sess->srv_addr,
+                                         pj_sockaddr_get_len(sess->srv_addr));
+
+    } else {
+        /* This must be ChannelData. */
+        pj_turn_channel_data cd;
+        struct ch_t *ch;
+
+        if (pkt_len < 4) {
+            /* Too short to contain the ChannelData header. Exit through
+             * on_return so the session lock is released.
+             */
+            if (parsed_len) *parsed_len = 0;
+            status = PJ_ETOOSMALL;
+            goto on_return;
+        }
+
+        /* Decode ChannelData packet */
+        pj_memcpy(&cd, pkt, sizeof(pj_turn_channel_data));
+        cd.ch_number = pj_ntohs(cd.ch_number);
+        cd.length = pj_ntohs(cd.length);
+
+        /* Check that size is sane */
+        if (pkt_len < cd.length+sizeof(cd)) {
+            if (parsed_len) {
+                if (is_datagram) {
+                    /* Discard the datagram */
+                    *parsed_len = pkt_len;
+                } else {
+                    /* Insufficient fragment */
+                    *parsed_len = 0;
+                }
+            }
+            status = PJ_ETOOSMALL;
+            goto on_return;
+        } else {
+            if (parsed_len) {
+                /* Apply padding too */
+                *parsed_len = ((cd.length + 3) & (~3)) + sizeof(cd);
+            }
+        }
+
+        /* Lookup channel */
+        ch = lookup_ch_by_chnum(sess, cd.ch_number);
+        if (!ch || !ch->bound) {
+            status = PJ_ENOTFOUND;
+            goto on_return;
+        }
+
+        /* Notify application */
+        if (sess->cb.on_rx_data) {
+            (*sess->cb.on_rx_data)(sess, ((pj_uint8_t*)pkt)+sizeof(cd),
+                                   cd.length, &ch->addr,
+                                   pj_sockaddr_get_len(&ch->addr));
+        }
+
+        status = PJ_SUCCESS;
+    }
+
+on_return:
+    pj_lock_release(sess->lock);
+    return status;
+}
+
+
+/*
+ * This is a callback from STUN session to send outgoing packet.
+ */
+static pj_status_t stun_on_send_msg(pj_stun_session *stun,
+                                    void *token,
+                                    const void *pkt,
+                                    pj_size_t pkt_size,
+                                    const pj_sockaddr_t *dst_addr,
+                                    unsigned addr_len)
+{
+    pj_turn_session *sess = (pj_turn_session*)
+                            pj_stun_session_get_user_data(stun);
+
+    PJ_UNUSED_ARG(token);
+
+    /* Hand the encoded packet to the application's transport callback. */
+    return (*sess->cb.on_send_pkt)(sess, (const pj_uint8_t*)pkt, pkt_size,
+                                   dst_addr, addr_len);
+}
+
+
+/*
+ * Handle failed ALLOCATE or REFRESH request. This may switch to alternate
+ * server if we have one.
+ */
+static void on_session_fail( pj_turn_session *sess,
+ enum pj_stun_method_e method,
+ pj_status_t status,
+ const pj_str_t *reason)
+{
+ sess->last_status = status;
+
+ /* NOTE(review): the do/while(0) executes exactly once; after
+ * advancing srv_addr below, re-sending ALLOCATE appears to be
+ * triggered elsewhere (not visible in this file section) -- verify.
+ */
+ do {
+ pj_str_t reason1;
+ char err_msg[PJ_ERR_MSG_SIZE];
+
+ /* Derive a human-readable reason from 'status' if none given. */
+ if (reason == NULL) {
+ pj_strerror(status, err_msg, sizeof(err_msg));
+ reason1 = pj_str(err_msg);
+ reason = &reason1;
+ }
+
+ PJ_LOG(4,(sess->obj_name, "%s error: %.*s",
+ pj_stun_get_method_name(method),
+ (int)reason->slen, reason->ptr));
+
+ /* If this is ALLOCATE response and we don't have more server
+ * addresses to try, notify application and destroy the TURN
+ * session.
+ */
+ if (method==PJ_STUN_ALLOCATE_METHOD &&
+ sess->srv_addr == &sess->srv_addr_list[sess->srv_addr_cnt-1])
+ {
+
+ set_state(sess, PJ_TURN_STATE_DEALLOCATED);
+ sess_shutdown(sess, status);
+ return;
+ }
+
+ /* Otherwise if this is not ALLOCATE response, notify application
+ * that session has been TERMINATED.
+ */
+ if (method!=PJ_STUN_ALLOCATE_METHOD) {
+ set_state(sess, PJ_TURN_STATE_DEALLOCATED);
+ sess_shutdown(sess, status);
+ return;
+ }
+
+ /* Try next server */
+ ++sess->srv_addr;
+ reason = NULL;
+
+ PJ_LOG(4,(sess->obj_name, "Trying next server"));
+ set_state(sess, PJ_TURN_STATE_RESOLVED);
+
+ } while (0);
+}
+
+
+/*
+ * Handle successful response to ALLOCATE or REFRESH request.
+ */
+/*
+ * Handle a successful ALLOCATE or REFRESH response: validate the
+ * LIFETIME and RELAY-ADDRESS attributes, update expiry/keep-alive
+ * bookkeeping, save the mapped/relayed addresses and (re)start the
+ * keep-alive timer.
+ *
+ * Fix: the inner refresh-delay variable was named 'timeout', shadowing
+ * the function-scope pj_time_val 'timeout' (-Wshadow); it is renamed.
+ */
+static void on_allocate_success(pj_turn_session *sess,
+                                enum pj_stun_method_e method,
+                                const pj_stun_msg *msg)
+{
+    const pj_stun_lifetime_attr *lf_attr;
+    const pj_stun_xor_relayed_addr_attr *raddr_attr;
+    const pj_stun_sockaddr_attr *mapped_attr;
+    pj_str_t s;
+    pj_time_val timeout;
+
+    /* Must have LIFETIME attribute */
+    lf_attr = (const pj_stun_lifetime_attr*)
+              pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_LIFETIME, 0);
+    if (lf_attr == NULL) {
+        on_session_fail(sess, method, PJNATH_EINSTUNMSG,
+                        pj_cstr(&s, "Error: Missing LIFETIME attribute"));
+        return;
+    }
+
+    /* If LIFETIME is zero, this is a deallocation */
+    if (lf_attr->value == 0) {
+        set_state(sess, PJ_TURN_STATE_DEALLOCATED);
+        sess_shutdown(sess, PJ_SUCCESS);
+        return;
+    }
+
+    /* Update lifetime and keep-alive interval */
+    sess->lifetime = lf_attr->value;
+    pj_gettimeofday(&sess->expiry);
+
+    if (sess->lifetime < PJ_TURN_KEEP_ALIVE_SEC) {
+        if (sess->lifetime <= 2) {
+            on_session_fail(sess, method, PJ_ETOOSMALL,
+                            pj_cstr(&s, "Error: LIFETIME too small"));
+            return;
+        }
+        /* Short lifetime: keep-alive just below the lifetime. */
+        sess->ka_interval = sess->lifetime - 2;
+        sess->expiry.sec += (sess->ka_interval-1);
+    } else {
+        int refresh_delay;    /* was 'timeout'; renamed to avoid shadowing */
+
+        sess->ka_interval = PJ_TURN_KEEP_ALIVE_SEC;
+
+        /* Refresh a while before the allocation actually expires. */
+        refresh_delay = sess->lifetime - PJ_TURN_REFRESH_SEC_BEFORE;
+        if (refresh_delay < sess->ka_interval)
+            refresh_delay = sess->ka_interval - 1;
+
+        sess->expiry.sec += refresh_delay;
+    }
+
+    /* Check that relayed transport address contains correct
+     * address family.
+     */
+    raddr_attr = (const pj_stun_xor_relayed_addr_attr*)
+                 pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_XOR_RELAYED_ADDR, 0);
+    if (raddr_attr == NULL && method==PJ_STUN_ALLOCATE_METHOD) {
+        on_session_fail(sess, method, PJNATH_EINSTUNMSG,
+                        pj_cstr(&s, "Error: Received ALLOCATE without "
+                                    "RELAY-ADDRESS attribute"));
+        return;
+    }
+    if (raddr_attr && raddr_attr->sockaddr.addr.sa_family != sess->af) {
+        on_session_fail(sess, method, PJNATH_EINSTUNMSG,
+                        pj_cstr(&s, "Error: RELAY-ADDRESS with non IPv4"
+                                    " address family is not supported "
+                                    "for now"));
+        return;
+    }
+    if (raddr_attr && !pj_sockaddr_has_addr(&raddr_attr->sockaddr)) {
+        on_session_fail(sess, method, PJNATH_EINSTUNMSG,
+                        pj_cstr(&s, "Error: Invalid IP address in "
+                                    "RELAY-ADDRESS attribute"));
+        return;
+    }
+
+    /* Save relayed address */
+    if (raddr_attr) {
+        /* If we already have relay address, check if the relay address
+         * in the response matches our relay address.
+         */
+        if (pj_sockaddr_has_addr(&sess->relay_addr)) {
+            if (pj_sockaddr_cmp(&sess->relay_addr, &raddr_attr->sockaddr)) {
+                on_session_fail(sess, method, PJNATH_EINSTUNMSG,
+                                pj_cstr(&s, "Error: different RELAY-ADDRESS is"
+                                            "returned by server"));
+                return;
+            }
+        } else {
+            /* Otherwise save the relayed address */
+            pj_memcpy(&sess->relay_addr, &raddr_attr->sockaddr,
+                      sizeof(pj_sockaddr));
+        }
+    }
+
+    /* Get mapped address */
+    mapped_attr = (const pj_stun_sockaddr_attr*)
+                  pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_XOR_MAPPED_ADDR, 0);
+    if (mapped_attr) {
+        pj_memcpy(&sess->mapped_addr, &mapped_attr->sockaddr,
+                  sizeof(mapped_attr->sockaddr));
+    }
+
+    /* Success */
+
+    /* Cancel existing keep-alive timer, if any */
+    pj_assert(sess->timer.id != TIMER_DESTROY);
+
+    if (sess->timer.id != TIMER_NONE) {
+        pj_timer_heap_cancel(sess->timer_heap, &sess->timer);
+        sess->timer.id = TIMER_NONE;
+    }
+
+    /* Start keep-alive timer once allocation succeeds */
+    timeout.sec = sess->ka_interval;
+    timeout.msec = 0;
+
+    sess->timer.id = TIMER_KEEP_ALIVE;
+    pj_timer_heap_schedule(sess->timer_heap, &sess->timer, &timeout);
+
+    set_state(sess, PJ_TURN_STATE_READY);
+}
+
+/*
+ * Notification from STUN session on request completion.
+ */
+static void stun_on_request_complete(pj_stun_session *stun,
+ pj_status_t status,
+ void *token,
+ pj_stun_tx_data *tdata,
+ const pj_stun_msg *response,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len)
+{
+ pj_turn_session *sess;
+ enum pj_stun_method_e method = (enum pj_stun_method_e)
+ PJ_STUN_GET_METHOD(tdata->msg->hdr.type);
+
+ PJ_UNUSED_ARG(src_addr);
+ PJ_UNUSED_ARG(src_addr_len);
+
+ sess = (pj_turn_session*)pj_stun_session_get_user_data(stun);
+
+ /* Dispatch on the method of the original request. Note that
+ * 'response' may be NULL when status != PJ_SUCCESS (e.g. timeout),
+ * as the 'if (response)' guards below indicate.
+ */
+ if (method == PJ_STUN_ALLOCATE_METHOD) {
+
+ /* Destroy if we have pending destroy request */
+ if (sess->pending_destroy) {
+ if (status == PJ_SUCCESS)
+ sess->state = PJ_TURN_STATE_READY;
+ else
+ sess->state = PJ_TURN_STATE_DEALLOCATED;
+ sess_shutdown(sess, PJ_SUCCESS);
+ return;
+ }
+
+ /* Handle ALLOCATE response */
+ if (status==PJ_SUCCESS &&
+ PJ_STUN_IS_SUCCESS_RESPONSE(response->hdr.type))
+ {
+
+ /* Successful Allocate response */
+ on_allocate_success(sess, method, response);
+
+ } else {
+ /* Failed Allocate request */
+ const pj_str_t *err_msg = NULL;
+
+ /* Map the STUN error code (if present) to a pj_status_t. */
+ if (status == PJ_SUCCESS) {
+ const pj_stun_errcode_attr *err_attr;
+ err_attr = (const pj_stun_errcode_attr*)
+ pj_stun_msg_find_attr(response,
+ PJ_STUN_ATTR_ERROR_CODE, 0);
+ if (err_attr) {
+ status = PJ_STATUS_FROM_STUN_CODE(err_attr->err_code);
+ err_msg = &err_attr->reason;
+ } else {
+ status = PJNATH_EINSTUNMSG;
+ }
+ }
+
+ /* May retry the next server address, see on_session_fail(). */
+ on_session_fail(sess, method, status, err_msg);
+ }
+
+ } else if (method == PJ_STUN_REFRESH_METHOD) {
+ /* Handle Refresh response */
+ if (status==PJ_SUCCESS &&
+ PJ_STUN_IS_SUCCESS_RESPONSE(response->hdr.type))
+ {
+ /* Success, schedule next refresh. */
+ on_allocate_success(sess, method, response);
+
+ } else {
+ /* Failed Refresh request */
+ const pj_str_t *err_msg = NULL;
+
+ pj_assert(status != PJ_SUCCESS);
+
+ if (response) {
+ const pj_stun_errcode_attr *err_attr;
+ err_attr = (const pj_stun_errcode_attr*)
+ pj_stun_msg_find_attr(response,
+ PJ_STUN_ATTR_ERROR_CODE, 0);
+ if (err_attr) {
+ status = PJ_STATUS_FROM_STUN_CODE(err_attr->err_code);
+ err_msg = &err_attr->reason;
+ }
+ }
+
+ /* Notify and destroy */
+ on_session_fail(sess, method, status, err_msg);
+ }
+
+ } else if (method == PJ_STUN_CHANNEL_BIND_METHOD) {
+ /* Handle ChannelBind response */
+ if (status==PJ_SUCCESS &&
+ PJ_STUN_IS_SUCCESS_RESPONSE(response->hdr.type))
+ {
+ /* Successful ChannelBind response. 'token' is the ch_t that
+ * was attached when the request was sent.
+ */
+ struct ch_t *ch = (struct ch_t*)token;
+
+ pj_assert(ch->num != PJ_TURN_INVALID_CHANNEL);
+ ch->bound = PJ_TRUE;
+
+ /* Update hash table */
+ lookup_ch_by_addr(sess, &ch->addr,
+ pj_sockaddr_get_len(&ch->addr),
+ PJ_TRUE, PJ_TRUE);
+
+ } else {
+ /* Failed ChannelBind response */
+ pj_str_t reason = {"", 0};
+ int err_code = 0;
+ char errbuf[PJ_ERR_MSG_SIZE];
+
+ pj_assert(status != PJ_SUCCESS);
+
+ if (response) {
+ const pj_stun_errcode_attr *err_attr;
+ err_attr = (const pj_stun_errcode_attr*)
+ pj_stun_msg_find_attr(response,
+ PJ_STUN_ATTR_ERROR_CODE, 0);
+ if (err_attr) {
+ err_code = err_attr->err_code;
+ status = PJ_STATUS_FROM_STUN_CODE(err_attr->err_code);
+ reason = err_attr->reason;
+ }
+ } else {
+ err_code = status;
+ reason = pj_strerror(status, errbuf, sizeof(errbuf));
+ }
+
+ PJ_LOG(1,(sess->obj_name, "ChannelBind failed: %d/%.*s",
+ err_code, (int)reason.slen, reason.ptr));
+
+ if (err_code == PJ_STUN_SC_ALLOCATION_MISMATCH) {
+ /* Allocation mismatch means allocation no longer exists */
+ on_session_fail(sess, PJ_STUN_CHANNEL_BIND_METHOD,
+ status, &reason);
+ return;
+ }
+ }
+
+ } else if (method == PJ_STUN_CREATE_PERM_METHOD) {
+ /* Handle CreatePermission response */
+ if (status==PJ_SUCCESS &&
+ PJ_STUN_IS_SUCCESS_RESPONSE(response->hdr.type))
+ {
+ /* No special handling when the request is successful. */
+ } else {
+ /* Iterate the permission table and invalidate all permissions
+ * that are related to this request ('token' is the req_token
+ * stamped on each perm by pj_turn_session_set_perm()).
+ */
+ pj_hash_iterator_t it_buf, *it;
+ char ipstr[PJ_INET6_ADDRSTRLEN+10];
+ int err_code;
+ char errbuf[PJ_ERR_MSG_SIZE];
+ pj_str_t reason;
+
+ pj_assert(status != PJ_SUCCESS);
+
+ if (response) {
+ const pj_stun_errcode_attr *eattr;
+
+ eattr = (const pj_stun_errcode_attr*)
+ pj_stun_msg_find_attr(response,
+ PJ_STUN_ATTR_ERROR_CODE, 0);
+ if (eattr) {
+ err_code = eattr->err_code;
+ reason = eattr->reason;
+ } else {
+ err_code = -1;
+ reason = pj_str("?");
+ }
+ } else {
+ err_code = status;
+ reason = pj_strerror(status, errbuf, sizeof(errbuf));
+ }
+
+ it = pj_hash_first(sess->perm_table, &it_buf);
+ while (it) {
+ struct perm_t *perm = (struct perm_t*)
+ pj_hash_this(sess->perm_table, it);
+ it = pj_hash_next(sess->perm_table, it);
+
+ if (perm->req_token == token) {
+ PJ_LOG(1,(sess->obj_name,
+ "CreatePermission failed for IP %s: %d/%.*s",
+ pj_sockaddr_print(&perm->addr, ipstr,
+ sizeof(ipstr), 2),
+ err_code, (int)reason.slen, reason.ptr));
+
+ invalidate_perm(sess, perm);
+ }
+ }
+
+ if (err_code == PJ_STUN_SC_ALLOCATION_MISMATCH) {
+ /* Allocation mismatch means allocation no longer exists */
+ on_session_fail(sess, PJ_STUN_CREATE_PERM_METHOD,
+ status, &reason);
+ return;
+ }
+ }
+
+ } else {
+ PJ_LOG(4,(sess->obj_name, "Unexpected STUN %s response",
+ pj_stun_get_method_name(response->hdr.type)));
+ }
+}
+
+
+/*
+ * Notification from the STUN session on an incoming STUN Indication
+ * message. Only Data indications carrying both XOR-PEER-ADDRESS and
+ * DATA attributes are accepted; the payload is handed to the
+ * application's on_rx_data callback.
+ */
+static pj_status_t stun_on_rx_indication(pj_stun_session *stun,
+ const pj_uint8_t *pkt,
+ unsigned pkt_len,
+ const pj_stun_msg *msg,
+ void *token,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len)
+{
+ pj_turn_session *sess;
+ pj_stun_data_attr *data_attr;
+ pj_stun_xor_peer_addr_attr *peer_attr;
+
+ PJ_UNUSED_ARG(token);
+ PJ_UNUSED_ARG(pkt);
+ PJ_UNUSED_ARG(pkt_len);
+ PJ_UNUSED_ARG(src_addr);
+ PJ_UNUSED_ARG(src_addr_len);
+
+ sess = (pj_turn_session*)pj_stun_session_get_user_data(stun);
+
+ /* Anything other than a Data indication is unexpected here. */
+ if (msg->hdr.type != PJ_STUN_DATA_INDICATION) {
+ PJ_LOG(4,(sess->obj_name, "Unexpected STUN %s indication",
+ pj_stun_get_method_name(msg->hdr.type)));
+ return PJ_EINVALIDOP;
+ }
+
+ /* A forwarded ICMP packet is silently ignored for now. */
+ if (pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_ICMP, 0) != NULL)
+ return PJ_SUCCESS;
+
+ /* Both XOR-PEER-ADDRESS and DATA attributes are mandatory. */
+ peer_attr = (pj_stun_xor_peer_addr_attr*)
+ pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_XOR_PEER_ADDR, 0);
+ data_attr = (pj_stun_data_attr*)
+ pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_DATA, 0);
+ if (peer_attr == NULL || data_attr == NULL) {
+ PJ_LOG(4,(sess->obj_name,
+ "Received Data indication with missing attributes"));
+ return PJ_EINVALIDOP;
+ }
+
+ /* Hand the payload to the application, if it is interested. */
+ if (sess->cb.on_rx_data) {
+ (*sess->cb.on_rx_data)(sess, data_attr->data, data_attr->length,
+ &peer_attr->sockaddr,
+ pj_sockaddr_get_len(&peer_attr->sockaddr));
+ }
+
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Notification on completion of DNS SRV resolution.
+ *
+ * On failure the session is shut down. On success, up to
+ * PJ_TURN_MAX_DNS_SRV_CNT resolved server addresses are copied into
+ * the session, the state moves to RESOLVED, and any allocation that
+ * was waiting for resolution is started.
+ */
+static void dns_srv_resolver_cb(void *user_data,
+ pj_status_t status,
+ const pj_dns_srv_record *rec)
+{
+ pj_turn_session *sess = (pj_turn_session*) user_data;
+ unsigned i, cnt, tot_cnt;
+
+ /* Clear async resolver */
+ sess->dns_async = NULL;
+
+ /* Check failure */
+ if (status != PJ_SUCCESS) {
+ sess_shutdown(sess, status);
+ return;
+ }
+
+ /* Calculate total number of server entries in the response */
+ tot_cnt = 0;
+ for (i=0; i<rec->count; ++i) {
+ tot_cnt += rec->entry[i].server.addr_count;
+ }
+
+ if (tot_cnt > PJ_TURN_MAX_DNS_SRV_CNT)
+ tot_cnt = PJ_TURN_MAX_DNS_SRV_CNT;
+
+ /* Allocate server entries */
+ sess->srv_addr_list = (pj_sockaddr*)
+ pj_pool_calloc(sess->pool, tot_cnt,
+ sizeof(pj_sockaddr));
+
+ /* Copy results to server entries.
+ * NOTE(review): only the IPv4 member of the sockaddr union is
+ * filled in while sin_family is taken from sess->af — this looks
+ * IPv4-only; confirm behavior when af is pj_AF_INET6().
+ */
+ for (i=0, cnt=0; i<rec->count && cnt<PJ_TURN_MAX_DNS_SRV_CNT; ++i) {
+ unsigned j;
+
+ for (j=0; j<rec->entry[i].server.addr_count &&
+ cnt<PJ_TURN_MAX_DNS_SRV_CNT; ++j)
+ {
+ pj_sockaddr_in *addr = &sess->srv_addr_list[cnt].ipv4;
+
+ addr->sin_family = sess->af;
+ addr->sin_port = pj_htons(rec->entry[i].port);
+ addr->sin_addr.s_addr = rec->entry[i].server.addr[j].s_addr;
+
+ ++cnt;
+ }
+ }
+ sess->srv_addr_cnt = (pj_uint16_t)cnt;
+
+ /* Set current server */
+ sess->srv_addr = &sess->srv_addr_list[0];
+
+ /* Set state to PJ_TURN_STATE_RESOLVED */
+ set_state(sess, PJ_TURN_STATE_RESOLVED);
+
+ /* Run pending allocation */
+ if (sess->pending_alloc) {
+ pj_turn_session_alloc(sess, NULL);
+ }
+}
+
+
+/*
+ * Lookup channel descriptor (peer) from its transport address.
+ *
+ * If 'update' is set, the entry is created when missing and its
+ * expiration time refreshed; the matching permission entry is also
+ * created/refreshed. If 'bind_channel' is additionally set, the
+ * channel (which must already be bound) is registered in the hash
+ * table under its channel number as a second key.
+ */
+static struct ch_t *lookup_ch_by_addr(pj_turn_session *sess,
+ const pj_sockaddr_t *addr,
+ unsigned addr_len,
+ pj_bool_t update,
+ pj_bool_t bind_channel)
+{
+ pj_uint32_t hval = 0;
+ struct ch_t *ch;
+
+ ch = (struct ch_t*)
+ pj_hash_get(sess->ch_table, addr, addr_len, &hval);
+ if (ch == NULL && update) {
+ ch = PJ_POOL_ZALLOC_T(sess->pool, struct ch_t);
+ ch->num = PJ_TURN_INVALID_CHANNEL;
+ pj_memcpy(&ch->addr, addr, addr_len);
+
+ /* Register by peer address */
+ pj_hash_set(sess->pool, sess->ch_table, &ch->addr, addr_len,
+ hval, ch);
+ }
+
+ if (ch && update) {
+ /* Push the expiry out so a refresh is triggered one keep-alive
+ * interval before the binding would actually expire.
+ */
+ pj_gettimeofday(&ch->expiry);
+ ch->expiry.sec += PJ_TURN_PERM_TIMEOUT - sess->ka_interval - 1;
+
+ if (bind_channel) {
+ /* This 'hval' deliberately shadows the outer one: it is a
+ * fresh hash value for the channel-number key.
+ */
+ pj_uint32_t hval = 0;
+ /* Register by channel number */
+ pj_assert(ch->num != PJ_TURN_INVALID_CHANNEL && ch->bound);
+
+ if (pj_hash_get(sess->ch_table, &ch->num,
+ sizeof(ch->num), &hval)==0) {
+ pj_hash_set(sess->pool, sess->ch_table, &ch->num,
+ sizeof(ch->num), hval, ch);
+ }
+ }
+ }
+
+ /* Also create/update permission for this destination. Ideally we
+ * should update this when we receive the successful response,
+ * but that would cause duplicate CreatePermission to be sent
+ * during refreshing.
+ */
+ if (ch && update) {
+ lookup_perm(sess, &ch->addr, pj_sockaddr_get_len(&ch->addr), PJ_TRUE);
+ }
+
+ return ch;
+}
+
+
+/*
+ * Lookup channel descriptor from its channel number (the hash table
+ * is also keyed by number once a channel is bound).
+ */
+static struct ch_t *lookup_ch_by_chnum(pj_turn_session *sess,
+ pj_uint16_t chnum)
+{
+ struct ch_t *ch;
+
+ ch = (struct ch_t*) pj_hash_get(sess->ch_table, &chnum,
+ sizeof(chnum), NULL);
+ return ch;
+}
+
+
+/*
+ * Lookup a permission entry by peer address, optionally creating it
+ * when missing and refreshing its expiration time. Permissions are
+ * keyed by address only, so a non-zero port is stripped first.
+ */
+static struct perm_t *lookup_perm(pj_turn_session *sess,
+ const pj_sockaddr_t *addr,
+ unsigned addr_len,
+ pj_bool_t update)
+{
+ pj_sockaddr zero_port_addr;
+ struct perm_t *perm;
+ pj_uint32_t hval = 0;
+
+ /* Make sure the port number in the key is zero. */
+ if (pj_sockaddr_get_port(addr) != 0) {
+ pj_memcpy(&zero_port_addr, addr, addr_len);
+ pj_sockaddr_set_port(&zero_port_addr, 0);
+ addr = &zero_port_addr;
+ }
+
+ perm = (struct perm_t*)
+ pj_hash_get(sess->perm_table, addr, addr_len, &hval);
+
+ if (update) {
+ if (perm == NULL) {
+ /* Not found; create a new entry. */
+ perm = PJ_POOL_ZALLOC_T(sess->pool, struct perm_t);
+ pj_memcpy(&perm->addr, addr, addr_len);
+ perm->hval = hval;
+
+ pj_hash_set(sess->pool, sess->perm_table, &perm->addr,
+ addr_len, perm->hval, perm);
+ }
+
+ /* Refresh the expiration time. */
+ pj_gettimeofday(&perm->expiry);
+ perm->expiry.sec += PJ_TURN_PERM_TIMEOUT - sess->ka_interval - 1;
+ }
+
+ return perm;
+}
+
+/*
+ * Delete a permission entry from the permission hash table
+ * (pj_hash_set with a NULL value removes the entry).
+ */
+static void invalidate_perm(pj_turn_session *sess,
+ struct perm_t *perm)
+{
+ unsigned key_len = pj_sockaddr_get_len(&perm->addr);
+
+ pj_hash_set(NULL, sess->perm_table, &perm->addr, key_len,
+ perm->hval, NULL);
+}
+
+/*
+ * Scan the permission hash table and refresh expiring permissions.
+ *
+ * Every permission about to expire that is marked for renewal gets one
+ * XOR-PEER-ADDR attribute in a single batched CreatePermission
+ * request; expired entries not marked for renewal are removed.
+ * Returns the number of permissions refreshed, or 0 on error or when
+ * nothing needed refreshing.
+ */
+static unsigned refresh_permissions(pj_turn_session *sess,
+ const pj_time_val *now)
+{
+ pj_stun_tx_data *tdata = NULL;
+ unsigned count = 0;
+ void *req_token = NULL;
+ pj_hash_iterator_t *it, itbuf;
+ pj_status_t status;
+
+ it = pj_hash_first(sess->perm_table, &itbuf);
+ while (it) {
+ struct perm_t *perm = (struct perm_t*)
+ pj_hash_this(sess->perm_table, it);
+
+ /* Advance the iterator first: invalidate_perm() below may
+ * delete the current entry.
+ */
+ it = pj_hash_next(sess->perm_table, it);
+
+ if (perm->expiry.sec-1 <= now->sec) {
+ if (perm->renew) {
+ /* Renew this permission */
+ if (tdata == NULL) {
+ /* Create a bare CreatePermission request */
+ status = pj_stun_session_create_req(
+ sess->stun,
+ PJ_STUN_CREATE_PERM_REQUEST,
+ PJ_STUN_MAGIC, NULL, &tdata);
+ if (status != PJ_SUCCESS) {
+ PJ_LOG(1,(sess->obj_name,
+ "Error creating CreatePermission request: %d",
+ status));
+ return 0;
+ }
+
+ /* Create request token to map the request to the perm
+ * structures which the request belongs. (A random tag
+ * shared by all perms refreshed in this batch, so a
+ * failure response can invalidate exactly them.)
+ */
+ req_token = (void*)(long)pj_rand();
+ }
+
+ status = pj_stun_msg_add_sockaddr_attr(
+ tdata->pool,
+ tdata->msg,
+ PJ_STUN_ATTR_XOR_PEER_ADDR,
+ PJ_TRUE,
+ &perm->addr,
+ sizeof(perm->addr));
+ if (status != PJ_SUCCESS) {
+ pj_stun_msg_destroy_tdata(sess->stun, tdata);
+ return 0;
+ }
+
+ /* Optimistically push the expiry forward; a failure
+ * response will invalidate the entry via req_token.
+ */
+ perm->expiry = *now;
+ perm->expiry.sec += PJ_TURN_PERM_TIMEOUT-sess->ka_interval-1;
+ perm->req_token = req_token;
+ ++count;
+
+ } else {
+ /* This permission has expired and app doesn't want
+ * us to renew, so delete it from the hash table.
+ */
+ invalidate_perm(sess, perm);
+ }
+ }
+ }
+
+ if (tdata) {
+ status = pj_stun_session_send_msg(sess->stun, req_token, PJ_FALSE,
+ (sess->conn_type==PJ_TURN_TP_UDP),
+ sess->srv_addr,
+ pj_sockaddr_get_len(sess->srv_addr),
+ tdata);
+ if (status != PJ_SUCCESS) {
+ PJ_LOG(1,(sess->obj_name,
+ "Error sending CreatePermission request: %d",
+ status));
+ count = 0;
+ }
+
+ }
+
+ return count;
+}
+
+/*
+ * Timer event for the TURN session: either the periodic keep-alive
+ * (refresh allocation, channel bindings and permissions, or send a
+ * blank Send indication) or the deferred session destruction.
+ * Note: each branch releases the session lock itself, because
+ * do_destroy() must run after the lock has been released.
+ */
+static void on_timer_event(pj_timer_heap_t *th, pj_timer_entry *e)
+{
+ pj_turn_session *sess = (pj_turn_session*)e->user_data;
+ enum timer_id_t eid;
+
+ PJ_UNUSED_ARG(th);
+
+ pj_lock_acquire(sess->lock);
+
+ /* Consume the timer id before doing any work. */
+ eid = (enum timer_id_t) e->id;
+ e->id = TIMER_NONE;
+
+ if (eid == TIMER_KEEP_ALIVE) {
+ pj_time_val now;
+ pj_hash_iterator_t itbuf, *it;
+ pj_bool_t resched = PJ_TRUE;
+ pj_bool_t pkt_sent = PJ_FALSE;
+
+ pj_gettimeofday(&now);
+
+ /* Refresh allocation if it's time to do so */
+ if (PJ_TIME_VAL_LTE(sess->expiry, now)) {
+ int lifetime = sess->alloc_param.lifetime;
+
+ if (lifetime == 0)
+ lifetime = -1;
+
+ send_refresh(sess, lifetime);
+ resched = PJ_FALSE;
+ pkt_sent = PJ_TRUE;
+ }
+
+ /* Scan hash table to refresh bound channels */
+ it = pj_hash_first(sess->ch_table, &itbuf);
+ while (it) {
+ struct ch_t *ch = (struct ch_t*)
+ pj_hash_this(sess->ch_table, it);
+ if (ch->bound && PJ_TIME_VAL_LTE(ch->expiry, now)) {
+
+ /* Send ChannelBind to refresh channel binding and
+ * permission.
+ */
+ pj_turn_session_bind_channel(sess, &ch->addr,
+ pj_sockaddr_get_len(&ch->addr));
+ pkt_sent = PJ_TRUE;
+ }
+
+ it = pj_hash_next(sess->ch_table, it);
+ }
+
+ /* Scan permission table to refresh permissions */
+ if (refresh_permissions(sess, &now))
+ pkt_sent = PJ_TRUE;
+
+ /* If no packet is sent, send a blank Send indication to
+ * refresh local NAT.
+ */
+ if (!pkt_sent && sess->alloc_param.ka_interval > 0) {
+ pj_stun_tx_data *tdata;
+ pj_status_t rc;
+
+ /* Create blank SEND-INDICATION */
+ rc = pj_stun_session_create_ind(sess->stun,
+ PJ_STUN_SEND_INDICATION, &tdata);
+ if (rc == PJ_SUCCESS) {
+ /* Add DATA attribute with zero length */
+ pj_stun_msg_add_binary_attr(tdata->pool, tdata->msg,
+ PJ_STUN_ATTR_DATA, NULL, 0);
+
+ /* Send the indication */
+ pj_stun_session_send_msg(sess->stun, NULL, PJ_FALSE,
+ PJ_FALSE, sess->srv_addr,
+ pj_sockaddr_get_len(sess->srv_addr),
+ tdata);
+ }
+ }
+
+ /* Reschedule timer (skipped when a refresh was sent; its
+ * response handler schedules the next keep-alive).
+ */
+ if (resched) {
+ pj_time_val delay;
+
+ delay.sec = sess->ka_interval;
+ delay.msec = 0;
+
+ sess->timer.id = TIMER_KEEP_ALIVE;
+ pj_timer_heap_schedule(sess->timer_heap, &sess->timer, &delay);
+ }
+
+ pj_lock_release(sess->lock);
+
+ } else if (eid == TIMER_DESTROY) {
+ /* Time to destroy; must be done without holding the lock. */
+ pj_lock_release(sess->lock);
+ do_destroy(sess);
+ } else {
+ pj_assert(!"Unknown timer event");
+ pj_lock_release(sess->lock);
+ }
+}
+
diff --git a/pjnath/src/pjnath/turn_sock.c b/pjnath/src/pjnath/turn_sock.c
new file mode 100644
index 0000000..799b557
--- /dev/null
+++ b/pjnath/src/pjnath/turn_sock.c
@@ -0,0 +1,808 @@
+/* $Id: turn_sock.c 3841 2011-10-24 09:28:13Z ming $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <pjnath/turn_sock.h>
+#include <pj/activesock.h>
+#include <pj/assert.h>
+#include <pj/errno.h>
+#include <pj/lock.h>
+#include <pj/log.h>
+#include <pj/pool.h>
+#include <pj/ioqueue.h>
+
+/* Timer IDs used with pj_turn_sock.timer */
+enum
+{
+ TIMER_NONE, /* no timer pending */
+ TIMER_DESTROY /* deferred self-destruction */
+};
+
+/* NOTE(review): INIT appears unused in this file — confirm before removing. */
+#define INIT 0x1FFFFFFF
+
+/* TURN client transport: couples a pj_turn_session with an actual
+ * socket (UDP or TCP) managed through a pjlib active socket.
+ */
+struct pj_turn_sock
+{
+ pj_pool_t *pool; /* memory pool owning this object */
+ const char *obj_name; /* name used in log messages */
+ pj_turn_session *sess; /* underlying TURN session */
+ pj_turn_sock_cb cb; /* application callbacks */
+ void *user_data; /* opaque application data */
+
+ pj_lock_t *lock; /* recursive mutex guarding this object */
+
+ pj_turn_alloc_param alloc_param; /* saved ALLOCATE parameters */
+ pj_stun_config cfg; /* ioqueue, timer heap, etc. */
+ pj_turn_sock_cfg setting; /* QoS and other socket settings */
+
+ pj_bool_t destroy_request; /* app has requested destroy */
+ pj_timer_entry timer; /* deferred-destroy timer */
+
+ int af; /* address family */
+ pj_turn_tp_type conn_type; /* transport to server (UDP/TCP) */
+ pj_activesock_t *active_sock; /* socket to the TURN server */
+ pj_ioqueue_op_key_t send_key; /* pending-send operation key */
+};
+
+
+/*
+ * Callback prototypes.
+ */
+static pj_status_t turn_on_send_pkt(pj_turn_session *sess,
+ const pj_uint8_t *pkt,
+ unsigned pkt_len,
+ const pj_sockaddr_t *dst_addr,
+ unsigned dst_addr_len);
+static void turn_on_channel_bound(pj_turn_session *sess,
+ const pj_sockaddr_t *peer_addr,
+ unsigned addr_len,
+ unsigned ch_num);
+static void turn_on_rx_data(pj_turn_session *sess,
+ void *pkt,
+ unsigned pkt_len,
+ const pj_sockaddr_t *peer_addr,
+ unsigned addr_len);
+static void turn_on_state(pj_turn_session *sess,
+ pj_turn_state_t old_state,
+ pj_turn_state_t new_state);
+
+static pj_bool_t on_data_read(pj_activesock_t *asock,
+ void *data,
+ pj_size_t size,
+ pj_status_t status,
+ pj_size_t *remainder);
+static pj_bool_t on_connect_complete(pj_activesock_t *asock,
+ pj_status_t status);
+
+
+
+static void destroy(pj_turn_sock *turn_sock);
+static void timer_cb(pj_timer_heap_t *th, pj_timer_entry *e);
+
+
+/* Fill a pj_turn_sock_cfg with the library defaults: best-effort QoS,
+ * and QoS errors are ignored.
+ */
+PJ_DEF(void) pj_turn_sock_cfg_default(pj_turn_sock_cfg *cfg)
+{
+ pj_bzero(cfg, sizeof(pj_turn_sock_cfg));
+ cfg->qos_ignore_error = PJ_TRUE;
+ cfg->qos_type = PJ_QOS_TYPE_BEST_EFFORT;
+}
+
+/*
+ * Create the TURN transport instance.
+ *
+ * Allocates the pool, lock, timer entry and the underlying TURN
+ * session. The network socket itself is NOT created here; that
+ * happens in turn_on_state() once the server has been resolved.
+ * Returns PJ_SUCCESS and stores the instance in *p_turn_sock, or an
+ * error code (the partially built instance is destroyed on error).
+ */
+PJ_DEF(pj_status_t) pj_turn_sock_create(pj_stun_config *cfg,
+ int af,
+ pj_turn_tp_type conn_type,
+ const pj_turn_sock_cb *cb,
+ const pj_turn_sock_cfg *setting,
+ void *user_data,
+ pj_turn_sock **p_turn_sock)
+{
+ pj_turn_sock *turn_sock;
+ pj_turn_session_cb sess_cb;
+ pj_turn_sock_cfg default_setting;
+ pj_pool_t *pool;
+ const char *name_tmpl;
+ pj_status_t status;
+
+ PJ_ASSERT_RETURN(cfg && p_turn_sock, PJ_EINVAL);
+ PJ_ASSERT_RETURN(af==pj_AF_INET() || af==pj_AF_INET6(), PJ_EINVAL);
+ PJ_ASSERT_RETURN(conn_type!=PJ_TURN_TP_TCP || PJ_HAS_TCP, PJ_EINVAL);
+
+ /* Fall back to defaults when no setting is given. */
+ if (!setting) {
+ pj_turn_sock_cfg_default(&default_setting);
+ setting = &default_setting;
+ }
+
+ /* Pick the pool/object name template from the transport type. */
+ switch (conn_type) {
+ case PJ_TURN_TP_UDP:
+ name_tmpl = "udprel%p";
+ break;
+ case PJ_TURN_TP_TCP:
+ name_tmpl = "tcprel%p";
+ break;
+ default:
+ PJ_ASSERT_RETURN(!"Invalid TURN conn_type", PJ_EINVAL);
+ name_tmpl = "tcprel%p";
+ break;
+ }
+
+ /* Create and init basic data structure */
+ pool = pj_pool_create(cfg->pf, name_tmpl, PJNATH_POOL_LEN_TURN_SOCK,
+ PJNATH_POOL_INC_TURN_SOCK, NULL);
+ turn_sock = PJ_POOL_ZALLOC_T(pool, pj_turn_sock);
+ turn_sock->pool = pool;
+ turn_sock->obj_name = pool->obj_name;
+ turn_sock->user_data = user_data;
+ turn_sock->af = af;
+ turn_sock->conn_type = conn_type;
+
+ /* Copy STUN config (this contains ioqueue, timer heap, etc.) */
+ pj_memcpy(&turn_sock->cfg, cfg, sizeof(*cfg));
+
+ /* Copy setting (QoS parameters etc */
+ pj_memcpy(&turn_sock->setting, setting, sizeof(*setting));
+
+ /* Set callback */
+ if (cb) {
+ pj_memcpy(&turn_sock->cb, cb, sizeof(*cb));
+ }
+
+ /* Create lock */
+ status = pj_lock_create_recursive_mutex(pool, turn_sock->obj_name,
+ &turn_sock->lock);
+ if (status != PJ_SUCCESS) {
+ destroy(turn_sock);
+ return status;
+ }
+
+ /* Init timer */
+ pj_timer_entry_init(&turn_sock->timer, TIMER_NONE, turn_sock, &timer_cb);
+
+ /* Init TURN session */
+ pj_bzero(&sess_cb, sizeof(sess_cb));
+ sess_cb.on_send_pkt = &turn_on_send_pkt;
+ sess_cb.on_channel_bound = &turn_on_channel_bound;
+ sess_cb.on_rx_data = &turn_on_rx_data;
+ sess_cb.on_state = &turn_on_state;
+ status = pj_turn_session_create(cfg, pool->obj_name, af, conn_type,
+ &sess_cb, 0, turn_sock, &turn_sock->sess);
+ if (status != PJ_SUCCESS) {
+ destroy(turn_sock);
+ return status;
+ }
+
+ /* Note: socket and ioqueue will be created later once the TURN server
+ * has been resolved.
+ */
+
+ *p_turn_sock = turn_sock;
+ return PJ_SUCCESS;
+}
+
+/*
+ * Destroy (internal): synchronously tears down the session, the
+ * active socket, the lock and finally the pool. Safe to call on a
+ * partially constructed instance — every member is checked for NULL.
+ */
+static void destroy(pj_turn_sock *turn_sock)
+{
+ if (turn_sock->lock) {
+ pj_lock_acquire(turn_sock->lock);
+ }
+
+ /* Detach from the session first so its callbacks find a NULL
+ * user data and become no-ops.
+ */
+ if (turn_sock->sess) {
+ pj_turn_session_set_user_data(turn_sock->sess, NULL);
+ pj_turn_session_shutdown(turn_sock->sess);
+ turn_sock->sess = NULL;
+ }
+
+ if (turn_sock->active_sock) {
+ pj_activesock_close(turn_sock->active_sock);
+ turn_sock->active_sock = NULL;
+ }
+
+ if (turn_sock->lock) {
+ pj_lock_release(turn_sock->lock);
+ pj_lock_destroy(turn_sock->lock);
+ turn_sock->lock = NULL;
+ }
+
+ /* Releasing the pool frees turn_sock itself; clear the pointer
+ * before the release.
+ */
+ if (turn_sock->pool) {
+ pj_pool_t *pool = turn_sock->pool;
+ turn_sock->pool = NULL;
+ pj_pool_release(pool);
+ }
+}
+
+
+/*
+ * Destroy the TURN transport. When a session is still active the
+ * actual destruction is deferred: shutting the session down makes our
+ * state callback see DESTROYING, which schedules TIMER_DESTROY.
+ */
+PJ_DEF(void) pj_turn_sock_destroy(pj_turn_sock *turn_sock)
+{
+ pj_lock_acquire(turn_sock->lock);
+ turn_sock->destroy_request = PJ_TRUE;
+
+ if (turn_sock->sess == NULL) {
+ /* No session: tear everything down right away. */
+ pj_lock_release(turn_sock->lock);
+ destroy(turn_sock);
+ return;
+ }
+
+ pj_turn_session_shutdown(turn_sock->sess);
+ pj_lock_release(turn_sock->lock);
+}
+
+
+/* Timer callback: currently only used for deferred destruction. */
+static void timer_cb(pj_timer_heap_t *th, pj_timer_entry *e)
+{
+ pj_turn_sock *turn_sock = (pj_turn_sock*)e->user_data;
+ int timer_id = e->id;
+
+ PJ_UNUSED_ARG(th);
+
+ /* Consume the timer id before acting on it. */
+ e->id = TIMER_NONE;
+
+ if (timer_id == TIMER_DESTROY) {
+ PJ_LOG(5,(turn_sock->obj_name, "Destroying TURN"));
+ destroy(turn_sock);
+ } else {
+ pj_assert(!"Invalid timer id");
+ }
+}
+
+
+/* Display error: log 'title' plus the error text for 'status'. */
+static void show_err(pj_turn_sock *turn_sock, const char *title,
+ pj_status_t status)
+{
+ PJ_PERROR(4,(turn_sock->obj_name, status, title));
+}
+
+/* On error, terminate session */
+static void sess_fail(pj_turn_sock *turn_sock, const char *title,
+ pj_status_t status)
+{
+ show_err(turn_sock, title, status);
+ if (turn_sock->sess) {
+ pj_turn_session_destroy(turn_sock->sess, status);
+ }
+}
+
+/*
+ * Attach an arbitrary application pointer to the transport.
+ */
+PJ_DEF(pj_status_t) pj_turn_sock_set_user_data( pj_turn_sock *turn_sock,
+ void *user_data)
+{
+ PJ_ASSERT_RETURN(turn_sock, PJ_EINVAL);
+
+ turn_sock->user_data = user_data;
+
+ return PJ_SUCCESS;
+}
+
+/*
+ * Retrieve the application pointer previously attached with
+ * pj_turn_sock_set_user_data().
+ */
+PJ_DEF(void*) pj_turn_sock_get_user_data(pj_turn_sock *turn_sock)
+{
+ PJ_ASSERT_RETURN(turn_sock, NULL);
+
+ return turn_sock->user_data;
+}
+
+/**
+ * Get session info. When no session exists yet, a zeroed info with
+ * state PJ_TURN_STATE_NULL is reported instead.
+ */
+PJ_DEF(pj_status_t) pj_turn_sock_get_info(pj_turn_sock *turn_sock,
+ pj_turn_session_info *info)
+{
+ PJ_ASSERT_RETURN(turn_sock && info, PJ_EINVAL);
+
+ if (turn_sock->sess == NULL) {
+ pj_bzero(info, sizeof(*info));
+ info->state = PJ_TURN_STATE_NULL;
+ return PJ_SUCCESS;
+ }
+
+ return pj_turn_session_get_info(turn_sock->sess, info);
+}
+
+/**
+ * Lock the TURN socket. Application may need to call this function to
+ * synchronize access to other objects to avoid deadlock. The lock is
+ * the recursive mutex created in pj_turn_sock_create().
+ */
+PJ_DEF(pj_status_t) pj_turn_sock_lock(pj_turn_sock *turn_sock)
+{
+ return pj_lock_acquire(turn_sock->lock);
+}
+
+/**
+ * Unlock the TURN socket (releases the lock taken with
+ * pj_turn_sock_lock()).
+ */
+PJ_DEF(pj_status_t) pj_turn_sock_unlock(pj_turn_sock *turn_sock)
+{
+ return pj_lock_release(turn_sock->lock);
+}
+
+/*
+ * Set STUN message logging for this TURN session.
+ * NOTE(review): unlike the other wrappers there is no NULL check on
+ * turn_sock->sess here — confirm this cannot be called after the
+ * session has been detached in turn_on_state().
+ */
+PJ_DEF(void) pj_turn_sock_set_log( pj_turn_sock *turn_sock,
+ unsigned flags)
+{
+ pj_turn_session_set_log(turn_sock->sess, flags);
+}
+
+/*
+ * Set the SOFTWARE name to be advertised in STUN requests.
+ * NOTE(review): turn_sock->sess is dereferenced without a NULL check,
+ * unlike e.g. pj_turn_sock_sendto() — verify call ordering.
+ */
+PJ_DEF(pj_status_t) pj_turn_sock_set_software_name( pj_turn_sock *turn_sock,
+ const pj_str_t *sw)
+{
+ return pj_turn_session_set_software_name(turn_sock->sess, sw);
+}
+
+/*
+ * Initialize the allocation: store the ALLOCATE parameters and the
+ * credential, then start (possibly asynchronous) resolution of the
+ * TURN server. The actual socket creation and ALLOCATE request are
+ * performed later, from turn_on_state(), once the server address is
+ * known.
+ */
+PJ_DEF(pj_status_t) pj_turn_sock_alloc(pj_turn_sock *turn_sock,
+ const pj_str_t *domain,
+ int default_port,
+ pj_dns_resolver *resolver,
+ const pj_stun_auth_cred *cred,
+ const pj_turn_alloc_param *param)
+{
+ pj_status_t status;
+
+ PJ_ASSERT_RETURN(turn_sock && domain, PJ_EINVAL);
+ PJ_ASSERT_RETURN(turn_sock->sess, PJ_EINVALIDOP);
+
+ /* Copy alloc param. We will call session_alloc() only after the
+ * server address has been resolved.
+ */
+ if (param) {
+ pj_turn_alloc_param_copy(turn_sock->pool, &turn_sock->alloc_param, param);
+ } else {
+ pj_turn_alloc_param_default(&turn_sock->alloc_param);
+ }
+
+ /* Set credential */
+ if (cred) {
+ status = pj_turn_session_set_credential(turn_sock->sess, cred);
+ if (status != PJ_SUCCESS) {
+ sess_fail(turn_sock, "Error setting credential", status);
+ return status;
+ }
+ }
+
+ /* Resolve server */
+ status = pj_turn_session_set_server(turn_sock->sess, domain, default_port,
+ resolver);
+ if (status != PJ_SUCCESS) {
+ sess_fail(turn_sock, "Error setting TURN server", status);
+ return status;
+ }
+
+ /* Done for now. The next work will be done when session state moved
+ * to RESOLVED state.
+ */
+
+ return PJ_SUCCESS;
+}
+
+/*
+ * Install permissions for the given peer addresses. Requires an
+ * active session.
+ */
+PJ_DEF(pj_status_t) pj_turn_sock_set_perm( pj_turn_sock *turn_sock,
+ unsigned addr_cnt,
+ const pj_sockaddr addr[],
+ unsigned options)
+{
+ if (turn_sock->sess == NULL)
+ return PJ_EINVALIDOP;
+
+ return pj_turn_session_set_perm(turn_sock->sess, addr_cnt, addr,
+ options);
+}
+
+/*
+ * Send a packet to the given peer through the relay. Requires an
+ * active session.
+ */
+PJ_DEF(pj_status_t) pj_turn_sock_sendto( pj_turn_sock *turn_sock,
+ const pj_uint8_t *pkt,
+ unsigned pkt_len,
+ const pj_sockaddr_t *addr,
+ unsigned addr_len)
+{
+ PJ_ASSERT_RETURN(turn_sock && addr && addr_len, PJ_EINVAL);
+
+ return (turn_sock->sess == NULL) ? PJ_EINVALIDOP :
+ pj_turn_session_sendto(turn_sock->sess, pkt, pkt_len,
+ addr, addr_len);
+}
+
+/*
+ * Bind a peer address to a channel number. This simply delegates to
+ * the TURN session, which sends the ChannelBind request.
+ */
+PJ_DEF(pj_status_t) pj_turn_sock_bind_channel( pj_turn_sock *turn_sock,
+ const pj_sockaddr_t *peer,
+ unsigned addr_len)
+{
+ PJ_ASSERT_RETURN(turn_sock && peer && addr_len, PJ_EINVAL);
+ PJ_ASSERT_RETURN(turn_sock->sess != NULL, PJ_EINVALIDOP);
+
+ return pj_turn_session_bind_channel(turn_sock->sess, peer, addr_len);
+}
+
+
+/*
+ * Notification when outgoing TCP socket has been connected (also
+ * invoked directly for UDP, where no connect handshake is needed).
+ * Starts the read loop on the active socket and sends the initial
+ * ALLOCATE request. Returns PJ_FALSE when the session has been
+ * failed and the active socket must not be used further.
+ */
+static pj_bool_t on_connect_complete(pj_activesock_t *asock,
+ pj_status_t status)
+{
+ pj_turn_sock *turn_sock;
+
+ turn_sock = (pj_turn_sock*) pj_activesock_get_user_data(asock);
+
+ if (status != PJ_SUCCESS) {
+ sess_fail(turn_sock, "TCP connect() error", status);
+ return PJ_FALSE;
+ }
+
+ if (turn_sock->conn_type != PJ_TURN_TP_UDP) {
+ PJ_LOG(5,(turn_sock->obj_name, "TCP connected"));
+ }
+
+ /* Kick start pending read operation. This result used to be
+ * silently ignored; if starting the read fails we would never
+ * receive any response from the server, so fail the session.
+ */
+ status = pj_activesock_start_read(asock, turn_sock->pool,
+ PJ_TURN_MAX_PKT_LEN, 0);
+ if (status != PJ_SUCCESS) {
+ sess_fail(turn_sock, "Error starting read", status);
+ return PJ_FALSE;
+ }
+
+ /* Init send_key */
+ pj_ioqueue_op_key_init(&turn_sock->send_key, sizeof(turn_sock->send_key));
+
+ /* Send Allocate request */
+ status = pj_turn_session_alloc(turn_sock->sess, &turn_sock->alloc_param);
+ if (status != PJ_SUCCESS) {
+ sess_fail(turn_sock, "Error sending ALLOCATE", status);
+ return PJ_FALSE;
+ }
+
+ return PJ_TRUE;
+}
+
+/* Read a 16-bit value in network byte order starting at buf[pos] and
+ * return it in host byte order.
+ */
+static pj_uint16_t GETVAL16H(const pj_uint8_t *buf, unsigned pos)
+{
+ unsigned hi = buf[pos];
+ unsigned lo = buf[pos + 1];
+
+ return (pj_uint16_t)((hi << 8) | lo);
+}
+
+/* Quick check to determine if there is enough packet to process in the
+ * incoming buffer. Return the packet length, or zero if there's no packet.
+ *
+ * For UDP the whole datagram is one packet. For stream transport the
+ * buffer may hold a partial or concatenated sequence of STUN messages
+ * and ChannelData frames, so the framing must be inspected.
+ */
+static unsigned has_packet(pj_turn_sock *turn_sock, const void *buf, pj_size_t bufsize)
+{
+ pj_bool_t is_stun;
+
+ if (turn_sock->conn_type == PJ_TURN_TP_UDP)
+ return bufsize;
+
+ /* Quickly check if this is STUN message, by checking the first two bits and
+ * size field which must be multiple of 4 bytes
+ */
+ is_stun = ((((pj_uint8_t*)buf)[0] & 0xC0) == 0) &&
+ ((GETVAL16H((const pj_uint8_t*)buf, 2) & 0x03)==0);
+
+ if (is_stun) {
+ /* STUN length field excludes the 20-byte header. */
+ pj_size_t msg_len = GETVAL16H((const pj_uint8_t*)buf, 2);
+ return (msg_len+20 <= bufsize) ? msg_len+20 : 0;
+ } else {
+ /* This must be ChannelData. */
+ pj_turn_channel_data cd;
+
+ /* Need at least the 4-byte ChannelData header. */
+ if (bufsize < 4)
+ return 0;
+
+ /* Decode ChannelData packet */
+ pj_memcpy(&cd, buf, sizeof(pj_turn_channel_data));
+ cd.length = pj_ntohs(cd.length);
+
+ /* Over stream transport ChannelData is padded to a multiple
+ * of 4 bytes; round the consumed size up accordingly.
+ */
+ if (bufsize >= cd.length+sizeof(cd))
+ return (cd.length+sizeof(cd)+3) & (~3);
+ else
+ return 0;
+ }
+}
+
+/*
+ * Notification from ioqueue when an incoming packet is received on
+ * the active socket (a UDP datagram, or a TCP stream segment that may
+ * contain several or partial packets).
+ */
+static pj_bool_t on_data_read(pj_activesock_t *asock,
+ void *data,
+ pj_size_t size,
+ pj_status_t status,
+ pj_size_t *remainder)
+{
+ pj_turn_sock *turn_sock;
+ pj_bool_t ret = PJ_TRUE;
+
+ turn_sock = (pj_turn_sock*) pj_activesock_get_user_data(asock);
+ pj_lock_acquire(turn_sock->lock);
+
+ if (status == PJ_SUCCESS && turn_sock->sess) {
+ /* Report incoming packet to TURN session, repeat while we have
+ * "packet" in the buffer (required for stream-oriented transports)
+ */
+ unsigned pkt_len;
+
+ //PJ_LOG(5,(turn_sock->pool->obj_name,
+ // "Incoming data, %lu bytes total buffer", size));
+
+ while ((pkt_len=has_packet(turn_sock, data, size)) != 0) {
+ pj_size_t parsed_len;
+ //const pj_uint8_t *pkt = (const pj_uint8_t*)data;
+
+ //PJ_LOG(5,(turn_sock->pool->obj_name,
+ // "Packet start: %02X %02X %02X %02X",
+ // pkt[0], pkt[1], pkt[2], pkt[3]));
+
+ //PJ_LOG(5,(turn_sock->pool->obj_name,
+ // "Processing %lu bytes packet of %lu bytes total buffer",
+ // pkt_len, size));
+
+ /* The session gets the whole buffer and reports back how
+ * much it consumed via parsed_len.
+ */
+ parsed_len = (unsigned)size;
+ pj_turn_session_on_rx_pkt(turn_sock->sess, data, size, &parsed_len);
+
+ /* parsed_len may be zero if we have parsing error, so use our
+ * previous calculation to exhaust the bad packet.
+ */
+ if (parsed_len == 0)
+ parsed_len = pkt_len;
+
+ /* Shift any unconsumed bytes to the front of the buffer and
+ * tell the active socket to keep them for the next read.
+ */
+ if (parsed_len < (unsigned)size) {
+ *remainder = size - parsed_len;
+ pj_memmove(data, ((char*)data)+parsed_len, *remainder);
+ } else {
+ *remainder = 0;
+ }
+ size = *remainder;
+
+ //PJ_LOG(5,(turn_sock->pool->obj_name,
+ // "Buffer size now %lu bytes", size));
+ }
+ } else if (status != PJ_SUCCESS &&
+ turn_sock->conn_type != PJ_TURN_TP_UDP)
+ {
+ /* Read error on a stream transport means the connection died. */
+ sess_fail(turn_sock, "TCP connection closed", status);
+ ret = PJ_FALSE;
+ goto on_return;
+ }
+
+on_return:
+ pj_lock_release(turn_sock->lock);
+
+ return ret;
+}
+
+
+/*
+ * Callback from TURN session to send outgoing packet.
+ *
+ * dst_addr/dst_addr_len are ignored: all traffic goes out through the
+ * single active socket to the TURN server (presumably connected in
+ * turn_on_state() — see that function).
+ */
+static pj_status_t turn_on_send_pkt(pj_turn_session *sess,
+ const pj_uint8_t *pkt,
+ unsigned pkt_len,
+ const pj_sockaddr_t *dst_addr,
+ unsigned dst_addr_len)
+{
+ pj_turn_sock *turn_sock = (pj_turn_sock*)
+ pj_turn_session_get_user_data(sess);
+ pj_ssize_t len = pkt_len;
+ pj_status_t status;
+
+ if (turn_sock == NULL) {
+ /* We've been destroyed */
+ // https://trac.pjsip.org/repos/ticket/1316
+ //pj_assert(!"We should shutdown gracefully");
+ return PJ_EINVALIDOP;
+ }
+
+ PJ_UNUSED_ARG(dst_addr);
+ PJ_UNUSED_ARG(dst_addr_len);
+
+ /* NOTE(review): active_sock is dereferenced without a NULL check;
+ * confirm the session cannot invoke this before the socket is
+ * created in turn_on_state().
+ */
+ status = pj_activesock_send(turn_sock->active_sock, &turn_sock->send_key,
+ pkt, &len, 0);
+ if (status != PJ_SUCCESS && status != PJ_EPENDING) {
+ show_err(turn_sock, "socket send()", status);
+ }
+
+ return status;
+}
+
+
+/*
+ * Callback from TURN session when a channel is successfully bound.
+ * Nothing needs to happen at the socket level, so all arguments are
+ * deliberately ignored.
+ */
+static void turn_on_channel_bound(pj_turn_session *sess,
+ const pj_sockaddr_t *peer_addr,
+ unsigned addr_len,
+ unsigned ch_num)
+{
+ PJ_UNUSED_ARG(ch_num);
+ PJ_UNUSED_ARG(addr_len);
+ PJ_UNUSED_ARG(peer_addr);
+ PJ_UNUSED_ARG(sess);
+}
+
+
+/*
+ * Callback from TURN session upon incoming data: forward the payload
+ * to the application's on_rx_data callback, if one is installed.
+ */
+static void turn_on_rx_data(pj_turn_session *sess,
+ void *pkt,
+ unsigned pkt_len,
+ const pj_sockaddr_t *peer_addr,
+ unsigned addr_len)
+{
+ pj_turn_sock *turn_sock;
+
+ turn_sock = (pj_turn_sock*) pj_turn_session_get_user_data(sess);
+ if (turn_sock == NULL || turn_sock->cb.on_rx_data == NULL) {
+ /* Transport already destroyed, or app is not interested. */
+ return;
+ }
+
+ (*turn_sock->cb.on_rx_data)(turn_sock, pkt, pkt_len,
+ peer_addr, addr_len);
+}
+
+
+/*
+ * Callback from TURN session when its state has changed.
+ *
+ * On RESOLVED: create the socket and active socket and initiate the
+ * (possibly non-blocking) connection to the TURN server. On
+ * DESTROYING or later: detach from the session and schedule deferred
+ * destruction via TIMER_DESTROY.
+ */
+static void turn_on_state(pj_turn_session *sess,
+ pj_turn_state_t old_state,
+ pj_turn_state_t new_state)
+{
+ pj_turn_sock *turn_sock = (pj_turn_sock*)
+ pj_turn_session_get_user_data(sess);
+ pj_status_t status;
+
+ if (turn_sock == NULL) {
+ /* We've been destroyed */
+ return;
+ }
+
+ /* Notify app first */
+ if (turn_sock->cb.on_state) {
+ (*turn_sock->cb.on_state)(turn_sock, old_state, new_state);
+ }
+
+ /* Make sure user hasn't destroyed us in the callback */
+ if (turn_sock->sess && new_state == PJ_TURN_STATE_RESOLVED) {
+ pj_turn_session_info info;
+ pj_turn_session_get_info(turn_sock->sess, &info);
+ new_state = info.state;
+ }
+
+ if (turn_sock->sess && new_state == PJ_TURN_STATE_RESOLVED) {
+ /*
+ * Once server has been resolved, initiate outgoing TCP
+ * connection to the server.
+ */
+ pj_turn_session_info info;
+ char addrtxt[PJ_INET6_ADDRSTRLEN+8];
+ int sock_type;
+ pj_sock_t sock;
+ pj_activesock_cb asock_cb;
+
+ /* Close existing connection, if any. This happens when
+ * we're switching to alternate TURN server when either TCP
+ * connection or ALLOCATE request failed.
+ */
+ if (turn_sock->active_sock) {
+ pj_activesock_close(turn_sock->active_sock);
+ turn_sock->active_sock = NULL;
+ }
+
+ /* Get server address from session info */
+ pj_turn_session_get_info(sess, &info);
+
+ if (turn_sock->conn_type == PJ_TURN_TP_UDP)
+ sock_type = pj_SOCK_DGRAM();
+ else
+ sock_type = pj_SOCK_STREAM();
+
+ /* Init socket */
+ status = pj_sock_socket(turn_sock->af, sock_type, 0, &sock);
+ if (status != PJ_SUCCESS) {
+ pj_turn_sock_destroy(turn_sock);
+ return;
+ }
+
+ /* Apply QoS, if specified */
+ status = pj_sock_apply_qos2(sock, turn_sock->setting.qos_type,
+ &turn_sock->setting.qos_params,
+ (turn_sock->setting.qos_ignore_error?2:1),
+ turn_sock->pool->obj_name, NULL);
+ if (status != PJ_SUCCESS && !turn_sock->setting.qos_ignore_error) {
+ /* The socket is not yet owned by an active socket, so it
+ * must be closed here or the descriptor would leak.
+ */
+ pj_sock_close(sock);
+ pj_turn_sock_destroy(turn_sock);
+ return;
+ }
+
+ /* Create active socket */
+ pj_bzero(&asock_cb, sizeof(asock_cb));
+ asock_cb.on_data_read = &on_data_read;
+ asock_cb.on_connect_complete = &on_connect_complete;
+ status = pj_activesock_create(turn_sock->pool, sock,
+ sock_type, NULL,
+ turn_sock->cfg.ioqueue, &asock_cb,
+ turn_sock,
+ &turn_sock->active_sock);
+ if (status != PJ_SUCCESS) {
+ /* pj_activesock_create() does not close the socket on
+ * failure, so close it here to avoid leaking the fd.
+ */
+ pj_sock_close(sock);
+ pj_turn_sock_destroy(turn_sock);
+ return;
+ }
+
+ PJ_LOG(5,(turn_sock->pool->obj_name,
+ "Connecting to %s",
+ pj_sockaddr_print(&info.server, addrtxt,
+ sizeof(addrtxt), 3)));
+
+ /* Initiate non-blocking connect */
+#if PJ_HAS_TCP
+ status=pj_activesock_start_connect(turn_sock->active_sock,
+ turn_sock->pool,
+ &info.server,
+ pj_sockaddr_get_len(&info.server));
+ if (status == PJ_SUCCESS) {
+ on_connect_complete(turn_sock->active_sock, PJ_SUCCESS);
+ } else if (status != PJ_EPENDING) {
+ pj_turn_sock_destroy(turn_sock);
+ return;
+ }
+#else
+ on_connect_complete(turn_sock->active_sock, PJ_SUCCESS);
+#endif
+
+ /* Done for now. Subsequent work will be done in
+ * on_connect_complete() callback.
+ */
+ }
+
+ if (new_state >= PJ_TURN_STATE_DESTROYING && turn_sock->sess) {
+ pj_time_val delay = {0, 0};
+
+ /* Detach from the session so later callbacks find NULL. */
+ turn_sock->sess = NULL;
+ pj_turn_session_set_user_data(sess, NULL);
+
+ /* Cancel any pending timer before scheduling the destroy. */
+ if (turn_sock->timer.id) {
+ pj_timer_heap_cancel(turn_sock->cfg.timer_heap, &turn_sock->timer);
+ turn_sock->timer.id = 0;
+ }
+
+ turn_sock->timer.id = TIMER_DESTROY;
+ pj_timer_heap_schedule(turn_sock->cfg.timer_heap, &turn_sock->timer,
+ &delay);
+ }
+}
+
+
diff --git a/pjnath/src/pjturn-client/client_main.c b/pjnath/src/pjturn-client/client_main.c
new file mode 100644
index 0000000..67ec5cc
--- /dev/null
+++ b/pjnath/src/pjturn-client/client_main.c
@@ -0,0 +1,631 @@
+/* $Id: client_main.c 3553 2011-05-05 06:14:19Z nanang $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <pjnath.h>
+#include <pjlib-util.h>
+#include <pjlib.h>
+
+
+#define THIS_FILE "client_main.c"
+#define LOCAL_PORT 1998
+#define BANDWIDTH 64 /* -1 to disable */
+#define LIFETIME 600 /* -1 to disable */
+#define REQ_TRANSPORT -1 /* 0: udp, 1: tcp, -1: disable */
+#define REQ_PORT_PROPS -1 /* -1 to disable */
+#define REQ_IP 0 /* IP address string */
+
+//#define OPTIONS PJ_STUN_NO_AUTHENTICATE
+#define OPTIONS 0
+
+
+/* One test "peer": a plain STUN socket used to exchange data with the
+ * TURN relay, plus the last server-reflexive address STUN reported for it.
+ */
+struct peer
+{
+ pj_stun_sock *stun_sock; /* STUN transport of this peer. */
+ pj_sockaddr mapped_addr; /* Last known STUN mapped address. */
+};
+
+
+/* Application-wide singleton state shared by the console thread, the
+ * worker thread and all pjnath callbacks. Zero-initialized (static).
+ */
+static struct global
+{
+ pj_caching_pool cp; /* Pool factory backing everything below. */
+ pj_pool_t *pool; /* Main application pool. */
+ pj_stun_config stun_config; /* Holds the shared ioqueue + timer heap. */
+ pj_thread_t *thread; /* Worker thread polling ioqueue/timers. */
+ pj_bool_t quit; /* Set to stop worker + console loops. */
+
+ pj_dns_resolver *resolver; /* Optional, only when -N is given. */
+
+ pj_turn_sock *relay; /* The TURN client socket, or NULL. */
+ pj_sockaddr relay_addr; /* Relayed address once state is READY. */
+
+ struct peer peer[2]; /* Two STUN peers to talk to the relay. */
+} g;
+
+/* Command-line options parsed by main(). String members point directly
+ * into argv (no copies are made).
+ */
+static struct options
+{
+ pj_bool_t use_tcp; /* -T: connect to TURN server over TCP. */
+ char *srv_addr; /* TURN server host (required). */
+ char *srv_port; /* TURN server port string, or NULL. */
+ char *realm; /* -r: authentication realm. */
+ char *user_name; /* -u: authentication username. */
+ char *password; /* -p: plaintext password. */
+ pj_bool_t use_fingerprint; /* -F: parsed but not referenced elsewhere
+ * in this file. */
+ char *stun_server; /* -S: separate STUN server for Binding. */
+ char *nameserver; /* -N: enables DNS SRV resolution. */
+} o;
+
+
+static int worker_thread(void *unused);
+static void turn_on_rx_data(pj_turn_sock *relay,
+ void *pkt,
+ unsigned pkt_len,
+ const pj_sockaddr_t *peer_addr,
+ unsigned addr_len);
+static void turn_on_state(pj_turn_sock *relay, pj_turn_state_t old_state,
+ pj_turn_state_t new_state);
+static pj_bool_t stun_sock_on_status(pj_stun_sock *stun_sock,
+ pj_stun_sock_op op,
+ pj_status_t status);
+static pj_bool_t stun_sock_on_rx_data(pj_stun_sock *stun_sock,
+ void *pkt,
+ unsigned pkt_len,
+ const pj_sockaddr_t *src_addr,
+ unsigned addr_len);
+
+
+/* Log a pjlib status code at level 3, prefixed with the given title. */
+static void my_perror(const char *title, pj_status_t status)
+{
+ char errmsg[PJ_ERR_MSG_SIZE];
+ pj_strerror(status, errmsg, sizeof(errmsg));
+
+ PJ_LOG(3,(THIS_FILE, "%s: %s", title, errmsg));
+}
+
+/* Evaluate expr, and on failure log it and return from the ENCLOSING
+ * function. Requires a local pj_status_t named 'status' in the caller.
+ * NOTE(review): multi-statement macro not wrapped in do { } while (0),
+ * so it is unsafe after an unbraced if/else — callers here always use
+ * it as a standalone statement.
+ */
+#define CHECK(expr) status=expr; \
+ if (status!=PJ_SUCCESS) { \
+ my_perror(#expr, status); \
+ return status; \
+ }
+
+/* Initialize pjlib/pjnath, the global pools, timer heap and ioqueue,
+ * create and start both STUN peer sockets, then spawn the worker thread.
+ * Returns PJ_SUCCESS or the first failing status (resources allocated
+ * so far are left for client_shutdown() to release).
+ */
+static int init()
+{
+ int i;
+ pj_status_t status;
+
+ CHECK( pj_init() );
+ CHECK( pjlib_util_init() );
+ CHECK( pjnath_init() );
+
+ /* Check that server is specified */
+ if (!o.srv_addr) {
+ printf("Error: server must be specified\n");
+ return PJ_EINVAL;
+ }
+
+ pj_caching_pool_init(&g.cp, &pj_pool_factory_default_policy, 0);
+
+ g.pool = pj_pool_create(&g.cp.factory, "main", 1000, 1000, NULL);
+
+ /* Init global STUN config */
+ pj_stun_config_init(&g.stun_config, &g.cp.factory, 0, NULL, NULL);
+
+ /* Create global timer heap */
+ CHECK( pj_timer_heap_create(g.pool, 1000, &g.stun_config.timer_heap) );
+
+ /* Create global ioqueue */
+ CHECK( pj_ioqueue_create(g.pool, 16, &g.stun_config.ioqueue) );
+
+ /*
+ * Create peers
+ */
+ for (i=0; i<(int)PJ_ARRAY_SIZE(g.peer); ++i) {
+ pj_stun_sock_cb stun_sock_cb;
+ char name[] = "peer0";
+ pj_uint16_t port;
+ pj_stun_sock_cfg ss_cfg;
+ pj_str_t server;
+
+ pj_bzero(&stun_sock_cb, sizeof(stun_sock_cb));
+ stun_sock_cb.on_rx_data = &stun_sock_on_rx_data;
+ stun_sock_cb.on_status = &stun_sock_on_status;
+
+ g.peer[i].mapped_addr.addr.sa_family = pj_AF_INET();
+
+ pj_stun_sock_cfg_default(&ss_cfg);
+#if 1
+ /* make reading the log easier */
+ ss_cfg.ka_interval = 300;
+#endif
+
+ /* Patch the trailing digit so peers log as "peer0"/"peer1". */
+ name[strlen(name)-1] = '0'+i;
+ status = pj_stun_sock_create(&g.stun_config, name, pj_AF_INET(),
+ &stun_sock_cb, &ss_cfg,
+ &g.peer[i], &g.peer[i].stun_sock);
+ if (status != PJ_SUCCESS) {
+ my_perror("pj_stun_sock_create()", status);
+ return status;
+ }
+
+ /* Peers resolve their mapped address against the -S STUN server
+ * when given, otherwise against the TURN server itself.
+ */
+ if (o.stun_server) {
+ server = pj_str(o.stun_server);
+ port = PJ_STUN_PORT;
+ } else {
+ server = pj_str(o.srv_addr);
+ port = (pj_uint16_t)(o.srv_port?atoi(o.srv_port):PJ_STUN_PORT);
+ }
+ status = pj_stun_sock_start(g.peer[i].stun_sock, &server,
+ port, NULL);
+ if (status != PJ_SUCCESS) {
+ my_perror("pj_stun_sock_start()", status);
+ return status;
+ }
+ }
+
+ /* Start the worker thread */
+ CHECK( pj_thread_create(g.pool, "stun", &worker_thread, NULL, 0, 0, &g.thread) );
+
+
+ return PJ_SUCCESS;
+}
+
+
+/* Tear everything down in reverse order of creation: worker thread,
+ * relay, peer sockets, timer heap, ioqueue, main pool; finally dump and
+ * destroy the caching pool. Safe to call with partially-initialized
+ * state (every step is NULL-guarded). Always returns PJ_SUCCESS.
+ */
+static int client_shutdown()
+{
+ unsigned i;
+
+ if (g.thread) {
+ g.quit = 1;
+ pj_thread_join(g.thread);
+ pj_thread_destroy(g.thread);
+ g.thread = NULL;
+ }
+ if (g.relay) {
+ pj_turn_sock_destroy(g.relay);
+ g.relay = NULL;
+ }
+ for (i=0; i<PJ_ARRAY_SIZE(g.peer); ++i) {
+ if (g.peer[i].stun_sock) {
+ pj_stun_sock_destroy(g.peer[i].stun_sock);
+ g.peer[i].stun_sock = NULL;
+ }
+ }
+ if (g.stun_config.timer_heap) {
+ pj_timer_heap_destroy(g.stun_config.timer_heap);
+ g.stun_config.timer_heap = NULL;
+ }
+ if (g.stun_config.ioqueue) {
+ pj_ioqueue_destroy(g.stun_config.ioqueue);
+ g.stun_config.ioqueue = NULL;
+ }
+ if (g.pool) {
+ pj_pool_release(g.pool);
+ g.pool = NULL;
+ }
+ /* Dump remaining pool usage before destroying the factory (leak aid). */
+ pj_pool_factory_dump(&g.cp.factory, PJ_TRUE);
+ pj_caching_pool_destroy(&g.cp);
+
+ return PJ_SUCCESS;
+}
+
+
+/* Worker thread body: poll the shared ioqueue and timer heap in a tight
+ * loop (10 msec ioqueue timeout per iteration) until g.quit is set.
+ */
+static int worker_thread(void *unused)
+{
+ PJ_UNUSED_ARG(unused);
+
+ while (!g.quit) {
+ const pj_time_val delay = {0, 10};
+
+ /* Poll ioqueue for the TURN client */
+ pj_ioqueue_poll(g.stun_config.ioqueue, &delay);
+
+ /* Poll the timer heap */
+ pj_timer_heap_poll(g.stun_config.timer_heap, NULL);
+
+ }
+
+ return 0;
+}
+
+/* Create the TURN client socket (and, when -N was given, a DNS resolver
+ * for SRV lookup), then start the asynchronous ALLOCATE transaction.
+ * Completion is reported via turn_on_state(). Returns PJ_SUCCESS when
+ * the allocation was initiated, -1 if a relay already exists, or an
+ * error status.
+ * NOTE(review): on failure after the resolver/turn sock were created,
+ * CHECK returns without releasing them — acceptable for a demo tool,
+ * but worth confirming before reuse.
+ */
+static pj_status_t create_relay(void)
+{
+ pj_turn_sock_cb rel_cb;
+ pj_stun_auth_cred cred;
+ pj_str_t srv;
+ pj_status_t status;
+
+ if (g.relay) {
+ PJ_LOG(1,(THIS_FILE, "Relay already created"));
+ return -1;
+ }
+
+ /* Create DNS resolver if configured */
+ if (o.nameserver) {
+ pj_str_t ns = pj_str(o.nameserver);
+
+ status = pj_dns_resolver_create(&g.cp.factory, "resolver", 0,
+ g.stun_config.timer_heap,
+ g.stun_config.ioqueue, &g.resolver);
+ if (status != PJ_SUCCESS) {
+ PJ_LOG(1,(THIS_FILE, "Error creating resolver (err=%d)", status));
+ return status;
+ }
+
+ status = pj_dns_resolver_set_ns(g.resolver, 1, &ns, NULL);
+ if (status != PJ_SUCCESS) {
+ PJ_LOG(1,(THIS_FILE, "Error configuring nameserver (err=%d)", status));
+ return status;
+ }
+ }
+
+ pj_bzero(&rel_cb, sizeof(rel_cb));
+ rel_cb.on_rx_data = &turn_on_rx_data;
+ rel_cb.on_state = &turn_on_state;
+ CHECK( pj_turn_sock_create(&g.stun_config, pj_AF_INET(),
+ (o.use_tcp? PJ_TURN_TP_TCP : PJ_TURN_TP_UDP),
+ &rel_cb, 0,
+ NULL, &g.relay) );
+
+ /* Static long-term credential; only set when a username was given. */
+ if (o.user_name) {
+ pj_bzero(&cred, sizeof(cred));
+ cred.type = PJ_STUN_AUTH_CRED_STATIC;
+ cred.data.static_cred.realm = pj_str(o.realm);
+ cred.data.static_cred.username = pj_str(o.user_name);
+ cred.data.static_cred.data_type = PJ_STUN_PASSWD_PLAIN;
+ cred.data.static_cred.data = pj_str(o.password);
+ //cred.data.static_cred.nonce = pj_str(o.nonce);
+ } else {
+ PJ_LOG(2,(THIS_FILE, "Warning: no credential is set"));
+ }
+
+ srv = pj_str(o.srv_addr);
+ CHECK(pj_turn_sock_alloc(g.relay, /* the relay */
+ &srv, /* srv addr */
+ (o.srv_port?atoi(o.srv_port):PJ_STUN_PORT),/* def port */
+ g.resolver, /* resolver */
+ (o.user_name?&cred:NULL), /* credential */
+ NULL) /* alloc param */
+ );
+
+ return PJ_SUCCESS;
+}
+
+/* Destroy the TURN relay socket if one exists. g.relay is not cleared
+ * here; it is reset by turn_on_state() when the sock reports a
+ * post-READY state — TODO confirm that callback always fires.
+ */
+static void destroy_relay(void)
+{
+ if (g.relay) {
+ pj_turn_sock_destroy(g.relay);
+ }
+}
+
+
+/* TURN callback: data arrived from a peer through the relay. Logs the
+ * source address and the payload (printed with %.*s, so the payload is
+ * assumed to be text — binary data would log garbage).
+ */
+static void turn_on_rx_data(pj_turn_sock *relay,
+ void *pkt,
+ unsigned pkt_len,
+ const pj_sockaddr_t *peer_addr,
+ unsigned addr_len)
+{
+ char addrinfo[80];
+
+ pj_sockaddr_print(peer_addr, addrinfo, sizeof(addrinfo), 3);
+
+ PJ_LOG(3,(THIS_FILE, "Client received %d bytes data from %s: %.*s",
+ pkt_len, addrinfo, pkt_len, pkt));
+}
+
+
+/* TURN callback: state transition. On READY, cache the relayed address
+ * for the peers to send to. On any state past READY, forget the relay
+ * pointer — the pj_turn_sock destroys itself from there (presumably;
+ * confirm against pj_turn_sock docs before relying on it).
+ */
+static void turn_on_state(pj_turn_sock *relay, pj_turn_state_t old_state,
+ pj_turn_state_t new_state)
+{
+ PJ_LOG(3,(THIS_FILE, "State %s --> %s", pj_turn_state_name(old_state),
+ pj_turn_state_name(new_state)));
+
+ if (new_state == PJ_TURN_STATE_READY) {
+ pj_turn_session_info info;
+ pj_turn_sock_get_info(relay, &info);
+ pj_memcpy(&g.relay_addr, &info.relay_addr, sizeof(pj_sockaddr));
+ } else if (new_state > PJ_TURN_STATE_READY && g.relay) {
+ PJ_LOG(3,(THIS_FILE, "Relay shutting down.."));
+ g.relay = NULL;
+ }
+}
+
+/* STUN socket callback: operation completed (binding, keep-alive, ...).
+ * On success of a binding/keep-alive op, refresh the peer's cached
+ * mapped address and log it when it changed. Returning PJ_FALSE on
+ * error tells pjnath to stop further processing of this socket.
+ * NOTE(review): 'peer-g.peer' is a ptrdiff_t printed with %d — works on
+ * common ABIs for this 2-element array, but strictly a format mismatch.
+ */
+static pj_bool_t stun_sock_on_status(pj_stun_sock *stun_sock,
+ pj_stun_sock_op op,
+ pj_status_t status)
+{
+ struct peer *peer = (struct peer*) pj_stun_sock_get_user_data(stun_sock);
+
+ if (status == PJ_SUCCESS) {
+ PJ_LOG(4,(THIS_FILE, "peer%d: %s success", peer-g.peer,
+ pj_stun_sock_op_name(op)));
+ } else {
+ char errmsg[PJ_ERR_MSG_SIZE];
+ pj_strerror(status, errmsg, sizeof(errmsg));
+ PJ_LOG(1,(THIS_FILE, "peer%d: %s error: %s", peer-g.peer,
+ pj_stun_sock_op_name(op), errmsg));
+ return PJ_FALSE;
+ }
+
+ if (op==PJ_STUN_SOCK_BINDING_OP || op==PJ_STUN_SOCK_KEEP_ALIVE_OP) {
+ pj_stun_sock_info info;
+ int cmp;
+
+ pj_stun_sock_get_info(stun_sock, &info);
+ cmp = pj_sockaddr_cmp(&info.mapped_addr, &peer->mapped_addr);
+
+ if (cmp) {
+ char straddr[PJ_INET6_ADDRSTRLEN+10];
+
+ pj_sockaddr_cp(&peer->mapped_addr, &info.mapped_addr);
+ pj_sockaddr_print(&peer->mapped_addr, straddr, sizeof(straddr), 3);
+ PJ_LOG(3,(THIS_FILE, "peer%d: STUN mapped address is %s",
+ peer-g.peer, straddr));
+ }
+ }
+
+ return PJ_TRUE;
+}
+
+/* STUN socket callback: raw (non-STUN) data arrived at a peer socket.
+ * NUL-terminates the payload in place and logs it as text.
+ * NOTE(review): writing pkt[pkt_len] assumes the receive buffer has at
+ * least one spare byte past pkt_len — out-of-bounds if the packet fills
+ * the buffer exactly; confirm the pj_stun_sock buffer contract, or log
+ * with %.*s instead.
+ */
+static pj_bool_t stun_sock_on_rx_data(pj_stun_sock *stun_sock,
+ void *pkt,
+ unsigned pkt_len,
+ const pj_sockaddr_t *src_addr,
+ unsigned addr_len)
+{
+ struct peer *peer = (struct peer*) pj_stun_sock_get_user_data(stun_sock);
+ char straddr[PJ_INET6_ADDRSTRLEN+10];
+
+ ((char*)pkt)[pkt_len] = '\0';
+
+ pj_sockaddr_print(src_addr, straddr, sizeof(straddr), 3);
+ PJ_LOG(3,(THIS_FILE, "peer%d: received %d bytes data from %s: %s",
+ peer-g.peer, pkt_len, straddr, (char*)pkt));
+
+ return PJ_TRUE;
+}
+
+
+/* Print the interactive console menu together with a snapshot of the
+ * client state: relay state/address and both peers' mapped addresses.
+ * NOTE(review): "adderss" typo in the PEER-1 menu line (display only).
+ */
+static void menu(void)
+{
+ pj_turn_session_info info;
+ char client_state[20], relay_addr[80], peer0_addr[80], peer1_addr[80];
+
+ if (g.relay) {
+ pj_turn_sock_get_info(g.relay, &info);
+ strcpy(client_state, pj_turn_state_name(info.state));
+ if (info.state >= PJ_TURN_STATE_READY)
+ pj_sockaddr_print(&info.relay_addr, relay_addr, sizeof(relay_addr), 3);
+ else
+ strcpy(relay_addr, "0.0.0.0:0");
+ } else {
+ strcpy(client_state, "NULL");
+ strcpy(relay_addr, "0.0.0.0:0");
+ }
+
+ pj_sockaddr_print(&g.peer[0].mapped_addr, peer0_addr, sizeof(peer0_addr), 3);
+ pj_sockaddr_print(&g.peer[1].mapped_addr, peer1_addr, sizeof(peer1_addr), 3);
+
+
+ puts("\n");
+ puts("+=====================================================================+");
+ puts("| CLIENT | PEER-0 |");
+ puts("| | |");
+ printf("| State : %-12s | Address: %-21s |\n",
+ client_state, peer0_addr);
+ printf("| Relay addr: %-21s | |\n",
+ relay_addr);
+ puts("| | 0 Send data to relay address |");
+ puts("| a Allocate relay | |");
+ puts("| p,pp Set permission for peer 0/1 +--------------------------------+");
+ puts("| s,ss Send data to peer 0/1 | PEER-1 |");
+ puts("| b,bb BindChannel to peer 0/1 | |");
+ printf("| x Delete allocation | Address: %-21s |\n",
+ peer1_addr);
+ puts("+------------------------------------+ |");
+ puts("| q Quit d Dump | 1 Send data to relay adderss |");
+ puts("+------------------------------------+--------------------------------+");
+ printf(">>> ");
+ fflush(stdout);
+}
+
+
+/* Interactive console loop: show the menu, read one line from stdin and
+ * dispatch on its first character. A doubled second character ('ss',
+ * 'bb', 'pp') targets peer 1 instead of peer 0. Exits on 'q', EOF, or
+ * when g.quit is set elsewhere.
+ */
+static void console_main(void)
+{
+ while (!g.quit) {
+ char input[32];
+ struct peer *peer;
+ pj_status_t status;
+
+ menu();
+
+ if (fgets(input, sizeof(input), stdin) == NULL)
+ break;
+
+ switch (input[0]) {
+ case 'a':
+ /* Allocate relay. */
+ create_relay();
+ break;
+ case 'd':
+ /* Dump pool usage. */
+ pj_pool_factory_dump(&g.cp.factory, PJ_TRUE);
+ break;
+ case 's':
+ /* Send test data through the relay to peer 0 or 1. */
+ if (g.relay == NULL) {
+ puts("Error: no relay");
+ continue;
+ }
+ if (input[1]!='s')
+ peer = &g.peer[0];
+ else
+ peer = &g.peer[1];
+
+ strcpy(input, "Hello from client");
+ status = pj_turn_sock_sendto(g.relay, (const pj_uint8_t*)input,
+ strlen(input)+1,
+ &peer->mapped_addr,
+ pj_sockaddr_get_len(&peer->mapped_addr));
+ if (status != PJ_SUCCESS)
+ my_perror("turn_udp_sendto() failed", status);
+ break;
+ case 'b':
+ /* Bind a TURN channel to peer 0 or 1. */
+ if (g.relay == NULL) {
+ puts("Error: no relay");
+ continue;
+ }
+ if (input[1]!='b')
+ peer = &g.peer[0];
+ else
+ peer = &g.peer[1];
+
+ status = pj_turn_sock_bind_channel(g.relay, &peer->mapped_addr,
+ pj_sockaddr_get_len(&peer->mapped_addr));
+ if (status != PJ_SUCCESS)
+ my_perror("turn_udp_bind_channel() failed", status);
+ break;
+ case 'p':
+ /* Install a permission for peer 0 or 1. */
+ if (g.relay == NULL) {
+ puts("Error: no relay");
+ continue;
+ }
+ if (input[1]!='p')
+ peer = &g.peer[0];
+ else
+ peer = &g.peer[1];
+
+ status = pj_turn_sock_set_perm(g.relay, 1, &peer->mapped_addr, 1);
+ if (status != PJ_SUCCESS)
+ my_perror("pj_turn_sock_set_perm() failed", status);
+ break;
+ case 'x':
+ /* Delete the allocation. */
+ if (g.relay == NULL) {
+ puts("Error: no relay");
+ continue;
+ }
+ destroy_relay();
+ break;
+ case '0':
+ case '1':
+ /* Have peer N send directly to the relayed address. */
+ if (g.relay == NULL) {
+ puts("No relay");
+ break;
+ }
+ peer = &g.peer[input[0]-'0'];
+ sprintf(input, "Hello from peer%d", input[0]-'0');
+ pj_stun_sock_sendto(peer->stun_sock, NULL, input, strlen(input)+1, 0,
+ &g.relay_addr, pj_sockaddr_get_len(&g.relay_addr));
+ break;
+ case 'q':
+ g.quit = PJ_TRUE;
+ break;
+ }
+ }
+}
+
+
+/* Print command-line usage to stdout. */
+static void usage(void)
+{
+ puts("Usage: pjturn_client TURN-SERVER [OPTIONS]");
+ puts("");
+ puts("where TURN-SERVER is \"host[:port]\"");
+ puts("");
+ puts("and OPTIONS:");
+ puts(" --tcp, -T Use TCP to connect to TURN server");
+ puts(" --realm, -r REALM Set realm of the credential to REALM");
+ puts(" --username, -u UID Set username of the credential to UID");
+ puts(" --password, -p PASSWD Set password of the credential to PASSWD");
+ puts(" --fingerprint, -F Use fingerprint for outgoing requests");
+ puts(" --stun-srv, -S NAME Use this STUN srv instead of TURN for Binding discovery");
+ puts(" --nameserver, -N IP Activate DNS SRV, use this DNS server");
+ puts(" --help, -h");
+}
+
+/* Entry point: parse options, split "host:port" in place, initialize the
+ * stack and run the interactive console; always shuts down on exit.
+ * NOTE(review): long_options has no terminating all-zero element —
+ * getopt_long conventionally requires one; verify pj_getopt_long's
+ * contract, otherwise an unknown long option may read past the array.
+ */
+int main(int argc, char *argv[])
+{
+ struct pj_getopt_option long_options[] = {
+ { "realm", 1, 0, 'r'},
+ { "username", 1, 0, 'u'},
+ { "password", 1, 0, 'p'},
+ { "fingerprint",0, 0, 'F'},
+ { "tcp", 0, 0, 'T'},
+ { "help", 0, 0, 'h'},
+ { "stun-srv", 1, 0, 'S'},
+ { "nameserver", 1, 0, 'N'}
+ };
+ int c, opt_id;
+ char *pos;
+ pj_status_t status;
+
+ while((c=pj_getopt_long(argc,argv, "r:u:p:S:N:hFT", long_options, &opt_id))!=-1) {
+ switch (c) {
+ case 'r':
+ o.realm = pj_optarg;
+ break;
+ case 'u':
+ o.user_name = pj_optarg;
+ break;
+ case 'p':
+ o.password = pj_optarg;
+ break;
+ case 'h':
+ usage();
+ return 0;
+ case 'F':
+ o.use_fingerprint = PJ_TRUE;
+ break;
+ case 'T':
+ o.use_tcp = PJ_TRUE;
+ break;
+ case 'S':
+ o.stun_server = pj_optarg;
+ break;
+ case 'N':
+ o.nameserver = pj_optarg;
+ break;
+ default:
+ printf("Argument \"%s\" is not valid. Use -h to see help",
+ argv[pj_optind]);
+ return 1;
+ }
+ }
+
+ if (pj_optind == argc) {
+ puts("Error: TARGET is needed");
+ usage();
+ return 1;
+ }
+
+ /* Split "host:port" in place (mutates argv). */
+ if ((pos=pj_ansi_strchr(argv[pj_optind], ':')) != NULL) {
+ o.srv_addr = argv[pj_optind];
+ *pos = '\0';
+ o.srv_port = pos+1;
+ } else {
+ o.srv_addr = argv[pj_optind];
+ }
+
+ if ((status=init()) != 0)
+ goto on_return;
+
+ //if ((status=create_relay()) != 0)
+ // goto on_return;
+
+ console_main();
+
+on_return:
+ client_shutdown();
+ return status ? 1 : 0;
+}
+
diff --git a/pjnath/src/pjturn-srv/allocation.c b/pjnath/src/pjturn-srv/allocation.c
new file mode 100644
index 0000000..9371635
--- /dev/null
+++ b/pjnath/src/pjturn-srv/allocation.c
@@ -0,0 +1,1377 @@
+/* $Id: allocation.c 3553 2011-05-05 06:14:19Z nanang $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include "turn.h"
+#include "auth.h"
+
+
+#define THIS_FILE "allocation.c"
+
+
+enum {
+ TIMER_ID_NONE,
+ TIMER_ID_TIMEOUT,
+ TIMER_ID_DESTROY
+};
+
+#define DESTROY_DELAY {0, 500}
+#define PEER_TABLE_SIZE 32
+
+#define MAX_CLIENT_BANDWIDTH 128 /* In Kbps */
+#define DEFA_CLIENT_BANDWIDTH 64
+
+#define MIN_LIFETIME 30
+#define MAX_LIFETIME 600
+#define DEF_LIFETIME 300
+
+
+/* Parsed Allocation request. */
+/* Parsed Allocation request: client-requested parameters extracted from
+ * an ALLOCATE message by parse_allocate_req(), after clamping/defaults.
+ */
+typedef struct alloc_request
+{
+ unsigned tp_type; /* Requested transport */
+ char addr[PJ_INET6_ADDRSTRLEN]; /* Requested IP */
+ unsigned bandwidth; /* Requested bandwidth */
+ unsigned lifetime; /* Lifetime. */
+ unsigned rpp_bits; /* A bits */
+ unsigned rpp_port; /* Requested port */
+} alloc_request;
+
+
+
+/* Prototypes */
+static void destroy_allocation(pj_turn_allocation *alloc);
+static pj_status_t create_relay(pj_turn_srv *srv,
+ pj_turn_allocation *alloc,
+ const pj_stun_msg *msg,
+ const alloc_request *req,
+ pj_turn_relay_res *relay);
+static void destroy_relay(pj_turn_relay_res *relay);
+static void on_rx_from_peer(pj_ioqueue_key_t *key,
+ pj_ioqueue_op_key_t *op_key,
+ pj_ssize_t bytes_read);
+static pj_status_t stun_on_send_msg(pj_stun_session *sess,
+ void *token,
+ const void *pkt,
+ pj_size_t pkt_size,
+ const pj_sockaddr_t *dst_addr,
+ unsigned addr_len);
+static pj_status_t stun_on_rx_request(pj_stun_session *sess,
+ const pj_uint8_t *pkt,
+ unsigned pkt_len,
+ const pj_stun_rx_data *rdata,
+ void *token,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len);
+static pj_status_t stun_on_rx_indication(pj_stun_session *sess,
+ const pj_uint8_t *pkt,
+ unsigned pkt_len,
+ const pj_stun_msg *msg,
+ void *token,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len);
+
+/* Log allocation error */
+/* Log allocation error: decode 'status' and log it at level 4 together
+ * with the allocation's client info string.
+ */
+static void alloc_err(pj_turn_allocation *alloc, const char *title,
+ pj_status_t status)
+{
+ char errmsg[PJ_ERR_MSG_SIZE];
+
+ pj_strerror(status, errmsg, sizeof(errmsg));
+ PJ_LOG(4,(alloc->obj_name, "%s for client %s: %s",
+ title, alloc->info, errmsg));
+}
+
+
+/* Parse ALLOCATE request */
+/* Parse an ALLOCATE request into *cfg, validating it as we go. On any
+ * violation an error response is sent to the client immediately and a
+ * PJ_STATUS_FROM_STUN_CODE() error is returned; PJ_SUCCESS means cfg is
+ * fully populated (bandwidth/lifetime clamped to server limits).
+ */
+static pj_status_t parse_allocate_req(alloc_request *cfg,
+ pj_stun_session *sess,
+ const pj_stun_rx_data *rdata,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len)
+{
+ const pj_stun_msg *req = rdata->msg;
+ pj_stun_bandwidth_attr *attr_bw;
+ pj_stun_req_transport_attr *attr_req_tp;
+ pj_stun_res_token_attr *attr_res_token;
+ pj_stun_lifetime_attr *attr_lifetime;
+
+ pj_bzero(cfg, sizeof(*cfg));
+
+ /* Get BANDWIDTH attribute, if any. */
+ attr_bw = (pj_stun_uint_attr*)
+ pj_stun_msg_find_attr(req, PJ_STUN_ATTR_BANDWIDTH, 0);
+ if (attr_bw) {
+ cfg->bandwidth = attr_bw->value;
+ } else {
+ cfg->bandwidth = DEFA_CLIENT_BANDWIDTH;
+ }
+
+ /* Check if we can satisfy the bandwidth */
+ if (cfg->bandwidth > MAX_CLIENT_BANDWIDTH) {
+ pj_stun_session_respond(sess, rdata,
+ PJ_STUN_SC_ALLOCATION_QUOTA_REACHED,
+ "Invalid bandwidth", NULL, PJ_TRUE,
+ src_addr, src_addr_len);
+ return PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_ALLOCATION_QUOTA_REACHED);
+ }
+
+ /* MUST have REQUESTED-TRANSPORT attribute */
+ attr_req_tp = (pj_stun_uint_attr*)
+ pj_stun_msg_find_attr(req, PJ_STUN_ATTR_REQ_TRANSPORT, 0);
+ if (attr_req_tp == NULL) {
+ pj_stun_session_respond(sess, rdata, PJ_STUN_SC_BAD_REQUEST,
+ "Missing REQUESTED-TRANSPORT attribute",
+ NULL, PJ_TRUE, src_addr, src_addr_len);
+ return PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_BAD_REQUEST);
+ }
+
+ cfg->tp_type = PJ_STUN_GET_RT_PROTO(attr_req_tp->value);
+
+ /* Can only support UDP for now */
+ if (cfg->tp_type != PJ_TURN_TP_UDP) {
+ pj_stun_session_respond(sess, rdata, PJ_STUN_SC_UNSUPP_TRANSPORT_PROTO,
+ NULL, NULL, PJ_TRUE, src_addr, src_addr_len);
+ return PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_UNSUPP_TRANSPORT_PROTO);
+ }
+
+ /* Get RESERVATION-TOKEN attribute, if any */
+ attr_res_token = (pj_stun_res_token_attr*)
+ pj_stun_msg_find_attr(req, PJ_STUN_ATTR_RESERVATION_TOKEN,
+ 0);
+ if (attr_res_token) {
+ /* We don't support RESERVATION-TOKEN for now */
+ pj_stun_session_respond(sess, rdata,
+ PJ_STUN_SC_BAD_REQUEST,
+ "RESERVATION-TOKEN is not supported", NULL,
+ PJ_TRUE, src_addr, src_addr_len);
+ return PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_BAD_REQUEST);
+ }
+
+ /* Get LIFETIME attribute: reject too-short, silently clamp too-long,
+ * default when absent.
+ */
+ attr_lifetime = (pj_stun_uint_attr*)
+ pj_stun_msg_find_attr(req, PJ_STUN_ATTR_LIFETIME, 0);
+ if (attr_lifetime) {
+ cfg->lifetime = attr_lifetime->value;
+ if (cfg->lifetime < MIN_LIFETIME) {
+ pj_stun_session_respond(sess, rdata, PJ_STUN_SC_BAD_REQUEST,
+ "LIFETIME too short", NULL,
+ PJ_TRUE, src_addr, src_addr_len);
+ return PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_BAD_REQUEST);
+ }
+ if (cfg->lifetime > MAX_LIFETIME)
+ cfg->lifetime = MAX_LIFETIME;
+ } else {
+ cfg->lifetime = DEF_LIFETIME;
+ }
+
+ return PJ_SUCCESS;
+}
+
+
+/* Respond to ALLOCATE request */
+/* Respond to ALLOCATE request: build the success response carrying
+ * XOR-RELAYED-ADDRESS, LIFETIME, BANDWIDTH and XOR-MAPPED-ADDRESS, and
+ * send it to the client over 'transport'. Returns the send status.
+ */
+static pj_status_t send_allocate_response(pj_turn_allocation *alloc,
+ pj_stun_session *srv_sess,
+ pj_turn_transport *transport,
+ const pj_stun_rx_data *rdata)
+{
+ pj_stun_tx_data *tdata;
+ pj_status_t status;
+
+ /* Respond the original ALLOCATE request */
+ status = pj_stun_session_create_res(srv_sess, rdata, 0, NULL, &tdata);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ /* Add XOR-RELAYED-ADDRESS attribute */
+ pj_stun_msg_add_sockaddr_attr(tdata->pool, tdata->msg,
+ PJ_STUN_ATTR_XOR_RELAYED_ADDR, PJ_TRUE,
+ &alloc->relay.hkey.addr,
+ pj_sockaddr_get_len(&alloc->relay.hkey.addr));
+
+ /* Add LIFETIME. */
+ pj_stun_msg_add_uint_attr(tdata->pool, tdata->msg,
+ PJ_STUN_ATTR_LIFETIME,
+ (unsigned)alloc->relay.lifetime);
+
+ /* Add BANDWIDTH */
+ pj_stun_msg_add_uint_attr(tdata->pool, tdata->msg,
+ PJ_STUN_ATTR_BANDWIDTH,
+ alloc->bandwidth);
+
+ /* Add RESERVATION-TOKEN (still unimplemented). */
+ PJ_TODO(ADD_RESERVATION_TOKEN);
+
+ /* Add XOR-MAPPED-ADDRESS */
+ pj_stun_msg_add_sockaddr_attr(tdata->pool, tdata->msg,
+ PJ_STUN_ATTR_XOR_MAPPED_ADDR, PJ_TRUE,
+ &alloc->hkey.clt_addr,
+ pj_sockaddr_get_len(&alloc->hkey.clt_addr));
+
+ /* Send the response */
+ return pj_stun_session_send_msg(srv_sess, transport, PJ_TRUE,
+ PJ_FALSE, &alloc->hkey.clt_addr,
+ pj_sockaddr_get_len(&alloc->hkey.clt_addr),
+ tdata);
+}
+
+
+/*
+ * Init credential for the allocation. We use static credential, meaning that
+ * the user's password must not change during allocation.
+ */
+/*
+ * Init credential for the allocation. We use static credential, meaning that
+ * the user's password must not change during allocation.
+ *
+ * REALM/USERNAME/NONCE are asserted present — the request is presumed to
+ * have already passed long-term authentication (hence PJ_EBUG on miss).
+ * Looks up the password via pj_turn_get_password() and duplicates all
+ * strings into the allocation's pool.
+ */
+static pj_status_t init_cred(pj_turn_allocation *alloc, const pj_stun_msg *req)
+{
+ const pj_stun_username_attr *user;
+ const pj_stun_realm_attr *realm;
+ const pj_stun_nonce_attr *nonce;
+ pj_status_t status;
+
+ realm = (const pj_stun_realm_attr*)
+ pj_stun_msg_find_attr(req, PJ_STUN_ATTR_REALM, 0);
+ PJ_ASSERT_RETURN(realm != NULL, PJ_EBUG);
+
+ user = (const pj_stun_username_attr*)
+ pj_stun_msg_find_attr(req, PJ_STUN_ATTR_USERNAME, 0);
+ PJ_ASSERT_RETURN(user != NULL, PJ_EBUG);
+
+ nonce = (const pj_stun_nonce_attr*)
+ pj_stun_msg_find_attr(req, PJ_STUN_ATTR_NONCE, 0);
+ PJ_ASSERT_RETURN(nonce != NULL, PJ_EBUG);
+
+ /* Lookup the password */
+ status = pj_turn_get_password(NULL, NULL, &realm->value,
+ &user->value, alloc->pool,
+ &alloc->cred.data.static_cred.data_type,
+ &alloc->cred.data.static_cred.data);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ /* Save credential */
+ alloc->cred.type = PJ_STUN_AUTH_CRED_STATIC;
+ pj_strdup(alloc->pool, &alloc->cred.data.static_cred.realm, &realm->value);
+ pj_strdup(alloc->pool, &alloc->cred.data.static_cred.username, &user->value);
+ pj_strdup(alloc->pool, &alloc->cred.data.static_cred.nonce, &nonce->value);
+
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Create new allocation.
+ */
+/*
+ * Create new allocation from an incoming ALLOCATE request: parse and
+ * validate, build the allocation object (own pool, lock, STUN session,
+ * credential, relay socket), register it with the server, and reply.
+ * On success *p_alloc receives the allocation.
+ * NOTE(review): the on_error path always replies 400 Bad Request even
+ * when 'status' arose from e.g. relay creation — confirm intended.
+ */
+PJ_DEF(pj_status_t) pj_turn_allocation_create(pj_turn_transport *transport,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len,
+ const pj_stun_rx_data *rdata,
+ pj_stun_session *srv_sess,
+ pj_turn_allocation **p_alloc)
+{
+ pj_turn_srv *srv = transport->listener->server;
+ const pj_stun_msg *msg = rdata->msg;
+ pj_pool_t *pool;
+ alloc_request req;
+ pj_turn_allocation *alloc;
+ pj_stun_session_cb sess_cb;
+ char str_tmp[80];
+ pj_status_t status;
+
+ /* Parse ALLOCATE request (replies to client itself on failure). */
+ status = parse_allocate_req(&req, srv_sess, rdata, src_addr, src_addr_len);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ pool = pj_pool_create(srv->core.pf, "alloc%p", 1000, 1000, NULL);
+
+ /* Init allocation structure */
+ alloc = PJ_POOL_ZALLOC_T(pool, pj_turn_allocation);
+ alloc->pool = pool;
+ alloc->obj_name = pool->obj_name;
+ alloc->relay.tp.sock = PJ_INVALID_SOCKET;
+ alloc->server = transport->listener->server;
+
+ alloc->bandwidth = req.bandwidth;
+
+ /* Set transport */
+ alloc->transport = transport;
+ pj_turn_transport_add_ref(transport, alloc);
+
+ alloc->hkey.tp_type = transport->listener->tp_type;
+ pj_memcpy(&alloc->hkey.clt_addr, src_addr, src_addr_len);
+
+ status = pj_lock_create_recursive_mutex(pool, alloc->obj_name,
+ &alloc->lock);
+ if (status != PJ_SUCCESS) {
+ goto on_error;
+ }
+
+ /* Create peer hash table */
+ alloc->peer_table = pj_hash_create(pool, PEER_TABLE_SIZE);
+
+ /* Create channel hash table */
+ alloc->ch_table = pj_hash_create(pool, PEER_TABLE_SIZE);
+
+ /* Print info string "xxx:client-addr" (assumes a 3-char type name). */
+ pj_ansi_strcpy(alloc->info,
+ pj_turn_tp_type_name(transport->listener->tp_type));
+ alloc->info[3] = ':';
+ pj_sockaddr_print(src_addr, alloc->info+4, sizeof(alloc->info)-4, 3);
+
+ /* Create STUN session to handle STUN communication with client */
+ pj_bzero(&sess_cb, sizeof(sess_cb));
+ sess_cb.on_send_msg = &stun_on_send_msg;
+ sess_cb.on_rx_request = &stun_on_rx_request;
+ sess_cb.on_rx_indication = &stun_on_rx_indication;
+ status = pj_stun_session_create(&srv->core.stun_cfg, alloc->obj_name,
+ &sess_cb, PJ_FALSE, &alloc->sess);
+ if (status != PJ_SUCCESS) {
+ goto on_error;
+ }
+
+ /* Attach to STUN session */
+ pj_stun_session_set_user_data(alloc->sess, alloc);
+
+ /* Init authentication credential */
+ status = init_cred(alloc, msg);
+ if (status != PJ_SUCCESS) {
+ goto on_error;
+ }
+
+ /* Attach authentication credential to STUN session */
+ pj_stun_session_set_credential(alloc->sess, PJ_STUN_AUTH_LONG_TERM,
+ &alloc->cred);
+
+ /* Create the relay resource */
+ status = create_relay(srv, alloc, msg, &req, &alloc->relay);
+ if (status != PJ_SUCCESS) {
+ goto on_error;
+ }
+
+ /* Register this allocation */
+ pj_turn_srv_register_allocation(srv, alloc);
+
+ /* Respond to ALLOCATE request */
+ status = send_allocate_response(alloc, srv_sess, transport, rdata);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ /* Done */
+ pj_sockaddr_print(&alloc->relay.hkey.addr, str_tmp,
+ sizeof(str_tmp), 3);
+ PJ_LOG(4,(alloc->obj_name, "Client %s created, relay addr=%s:%s",
+ alloc->info, pj_turn_tp_type_name(req.tp_type), str_tmp));
+
+ /* Success */
+ *p_alloc = alloc;
+ return PJ_SUCCESS;
+
+on_error:
+ /* Send reply to the ALLOCATE request */
+ pj_strerror(status, str_tmp, sizeof(str_tmp));
+ pj_stun_session_respond(srv_sess, rdata, PJ_STUN_SC_BAD_REQUEST, str_tmp,
+ transport, PJ_TRUE, src_addr, src_addr_len);
+
+ /* Cleanup */
+ destroy_allocation(alloc);
+ return status;
+}
+
+
+/* Destroy relay resource */
+/* Destroy relay resource: cancel its timer and release the relay
+ * socket, then mark it shut down by zeroing the lifetime. When an
+ * ioqueue key exists only pj_ioqueue_unregister() is called — the
+ * socket is presumably closed by the ioqueue; the explicit
+ * pj_sock_close() branch covers a socket never registered.
+ */
+static void destroy_relay(pj_turn_relay_res *relay)
+{
+ if (relay->timer.id) {
+ pj_timer_heap_cancel(relay->allocation->server->core.timer_heap,
+ &relay->timer);
+ relay->timer.id = PJ_FALSE;
+ }
+
+ if (relay->tp.key) {
+ pj_ioqueue_unregister(relay->tp.key);
+ relay->tp.key = NULL;
+ relay->tp.sock = PJ_INVALID_SOCKET;
+ } else if (relay->tp.sock != PJ_INVALID_SOCKET) {
+ pj_sock_close(relay->tp.sock);
+ relay->tp.sock = PJ_INVALID_SOCKET;
+ }
+
+ /* Mark as shutdown */
+ relay->lifetime = 0;
+}
+
+
+/*
+ * Really destroy allocation.
+ */
+/*
+ * Really destroy allocation: unregister from the server, release the
+ * relay, STUN session, transport reference, lock and finally the pool
+ * that owns the allocation object itself. Tolerates a partially
+ * constructed allocation (each member is NULL-checked).
+ */
+static void destroy_allocation(pj_turn_allocation *alloc)
+{
+ pj_pool_t *pool;
+
+ /* Unregister this allocation */
+ pj_turn_srv_unregister_allocation(alloc->server, alloc);
+
+ /* Destroy relay */
+ destroy_relay(&alloc->relay);
+
+ /* Must lock only after destroying relay otherwise deadlock */
+ if (alloc->lock) {
+ pj_lock_acquire(alloc->lock);
+ }
+
+ /* Unreference transport */
+ if (alloc->transport) {
+ pj_turn_transport_dec_ref(alloc->transport, alloc);
+ alloc->transport = NULL;
+ }
+
+ /* Destroy STUN session */
+ if (alloc->sess) {
+ pj_stun_session_destroy(alloc->sess);
+ alloc->sess = NULL;
+ }
+
+ /* Destroy lock */
+ if (alloc->lock) {
+ pj_lock_release(alloc->lock);
+ pj_lock_destroy(alloc->lock);
+ alloc->lock = NULL;
+ }
+
+ /* Destroy pool (clear the member first; 'alloc' lives in this pool). */
+ pool = alloc->pool;
+ if (pool) {
+ alloc->pool = NULL;
+ pj_pool_release(pool);
+ }
+}
+
+
+/* Public wrapper around destroy_allocation(). */
+PJ_DECL(void) pj_turn_allocation_destroy(pj_turn_allocation *alloc)
+{
+ destroy_allocation(alloc);
+}
+
+
+/*
+ * Handle transport closure.
+ */
+/*
+ * Handle transport closure: drop our reference to the closed transport
+ * and destroy the whole allocation immediately (no shutdown grace).
+ */
+PJ_DEF(void) pj_turn_allocation_on_transport_closed( pj_turn_allocation *alloc,
+ pj_turn_transport *tp)
+{
+ PJ_LOG(5,(alloc->obj_name, "Transport %s unexpectedly closed, destroying "
+ "allocation %s", tp->info, alloc->info));
+ pj_turn_transport_dec_ref(tp, alloc);
+ alloc->transport = NULL;
+ destroy_allocation(alloc);
+}
+
+
+/* Initiate shutdown sequence for this allocation and start destroy timer.
+ * Once allocation is marked as shutting down, any packets will be
+ * rejected/discarded
+ */
+/* Initiate shutdown sequence for this allocation and start destroy timer.
+ * Once allocation is marked as shutting down, any packets will be
+ * rejected/discarded.
+ *
+ * Timer state machine: a pending TIMEOUT timer is cancelled and replaced
+ * by a DESTROY timer (DESTROY_DELAY later); a pending DESTROY timer makes
+ * this call a no-op. The relay is torn down now, but the allocation stays
+ * in the hash table so REFRESH retransmissions still match it.
+ */
+static void alloc_shutdown(pj_turn_allocation *alloc)
+{
+ pj_time_val destroy_delay = DESTROY_DELAY;
+
+ /* Work with existing schedule */
+ if (alloc->relay.timer.id == TIMER_ID_TIMEOUT) {
+ /* Cancel existing shutdown timer */
+ pj_timer_heap_cancel(alloc->server->core.timer_heap,
+ &alloc->relay.timer);
+ alloc->relay.timer.id = TIMER_ID_NONE;
+
+ } else if (alloc->relay.timer.id == TIMER_ID_DESTROY) {
+ /* We've been scheduled to be destroyed, ignore this
+ * shutdown request.
+ */
+ return;
+ }
+
+ pj_assert(alloc->relay.timer.id == TIMER_ID_NONE);
+
+ /* Shutdown relay socket */
+ destroy_relay(&alloc->relay);
+
+ /* Don't unregister from hash table because we still need to
+ * handle REFRESH retransmission.
+ */
+
+ /* Schedule destroy timer */
+ alloc->relay.timer.id = TIMER_ID_DESTROY;
+ pj_timer_heap_schedule(alloc->server->core.timer_heap,
+ &alloc->relay.timer, &destroy_delay);
+}
+
+
+/* Reschedule timeout using current lifetime setting */
+/* Reschedule timeout using current lifetime setting: refresh the expiry
+ * timestamp, cancel any pending TIMEOUT timer, and schedule a new one
+ * 'lifetime' seconds from now. Must not be called while the DESTROY
+ * timer is pending (asserted). Returns the scheduling status.
+ */
+static pj_status_t resched_timeout(pj_turn_allocation *alloc)
+{
+ pj_time_val delay;
+ pj_status_t status;
+
+ pj_gettimeofday(&alloc->relay.expiry);
+ alloc->relay.expiry.sec += alloc->relay.lifetime;
+
+ pj_assert(alloc->relay.timer.id != TIMER_ID_DESTROY);
+ if (alloc->relay.timer.id != 0) {
+ pj_timer_heap_cancel(alloc->server->core.timer_heap,
+ &alloc->relay.timer);
+ alloc->relay.timer.id = TIMER_ID_NONE;
+ }
+
+ delay.sec = alloc->relay.lifetime;
+ delay.msec = 0;
+
+ alloc->relay.timer.id = TIMER_ID_TIMEOUT;
+ status = pj_timer_heap_schedule(alloc->server->core.timer_heap,
+ &alloc->relay.timer, &delay);
+ if (status != PJ_SUCCESS) {
+ alloc->relay.timer.id = TIMER_ID_NONE;
+ return status;
+ }
+
+ return PJ_SUCCESS;
+}
+
+
+/* Timer timeout callback */
+/* Timer timeout callback. user_data is the pj_turn_relay_res. Two
+ * phases: TIMEOUT (client failed to REFRESH in time) starts the
+ * shutdown sequence; DESTROY (grace period elapsed) frees the
+ * allocation for good.
+ */
+static void relay_timeout_cb(pj_timer_heap_t *heap, pj_timer_entry *e)
+{
+ pj_turn_relay_res *rel;
+ pj_turn_allocation *alloc;
+
+ PJ_UNUSED_ARG(heap);
+
+ rel = (pj_turn_relay_res*) e->user_data;
+ alloc = rel->allocation;
+
+ if (e->id == TIMER_ID_TIMEOUT) {
+
+ e->id = TIMER_ID_NONE;
+
+ PJ_LOG(4,(alloc->obj_name,
+ "Client %s refresh timed-out, shutting down..",
+ alloc->info));
+
+ alloc_shutdown(alloc);
+
+ } else if (e->id == TIMER_ID_DESTROY) {
+ e->id = TIMER_ID_NONE;
+
+ PJ_LOG(4,(alloc->obj_name, "Client %s destroying..",
+ alloc->info));
+
+ destroy_allocation(alloc);
+ }
+}
+
+
+/*
+ * Create relay.
+ */
+static pj_status_t create_relay(pj_turn_srv *srv,
+ pj_turn_allocation *alloc,
+ const pj_stun_msg *msg,
+ const alloc_request *req,
+ pj_turn_relay_res *relay)
+{
+ enum { RETRY = 40 };
+ pj_pool_t *pool = alloc->pool;
+ int retry, retry_max, sock_type;
+ pj_ioqueue_callback icb;
+ int af, namelen;
+ pj_stun_string_attr *sa;
+ pj_status_t status;
+
+ pj_bzero(relay, sizeof(*relay));
+
+ relay->allocation = alloc;
+ relay->tp.sock = PJ_INVALID_SOCKET;
+
+ /* TODO: get the requested address family from somewhere */
+ af = alloc->transport->listener->addr.addr.sa_family;
+
+ /* Save realm */
+ sa = (pj_stun_string_attr*)
+ pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_REALM, 0);
+ PJ_ASSERT_RETURN(sa, PJ_EINVALIDOP);
+ pj_strdup(pool, &relay->realm, &sa->value);
+
+ /* Save username */
+ sa = (pj_stun_string_attr*)
+ pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_USERNAME, 0);
+ PJ_ASSERT_RETURN(sa, PJ_EINVALIDOP);
+ pj_strdup(pool, &relay->user, &sa->value);
+
+ /* Lifetime and timeout */
+ relay->lifetime = req->lifetime;
+ pj_timer_entry_init(&relay->timer, TIMER_ID_NONE, relay,
+ &relay_timeout_cb);
+ resched_timeout(alloc);
+
+ /* Transport type */
+ relay->hkey.tp_type = req->tp_type;
+
+ /* Create the socket */
+ if (req->tp_type == PJ_TURN_TP_UDP) {
+ sock_type = pj_SOCK_DGRAM();
+ } else if (req->tp_type == PJ_TURN_TP_TCP) {
+ sock_type = pj_SOCK_STREAM();
+ } else {
+ pj_assert(!"Unknown transport");
+ return PJ_EINVALIDOP;
+ }
+
+ status = pj_sock_socket(af, sock_type, 0, &relay->tp.sock);
+ if (status != PJ_SUCCESS) {
+ pj_bzero(relay, sizeof(*relay));
+ return status;
+ }
+
+ /* Find suitable port for this allocation */
+ if (req->rpp_port) {
+ retry_max = 1;
+ } else {
+ retry_max = RETRY;
+ }
+
+ for (retry=0; retry<retry_max; ++retry) {
+ pj_uint16_t port;
+ pj_sockaddr bound_addr;
+
+ pj_lock_acquire(srv->core.lock);
+
+ if (req->rpp_port) {
+ port = (pj_uint16_t) req->rpp_port;
+ } else if (req->tp_type == PJ_TURN_TP_UDP) {
+ port = (pj_uint16_t) srv->ports.next_udp++;
+ if (srv->ports.next_udp > srv->ports.max_udp)
+ srv->ports.next_udp = srv->ports.min_udp;
+ } else if (req->tp_type == PJ_TURN_TP_TCP) {
+ port = (pj_uint16_t) srv->ports.next_tcp++;
+ if (srv->ports.next_tcp > srv->ports.max_tcp)
+ srv->ports.next_tcp = srv->ports.min_tcp;
+ } else {
+ pj_assert(!"Invalid transport");
+ port = 0;
+ }
+
+ pj_lock_release(srv->core.lock);
+
+ pj_sockaddr_init(af, &bound_addr, NULL, port);
+
+ status = pj_sock_bind(relay->tp.sock, &bound_addr,
+ pj_sockaddr_get_len(&bound_addr));
+ if (status == PJ_SUCCESS)
+ break;
+ }
+
+ if (status != PJ_SUCCESS) {
+ /* Unable to allocate port */
+ PJ_LOG(4,(THIS_FILE, "Unable to allocate relay, giving up: err %d",
+ status));
+ pj_sock_close(relay->tp.sock);
+ relay->tp.sock = PJ_INVALID_SOCKET;
+ return status;
+ }
+
+ /* Init relay key */
+ namelen = sizeof(relay->hkey.addr);
+ status = pj_sock_getsockname(relay->tp.sock, &relay->hkey.addr, &namelen);
+ if (status != PJ_SUCCESS) {
+ PJ_LOG(4,(THIS_FILE, "pj_sock_getsockname() failed: err %d",
+ status));
+ pj_sock_close(relay->tp.sock);
+ relay->tp.sock = PJ_INVALID_SOCKET;
+ return status;
+ }
+ if (!pj_sockaddr_has_addr(&relay->hkey.addr)) {
+ pj_sockaddr_copy_addr(&relay->hkey.addr,
+ &alloc->transport->listener->addr);
+ }
+ if (!pj_sockaddr_has_addr(&relay->hkey.addr)) {
+ pj_sockaddr tmp_addr;
+ pj_gethostip(af, &tmp_addr);
+ pj_sockaddr_copy_addr(&relay->hkey.addr, &tmp_addr);
+ }
+
+ /* Init ioqueue */
+ pj_bzero(&icb, sizeof(icb));
+ icb.on_read_complete = &on_rx_from_peer;
+
+ status = pj_ioqueue_register_sock(pool, srv->core.ioqueue, relay->tp.sock,
+ relay, &icb, &relay->tp.key);
+ if (status != PJ_SUCCESS) {
+ PJ_LOG(4,(THIS_FILE, "pj_ioqueue_register_sock() failed: err %d",
+ status));
+ pj_sock_close(relay->tp.sock);
+ relay->tp.sock = PJ_INVALID_SOCKET;
+ return status;
+ }
+
+ /* Kick off pending read operation */
+ pj_ioqueue_op_key_init(&relay->tp.read_key, sizeof(relay->tp.read_key));
+ on_rx_from_peer(relay->tp.key, &relay->tp.read_key, 0);
+
+ /* Done */
+ return PJ_SUCCESS;
+}
+
+/* Create and send error response */
+static void send_reply_err(pj_turn_allocation *alloc,
+ const pj_stun_rx_data *rdata,
+ pj_bool_t cache,
+ int code, const char *errmsg)
+{
+ pj_status_t status;
+
+ status = pj_stun_session_respond(alloc->sess, rdata, code, errmsg, NULL,
+ cache, &alloc->hkey.clt_addr,
+ pj_sockaddr_get_len(&alloc->hkey.clt_addr.addr));
+ if (status != PJ_SUCCESS) {
+ alloc_err(alloc, "Error sending STUN error response", status);
+ return;
+ }
+}
+
+/* Create and send successful response */
+static void send_reply_ok(pj_turn_allocation *alloc,
+ const pj_stun_rx_data *rdata)
+{
+ pj_status_t status;
+ unsigned interval;
+ pj_stun_tx_data *tdata;
+
+ status = pj_stun_session_create_res(alloc->sess, rdata, 0, NULL, &tdata);
+ if (status != PJ_SUCCESS) {
+ alloc_err(alloc, "Error creating STUN success response", status);
+ return;
+ }
+
+ /* Calculate time to expiration */
+ if (alloc->relay.lifetime != 0) {
+ pj_time_val now;
+ pj_gettimeofday(&now);
+ interval = alloc->relay.expiry.sec - now.sec;
+ } else {
+ interval = 0;
+ }
+
+ /* Add LIFETIME if this is not ChannelBind. */
+ if (PJ_STUN_GET_METHOD(tdata->msg->hdr.type)!=PJ_STUN_CHANNEL_BIND_METHOD){
+ pj_stun_msg_add_uint_attr(tdata->pool, tdata->msg,
+ PJ_STUN_ATTR_LIFETIME, interval);
+
+ /* Add BANDWIDTH if lifetime is not zero */
+ if (interval != 0) {
+ pj_stun_msg_add_uint_attr(tdata->pool, tdata->msg,
+ PJ_STUN_ATTR_BANDWIDTH,
+ alloc->bandwidth);
+ }
+ }
+
+ status = pj_stun_session_send_msg(alloc->sess, NULL, PJ_TRUE,
+ PJ_FALSE, &alloc->hkey.clt_addr,
+ pj_sockaddr_get_len(&alloc->hkey.clt_addr),
+ tdata);
+ if (status != PJ_SUCCESS) {
+ alloc_err(alloc, "Error sending STUN success response", status);
+ return;
+ }
+}
+
+
+/* Create new permission */
+static pj_turn_permission *create_permission(pj_turn_allocation *alloc,
+ const pj_sockaddr_t *peer_addr,
+ unsigned addr_len)
+{
+ pj_turn_permission *perm;
+
+ perm = PJ_POOL_ZALLOC_T(alloc->pool, pj_turn_permission);
+ pj_memcpy(&perm->hkey.peer_addr, peer_addr, addr_len);
+
+ perm->allocation = alloc;
+ perm->channel = PJ_TURN_INVALID_CHANNEL;
+
+ pj_gettimeofday(&perm->expiry);
+ perm->expiry.sec += PJ_TURN_PERM_TIMEOUT;
+
+ /* Register to hash table (only the address part!) */
+ pj_hash_set(alloc->pool, alloc->peer_table,
+ pj_sockaddr_get_addr(&perm->hkey.peer_addr),
+ pj_sockaddr_get_addr_len(&perm->hkey.peer_addr), 0, perm);
+
+ return perm;
+}
+
+/* Check if a permission isn't expired. Return NULL if expired. */
+static pj_turn_permission *check_permission_expiry(pj_turn_permission *perm)
+{
+ pj_turn_allocation *alloc = perm->allocation;
+ pj_time_val now;
+
+ pj_gettimeofday(&now);
+ if (PJ_TIME_VAL_GT(perm->expiry, now)) {
+ /* Permission has not expired */
+ return perm;
+ }
+
+ /* Remove from permission hash table */
+ pj_hash_set(NULL, alloc->peer_table,
+ pj_sockaddr_get_addr(&perm->hkey.peer_addr),
+ pj_sockaddr_get_addr_len(&perm->hkey.peer_addr), 0, NULL);
+
+ /* Remove from channel hash table, if assigned a channel number */
+ if (perm->channel != PJ_TURN_INVALID_CHANNEL) {
+ pj_hash_set(NULL, alloc->ch_table, &perm->channel,
+ sizeof(perm->channel), 0, NULL);
+ }
+
+ return NULL;
+}
+
+/* Lookup permission in hash table by the peer address */
+static pj_turn_permission*
+lookup_permission_by_addr(pj_turn_allocation *alloc,
+ const pj_sockaddr_t *peer_addr,
+ unsigned addr_len)
+{
+ pj_turn_permission *perm;
+
+ PJ_UNUSED_ARG(addr_len);
+
+ /* Lookup in peer hash table */
+ perm = (pj_turn_permission*)
+ pj_hash_get(alloc->peer_table,
+ pj_sockaddr_get_addr(peer_addr),
+ pj_sockaddr_get_addr_len(peer_addr),
+ NULL);
+ return perm ? check_permission_expiry(perm) : NULL;
+}
+
+/* Lookup permission in hash table by the channel number */
+static pj_turn_permission*
+lookup_permission_by_chnum(pj_turn_allocation *alloc,
+ unsigned chnum)
+{
+ pj_uint16_t chnum16 = (pj_uint16_t)chnum;
+ pj_turn_permission *perm;
+
+ /* Lookup in peer hash table */
+ perm = (pj_turn_permission*) pj_hash_get(alloc->ch_table, &chnum16,
+ sizeof(chnum16), NULL);
+ return perm ? check_permission_expiry(perm) : NULL;
+}
+
+/* Update permission because of data from client to peer.
+ * Return PJ_TRUE is permission is found.
+ */
+static pj_bool_t refresh_permission(pj_turn_permission *perm)
+{
+ pj_gettimeofday(&perm->expiry);
+ if (perm->channel == PJ_TURN_INVALID_CHANNEL)
+ perm->expiry.sec += PJ_TURN_PERM_TIMEOUT;
+ else
+ perm->expiry.sec += PJ_TURN_CHANNEL_TIMEOUT;
+ return PJ_TRUE;
+}
+
+/*
+ * Handle incoming packet from client. This would have been called by
+ * server upon receiving packet from a listener.
+ */
/*
 * Handle incoming packet from client. This would have been called by
 * server upon receiving packet from a listener.
 *
 * STUN messages are forwarded to the STUN session (which dispatches to
 * stun_on_rx_request()/stun_on_rx_indication()); ChannelData packets are
 * relayed directly to the peer bound to the channel number. Consumed
 * bytes are removed from pkt->len so the caller can detect leftover
 * (partial TCP) data -- TODO confirm against the caller.
 */
PJ_DEF(void) pj_turn_allocation_on_rx_client_pkt(pj_turn_allocation *alloc,
						 pj_turn_pkt *pkt)
{
    pj_bool_t is_stun;
    pj_status_t status;

    /* Lock this allocation */
    pj_lock_acquire(alloc->lock);

    /* Quickly check if this is STUN message: the two most significant
     * bits of a STUN message are always zero, while ChannelData starts
     * with a channel number in the 0x4000-0x7FFF range.
     */
    is_stun = ((*((pj_uint8_t*)pkt->pkt) & 0xC0) == 0);

    if (is_stun) {
	/*
	 * This could be an incoming STUN requests or indications.
	 * Pass this through to the STUN session, which will call
	 * our stun_on_rx_request() or stun_on_rx_indication()
	 * callbacks.
	 *
	 * Note: currently it is necessary to specify the
	 * PJ_STUN_NO_FINGERPRINT_CHECK otherwise the FINGERPRINT
	 * attribute inside STUN Send Indication message will mess up
	 * with fingerprint checking.
	 */
	unsigned options = PJ_STUN_CHECK_PACKET | PJ_STUN_NO_FINGERPRINT_CHECK;
	pj_size_t parsed_len = 0;

	if (pkt->transport->listener->tp_type == PJ_TURN_TP_UDP)
	    options |= PJ_STUN_IS_DATAGRAM;

	status = pj_stun_session_on_rx_pkt(alloc->sess, pkt->pkt, pkt->len,
					   options, NULL, &parsed_len,
					   &pkt->src.clt_addr,
					   pkt->src_addr_len);

	/* UDP consumes the whole datagram; for stream transports keep any
	 * unparsed trailing bytes at the front of the buffer.
	 */
	if (pkt->transport->listener->tp_type == PJ_TURN_TP_UDP) {
	    pkt->len = 0;
	} else if (parsed_len > 0) {
	    if (parsed_len == pkt->len) {
		pkt->len = 0;
	    } else {
		pj_memmove(pkt->pkt, pkt->pkt+parsed_len,
			   pkt->len - parsed_len);
		pkt->len -= parsed_len;
	    }
	}

	if (status != PJ_SUCCESS) {
	    alloc_err(alloc, "Error handling STUN packet", status);
	    goto on_return;
	}

    } else {
	/*
	 * This is not a STUN packet, must be ChannelData packet.
	 */
	pj_turn_channel_data *cd = (pj_turn_channel_data*)pkt->pkt;
	pj_turn_permission *perm;
	pj_ssize_t len;

	/* Header layout (ch_number + length) must be exactly 4 bytes */
	pj_assert(sizeof(*cd)==4);

	/* For UDP check the packet length. Note: only UDP listeners are
	 * handled here; ChannelData over stream transports is rejected
	 * below.
	 */
	if (alloc->transport->listener->tp_type == PJ_TURN_TP_UDP) {
	    if (pkt->len < pj_ntohs(cd->length)+sizeof(*cd)) {
		PJ_LOG(4,(alloc->obj_name,
			  "ChannelData from %s discarded: UDP size error",
			  alloc->info));
		goto on_return;
	    }
	} else {
	    pj_assert(!"Unsupported transport");
	    goto on_return;
	}

	perm = lookup_permission_by_chnum(alloc, pj_ntohs(cd->ch_number));
	if (!perm) {
	    /* Discard */
	    PJ_LOG(4,(alloc->obj_name,
		      "ChannelData from %s discarded: ch#0x%x not found",
		      alloc->info, pj_ntohs(cd->ch_number)));
	    goto on_return;
	}

	/* Relay the data (payload follows the 4-byte header, hence cd+1) */
	len = pj_ntohs(cd->length);
	pj_sock_sendto(alloc->relay.tp.sock, cd+1, &len, 0,
		       &perm->hkey.peer_addr,
		       pj_sockaddr_get_len(&perm->hkey.peer_addr));

	/* Refresh permission */
	refresh_permission(perm);
    }

on_return:
    /* Release lock */
    pj_lock_release(alloc->lock);
}
+
+
+/*
+ * Handle incoming packet from peer. This function is called by
+ * on_rx_from_peer().
+ */
+static void handle_peer_pkt(pj_turn_allocation *alloc,
+ pj_turn_relay_res *rel,
+ char *pkt, pj_size_t len,
+ const pj_sockaddr *src_addr)
+{
+ pj_turn_permission *perm;
+
+ /* Lookup permission */
+ perm = lookup_permission_by_addr(alloc, src_addr,
+ pj_sockaddr_get_len(src_addr));
+ if (perm == NULL) {
+ /* No permission, discard data */
+ return;
+ }
+
+ /* Send Data Indication or ChannelData, depends on whether
+ * this permission is attached to a channel number.
+ */
+ if (perm->channel != PJ_TURN_INVALID_CHANNEL) {
+ /* Send ChannelData */
+ pj_turn_channel_data *cd = (pj_turn_channel_data*)rel->tp.tx_pkt;
+
+ if (len > PJ_TURN_MAX_PKT_LEN) {
+ char peer_addr[80];
+ pj_sockaddr_print(src_addr, peer_addr, sizeof(peer_addr), 3);
+ PJ_LOG(4,(alloc->obj_name, "Client %s: discarded data from %s "
+ "because it's too long (%d bytes)",
+ alloc->info, peer_addr, len));
+ return;
+ }
+
+ /* Init header */
+ cd->ch_number = pj_htons(perm->channel);
+ cd->length = pj_htons((pj_uint16_t)len);
+
+ /* Copy data */
+ pj_memcpy(rel->tp.tx_pkt+sizeof(pj_turn_channel_data), pkt, len);
+
+ /* Send to client */
+ alloc->transport->sendto(alloc->transport, rel->tp.tx_pkt,
+ len+sizeof(pj_turn_channel_data), 0,
+ &alloc->hkey.clt_addr,
+ pj_sockaddr_get_len(&alloc->hkey.clt_addr));
+ } else {
+ /* Send Data Indication */
+ pj_stun_tx_data *tdata;
+ pj_status_t status;
+
+ status = pj_stun_session_create_ind(alloc->sess,
+ PJ_STUN_DATA_INDICATION, &tdata);
+ if (status != PJ_SUCCESS) {
+ alloc_err(alloc, "Error creating Data indication", status);
+ return;
+ }
+
+ pj_stun_msg_add_sockaddr_attr(tdata->pool, tdata->msg,
+ PJ_STUN_ATTR_XOR_PEER_ADDR, PJ_TRUE,
+ src_addr, pj_sockaddr_get_len(src_addr));
+ pj_stun_msg_add_binary_attr(tdata->pool, tdata->msg,
+ PJ_STUN_ATTR_DATA,
+ (const pj_uint8_t*)pkt, len);
+
+ pj_stun_session_send_msg(alloc->sess, NULL, PJ_FALSE,
+ PJ_FALSE, &alloc->hkey.clt_addr,
+ pj_sockaddr_get_len(&alloc->hkey.clt_addr),
+ tdata);
+ }
+}
+
+/*
+ * ioqueue notification on RX packets from the relay socket.
+ */
/*
 * ioqueue notification on RX packets from the relay socket.
 *
 * Handles the just-completed read (bytes_read > 0), then keeps posting
 * pj_ioqueue_recvfrom() until the call returns PJ_EPENDING (asynchronous
 * completion will re-enter this callback) or PJ_ECANCELLED (key is being
 * unregistered). Synchronous completions are processed in the loop.
 * Note this is also invoked directly by create_relay() with bytes_read==0
 * to prime the first read.
 */
static void on_rx_from_peer(pj_ioqueue_key_t *key,
			    pj_ioqueue_op_key_t *op_key,
			    pj_ssize_t bytes_read)
{
    pj_turn_relay_res *rel;
    pj_status_t status;

    rel = (pj_turn_relay_res*) pj_ioqueue_get_user_data(key);

    /* Lock the allocation */
    pj_lock_acquire(rel->allocation->lock);

    do {
	if (bytes_read > 0) {
	    /* Forward the received datagram towards the client */
	    handle_peer_pkt(rel->allocation, rel, rel->tp.rx_pkt,
			    bytes_read, &rel->tp.src_addr);
	}

	/* Read next packet */
	bytes_read = sizeof(rel->tp.rx_pkt);
	rel->tp.src_addr_len = sizeof(rel->tp.src_addr);
	status = pj_ioqueue_recvfrom(key, op_key,
				     rel->tp.rx_pkt, &bytes_read, 0,
				     &rel->tp.src_addr,
				     &rel->tp.src_addr_len);

	/* On synchronous failure encode the error as a negative
	 * bytes_read so the next iteration skips handle_peer_pkt().
	 */
	if (status != PJ_EPENDING && status != PJ_SUCCESS)
	    bytes_read = -status;

    } while (status != PJ_EPENDING && status != PJ_ECANCELLED);

    /* Release allocation lock */
    pj_lock_release(rel->allocation->lock);
}
+
+/*
+ * Callback notification from STUN session when it wants to send
+ * a STUN message towards the client.
+ */
+static pj_status_t stun_on_send_msg(pj_stun_session *sess,
+ void *token,
+ const void *pkt,
+ pj_size_t pkt_size,
+ const pj_sockaddr_t *dst_addr,
+ unsigned addr_len)
+{
+ pj_turn_allocation *alloc;
+
+ PJ_UNUSED_ARG(token);
+
+ alloc = (pj_turn_allocation*) pj_stun_session_get_user_data(sess);
+
+ return alloc->transport->sendto(alloc->transport, pkt, pkt_size, 0,
+ dst_addr, addr_len);
+}
+
+/*
+ * Callback notification from STUN session when it receives STUN
+ * requests. This callback was trigger by STUN incoming message
+ * processing in pj_turn_allocation_on_rx_client_pkt().
+ */
+static pj_status_t stun_on_rx_request(pj_stun_session *sess,
+ const pj_uint8_t *pkt,
+ unsigned pkt_len,
+ const pj_stun_rx_data *rdata,
+ void *token,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len)
+{
+ const pj_stun_msg *msg = rdata->msg;
+ pj_turn_allocation *alloc;
+
+ PJ_UNUSED_ARG(pkt);
+ PJ_UNUSED_ARG(pkt_len);
+ PJ_UNUSED_ARG(token);
+ PJ_UNUSED_ARG(src_addr);
+ PJ_UNUSED_ARG(src_addr_len);
+
+ alloc = (pj_turn_allocation*) pj_stun_session_get_user_data(sess);
+
+ /* Refuse to serve any request if we've been shutdown */
+ if (alloc->relay.lifetime == 0) {
+ /* Reject with 437 if we're shutting down */
+ send_reply_err(alloc, rdata, PJ_TRUE,
+ PJ_STUN_SC_ALLOCATION_MISMATCH, NULL);
+ return PJ_SUCCESS;
+ }
+
+ if (msg->hdr.type == PJ_STUN_REFRESH_REQUEST) {
+ /*
+ * Handle REFRESH request
+ */
+ pj_stun_lifetime_attr *lifetime;
+ pj_stun_bandwidth_attr *bandwidth;
+
+ /* Get LIFETIME attribute */
+ lifetime = (pj_stun_lifetime_attr*)
+ pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_LIFETIME, 0);
+
+ /* Get BANDWIDTH attribute */
+ bandwidth = (pj_stun_bandwidth_attr*)
+ pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_BANDWIDTH, 0);
+
+ if (lifetime && lifetime->value==0) {
+ /*
+ * This is deallocation request.
+ */
+ alloc->relay.lifetime = 0;
+
+ /* Respond first */
+ send_reply_ok(alloc, rdata);
+
+ /* Shutdown allocation */
+ PJ_LOG(4,(alloc->obj_name,
+ "Client %s request to dealloc, shutting down",
+ alloc->info));
+
+ alloc_shutdown(alloc);
+
+ } else {
+ /*
+ * This is a refresh request.
+ */
+
+ /* Update lifetime */
+ if (lifetime) {
+ alloc->relay.lifetime = lifetime->value;
+ }
+
+ /* Update bandwidth */
+ // TODO:
+
+ /* Update expiration timer */
+ resched_timeout(alloc);
+
+ /* Send reply */
+ send_reply_ok(alloc, rdata);
+ }
+
+ } else if (msg->hdr.type == PJ_STUN_CHANNEL_BIND_REQUEST) {
+ /*
+ * ChannelBind request.
+ */
+ pj_stun_channel_number_attr *ch_attr;
+ pj_stun_xor_peer_addr_attr *peer_attr;
+ pj_turn_permission *p1, *p2;
+
+ ch_attr = (pj_stun_channel_number_attr*)
+ pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_CHANNEL_NUMBER, 0);
+ peer_attr = (pj_stun_xor_peer_addr_attr*)
+ pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_XOR_PEER_ADDR, 0);
+
+ if (!ch_attr || !peer_attr) {
+ send_reply_err(alloc, rdata, PJ_TRUE,
+ PJ_STUN_SC_BAD_REQUEST, NULL);
+ return PJ_SUCCESS;
+ }
+
+ /* Find permission with the channel number */
+ p1 = lookup_permission_by_chnum(alloc, PJ_STUN_GET_CH_NB(ch_attr->value));
+
+ /* If permission is found, this is supposed to be a channel bind
+ * refresh. Make sure it's for the same peer.
+ */
+ if (p1) {
+ if (pj_sockaddr_cmp(&p1->hkey.peer_addr, &peer_attr->sockaddr)) {
+ /* Address mismatch. Send 400 */
+ send_reply_err(alloc, rdata, PJ_TRUE,
+ PJ_STUN_SC_BAD_REQUEST,
+ "Peer address mismatch");
+ return PJ_SUCCESS;
+ }
+
+ /* Refresh permission */
+ refresh_permission(p1);
+
+ /* Send response */
+ send_reply_ok(alloc, rdata);
+
+ /* Done */
+ return PJ_SUCCESS;
+ }
+
+ /* If permission is not found, create a new one. Make sure the peer
+ * has not alreadyy assigned with a channel number.
+ */
+ p2 = lookup_permission_by_addr(alloc, &peer_attr->sockaddr,
+ pj_sockaddr_get_len(&peer_attr->sockaddr));
+ if (p2 && p2->channel != PJ_TURN_INVALID_CHANNEL) {
+ send_reply_err(alloc, rdata, PJ_TRUE, PJ_STUN_SC_BAD_REQUEST,
+ "Peer address already assigned a channel number");
+ return PJ_SUCCESS;
+ }
+
+ /* Create permission if it doesn't exist */
+ if (!p2) {
+ p2 = create_permission(alloc, &peer_attr->sockaddr,
+ pj_sockaddr_get_len(&peer_attr->sockaddr));
+ if (!p2)
+ return PJ_SUCCESS;
+ }
+
+ /* Assign channel number to permission */
+ p2->channel = PJ_STUN_GET_CH_NB(ch_attr->value);
+
+ /* Register to hash table */
+ pj_assert(sizeof(p2->channel==2));
+ pj_hash_set(alloc->pool, alloc->ch_table, &p2->channel,
+ sizeof(p2->channel), 0, p2);
+
+ /* Update */
+ refresh_permission(p2);
+
+ /* Reply */
+ send_reply_ok(alloc, rdata);
+
+ return PJ_SUCCESS;
+
+ } else if (msg->hdr.type == PJ_STUN_ALLOCATE_REQUEST) {
+
+ /* Respond with 437 (section 6.3 turn-07) */
+ send_reply_err(alloc, rdata, PJ_TRUE, PJ_STUN_SC_ALLOCATION_MISMATCH,
+ NULL);
+
+ } else {
+
+ /* Respond with Bad Request? */
+ send_reply_err(alloc, rdata, PJ_TRUE, PJ_STUN_SC_BAD_REQUEST, NULL);
+
+ }
+
+ return PJ_SUCCESS;
+}
+
+/*
+ * Callback notification from STUN session when it receives STUN
+ * indications. This callback was trigger by STUN incoming message
+ * processing in pj_turn_allocation_on_rx_client_pkt().
+ */
+static pj_status_t stun_on_rx_indication(pj_stun_session *sess,
+ const pj_uint8_t *pkt,
+ unsigned pkt_len,
+ const pj_stun_msg *msg,
+ void *token,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len)
+{
+ pj_stun_xor_peer_addr_attr *peer_attr;
+ pj_stun_data_attr *data_attr;
+ pj_turn_allocation *alloc;
+ pj_turn_permission *perm;
+ pj_ssize_t len;
+
+ PJ_UNUSED_ARG(pkt);
+ PJ_UNUSED_ARG(pkt_len);
+ PJ_UNUSED_ARG(token);
+ PJ_UNUSED_ARG(src_addr);
+ PJ_UNUSED_ARG(src_addr_len);
+
+ alloc = (pj_turn_allocation*) pj_stun_session_get_user_data(sess);
+
+ /* Only expect Send Indication */
+ if (msg->hdr.type != PJ_STUN_SEND_INDICATION) {
+ /* Ignore */
+ return PJ_SUCCESS;
+ }
+
+ /* Get XOR-PEER-ADDRESS attribute */
+ peer_attr = (pj_stun_xor_peer_addr_attr*)
+ pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_XOR_PEER_ADDR, 0);
+
+ /* MUST have XOR-PEER-ADDRESS attribute */
+ if (!peer_attr)
+ return PJ_SUCCESS;
+
+ /* Get DATA attribute */
+ data_attr = (pj_stun_data_attr*)
+ pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_DATA, 0);
+
+ /* Create/update/refresh the permission */
+ perm = lookup_permission_by_addr(alloc, &peer_attr->sockaddr,
+ pj_sockaddr_get_len(&peer_attr->sockaddr));
+ if (perm == NULL) {
+ perm = create_permission(alloc, &peer_attr->sockaddr,
+ pj_sockaddr_get_len(&peer_attr->sockaddr));
+ }
+ refresh_permission(perm);
+
+ /* Return if we don't have data */
+ if (data_attr == NULL)
+ return PJ_SUCCESS;
+
+ /* Relay the data to peer */
+ len = data_attr->length;
+ pj_sock_sendto(alloc->relay.tp.sock, data_attr->data,
+ &len, 0, &peer_attr->sockaddr,
+ pj_sockaddr_get_len(&peer_attr->sockaddr));
+
+ return PJ_SUCCESS;
+}
+
+
diff --git a/pjnath/src/pjturn-srv/auth.c b/pjnath/src/pjturn-srv/auth.c
new file mode 100644
index 0000000..6d95ca4
--- /dev/null
+++ b/pjnath/src/pjturn-srv/auth.c
@@ -0,0 +1,145 @@
+/* $Id: auth.c 3553 2011-05-05 06:14:19Z nanang $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include "auth.h"
+#include <pjlib.h>
+
+
+#define MAX_REALM 80
+#define MAX_USERNAME 32
+#define MAX_PASSWORD 32
+#define MAX_NONCE 32
+
+static char g_realm[MAX_REALM];
+
+static struct cred_t
+{
+ char username[MAX_USERNAME];
+ char passwd[MAX_PASSWORD];
+} g_cred[] =
+{
+ { "100", "100" },
+ { "700", "700" },
+ { "701", "701" }
+};
+
+#define THIS_FILE "auth.c"
+#define THE_NONCE "pjnath"
+#define LOG(expr) PJ_LOG(3,expr)
+
+
+/*
+ * Initialize TURN authentication subsystem.
+ */
+PJ_DEF(pj_status_t) pj_turn_auth_init(const char *realm)
+{
+ PJ_ASSERT_RETURN(pj_ansi_strlen(realm) < MAX_REALM, PJ_ENAMETOOLONG);
+ pj_ansi_strcpy(g_realm, realm);
+ return PJ_SUCCESS;
+}
+
+/*
+ * Shutdown TURN authentication subsystem.
+ */
/* Shutdown TURN authentication subsystem. All state here is static, so
 * there is nothing to release.
 */
PJ_DEF(void) pj_turn_auth_dinit(void)
{
    /* Nothing to do */
}
+
+
+/*
+ * This function is called by pj_stun_verify_credential() when
+ * server needs to challenge the request with 401 response.
+ */
+PJ_DEF(pj_status_t) pj_turn_get_auth(void *user_data,
+ pj_pool_t *pool,
+ pj_str_t *realm,
+ pj_str_t *nonce)
+{
+ PJ_UNUSED_ARG(user_data);
+ PJ_UNUSED_ARG(pool);
+
+ *realm = pj_str(g_realm);
+ *nonce = pj_str(THE_NONCE);
+
+ return PJ_SUCCESS;
+}
+
+/*
+ * This function is called to get the password for the specified username.
+ * This function is also used to check whether the username is valid.
+ */
+PJ_DEF(pj_status_t) pj_turn_get_password(const pj_stun_msg *msg,
+ void *user_data,
+ const pj_str_t *realm,
+ const pj_str_t *username,
+ pj_pool_t *pool,
+ pj_stun_passwd_type *data_type,
+ pj_str_t *data)
+{
+ unsigned i;
+
+ PJ_UNUSED_ARG(msg);
+ PJ_UNUSED_ARG(user_data);
+ PJ_UNUSED_ARG(pool);
+
+ if (pj_stricmp2(realm, g_realm)) {
+ LOG((THIS_FILE, "auth error: invalid realm '%.*s'",
+ (int)realm->slen, realm->ptr));
+ return PJ_EINVAL;
+ }
+
+ for (i=0; i<PJ_ARRAY_SIZE(g_cred); ++i) {
+ if (pj_stricmp2(username, g_cred[i].username) == 0) {
+ *data_type = PJ_STUN_PASSWD_PLAIN;
+ *data = pj_str(g_cred[i].passwd);
+ return PJ_SUCCESS;
+ }
+ }
+
+ LOG((THIS_FILE, "auth error: user '%.*s' not found",
+ (int)username->slen, username->ptr));
+ return PJ_ENOTFOUND;
+}
+
+/*
+ * This function will be called to verify that the NONCE given
+ * in the message can be accepted. If this callback returns
+ * PJ_FALSE, 438 (Stale Nonce) response will be created.
+ */
+PJ_DEF(pj_bool_t) pj_turn_verify_nonce(const pj_stun_msg *msg,
+ void *user_data,
+ const pj_str_t *realm,
+ const pj_str_t *username,
+ const pj_str_t *nonce)
+{
+ PJ_UNUSED_ARG(msg);
+ PJ_UNUSED_ARG(user_data);
+ PJ_UNUSED_ARG(realm);
+ PJ_UNUSED_ARG(username);
+
+ if (pj_stricmp2(nonce, THE_NONCE)) {
+ LOG((THIS_FILE, "auth error: invalid nonce '%.*s'",
+ (int)nonce->slen, nonce->ptr));
+ return PJ_FALSE;
+ }
+
+ return PJ_TRUE;
+}
+
diff --git a/pjnath/src/pjturn-srv/auth.h b/pjnath/src/pjturn-srv/auth.h
new file mode 100644
index 0000000..ed68701
--- /dev/null
+++ b/pjnath/src/pjturn-srv/auth.h
@@ -0,0 +1,116 @@
+/* $Id: auth.h 3553 2011-05-05 06:14:19Z nanang $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __PJ_TURN_SRV_AUTH_H__
+#define __PJ_TURN_SRV_AUTH_H__
+
+#include <pjnath.h>
+
+/**
+ * Initialize TURN authentication subsystem.
+ *
+ * @return PJ_SUCCESS on success.
+ */
+PJ_DECL(pj_status_t) pj_turn_auth_init(const char *realm);
+
+/**
+ * Shutdown TURN authentication subsystem.
+ */
+PJ_DECL(void) pj_turn_auth_dinit(void);
+
+/**
+ * This function is called by pj_stun_verify_credential() when
+ * server needs to challenge the request with 401 response.
+ *
+ * @param user_data Should be ignored.
+ * @param pool Pool to allocate memory.
+ * @param realm On return, the function should fill in with
+ * realm if application wants to use long term
+ * credential. Otherwise application should set
+ * empty string for the realm.
+ * @param nonce On return, if application wants to use long
+ * term credential, it MUST fill in the nonce
+ * with some value. Otherwise if short term
+ * credential is wanted, it MAY set this value.
+ * If short term credential is wanted and the
+ * application doesn't want to include NONCE,
+ * then it must set this to empty string.
+ *
+ * @return The callback should return PJ_SUCCESS, or
+ * otherwise response message will not be
+ * created.
+ */
+PJ_DECL(pj_status_t) pj_turn_get_auth(void *user_data,
+ pj_pool_t *pool,
+ pj_str_t *realm,
+ pj_str_t *nonce);
+
+/**
+ * This function is called to get the password for the specified username.
+ * This function is also used to check whether the username is valid.
+ *
+ * @param msg The STUN message where the password will be
+ * applied to.
+ * @param user_data Should be ignored.
+ * @param realm The realm as specified in the message.
+ * @param username The username as specified in the message.
+ * @param pool Pool to allocate memory when necessary.
+ * @param data_type On return, application should fill up this
+ * argument with the type of data (which should
+ * be zero if data is a plaintext password).
+ * @param data On return, application should fill up this
+ * argument with the password according to
+ * data_type.
+ *
+ * @return The callback should return PJ_SUCCESS if
+ * username has been successfully verified
+ * and password was obtained. If non-PJ_SUCCESS
+ * is returned, it is assumed that the
+ * username is not valid.
+ */
+PJ_DECL(pj_status_t) pj_turn_get_password(const pj_stun_msg *msg,
+ void *user_data,
+ const pj_str_t *realm,
+ const pj_str_t *username,
+ pj_pool_t *pool,
+ pj_stun_passwd_type *data_type,
+ pj_str_t *data);
+
+/**
+ * This function will be called to verify that the NONCE given
+ * in the message can be accepted. If this callback returns
+ * PJ_FALSE, 438 (Stale Nonce) response will be created.
+ *
+ * @param msg The STUN message where the nonce was received.
+ * @param user_data Should be ignored.
+ * @param realm The realm as specified in the message.
+ * @param username The username as specified in the message.
+ * @param nonce The nonce to be verified.
+ *
+ * @return The callback MUST return non-zero if the
+ * NONCE can be accepted.
+ */
+PJ_DECL(pj_bool_t) pj_turn_verify_nonce(const pj_stun_msg *msg,
+ void *user_data,
+ const pj_str_t *realm,
+ const pj_str_t *username,
+ const pj_str_t *nonce);
+
+#endif /* __PJ_TURN_SRV_AUTH_H__ */
+
diff --git a/pjnath/src/pjturn-srv/listener_tcp.c b/pjnath/src/pjturn-srv/listener_tcp.c
new file mode 100644
index 0000000..89e8544
--- /dev/null
+++ b/pjnath/src/pjturn-srv/listener_tcp.c
@@ -0,0 +1,490 @@
+/* $Id: listener_tcp.c 3553 2011-05-05 06:14:19Z nanang $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include "turn.h"
+#include <pj/compat/socket.h>
+
+#if PJ_HAS_TCP
+
+/* State of one pending asynchronous accept() operation. One instance is
+ * posted per unit of listener concurrency. op_key must be the first
+ * member: the completion callback casts op_key back to this struct.
+ */
+struct accept_op
+{
+    pj_ioqueue_op_key_t	op_key;		/* ioqueue operation key	    */
+    pj_sock_t		sock;		/* Newly accepted client socket	    */
+    pj_sockaddr		src_addr;	/* Remote (client) address	    */
+    int			src_addr_len;	/* Length of src_addr		    */
+};
+
+/* TCP listener: extends pj_turn_listener with the ioqueue registration
+ * and an array of simultaneous accept operations. base must be the
+ * first member (lis_destroy() casts pj_turn_listener* back to this).
+ */
+struct tcp_listener
+{
+    pj_turn_listener	     base;	    /* Base listener, must be first  */
+    pj_ioqueue_key_t	     *key;	    /* ioqueue key of base.sock	     */
+    unsigned		     accept_cnt;    /* Entries in accept_op array    */
+    struct accept_op	     *accept_op;    /* Array of accept_op's */
+};
+
+
+static void lis_on_accept_complete(pj_ioqueue_key_t *key,
+ pj_ioqueue_op_key_t *op_key,
+ pj_sock_t sock,
+ pj_status_t status);
+static pj_status_t lis_destroy(pj_turn_listener *listener);
+static void transport_create(pj_sock_t sock, pj_turn_listener *lis,
+ pj_sockaddr_t *src_addr, int src_addr_len);
+
+static void show_err(const char *sender, const char *title,
+ pj_status_t status)
+{
+ char errmsg[PJ_ERR_MSG_SIZE];
+
+ pj_strerror(status, errmsg, sizeof(errmsg));
+ PJ_LOG(4,(sender, "%s: %s", title, errmsg));
+}
+
+
+/*
+ * Create a new TCP listener on the specified address/port and start
+ * "concurrency_cnt" simultaneous asynchronous accept() operations.
+ *
+ * On failure the partially constructed listener is destroyed and the
+ * error status is returned; *p_listener is left untouched.
+ */
+PJ_DEF(pj_status_t) pj_turn_listener_create_tcp(pj_turn_srv *srv,
+						int af,
+						const pj_str_t *bound_addr,
+						unsigned port,
+						unsigned concurrency_cnt,
+						unsigned flags,
+						pj_turn_listener **p_listener)
+{
+    pj_pool_t *pool;
+    struct tcp_listener *tcp_lis;
+    pj_ioqueue_callback ioqueue_cb;
+    unsigned i;
+    pj_status_t status;
+
+    /* Create structure */
+    pool = pj_pool_create(srv->core.pf, "tcpl%p", 1000, 1000, NULL);
+    tcp_lis = PJ_POOL_ZALLOC_T(pool, struct tcp_listener);
+    tcp_lis->base.pool = pool;
+    tcp_lis->base.obj_name = pool->obj_name;
+    tcp_lis->base.server = srv;
+    tcp_lis->base.tp_type = PJ_TURN_TP_TCP;
+    tcp_lis->base.sock = PJ_INVALID_SOCKET;
+    tcp_lis->base.destroy = &lis_destroy;
+    tcp_lis->accept_cnt = concurrency_cnt;
+    tcp_lis->base.flags = flags;
+
+    /* Create socket */
+    status = pj_sock_socket(af, pj_SOCK_STREAM(), 0, &tcp_lis->base.sock);
+    if (status != PJ_SUCCESS)
+	goto on_error;
+
+    /* Init bind address */
+    status = pj_sockaddr_init(af, &tcp_lis->base.addr, bound_addr,
+			      (pj_uint16_t)port);
+    if (status != PJ_SUCCESS)
+	goto on_error;
+
+    /* Create info string, e.g. "TCP:1.2.3.4:3478", for logging */
+    pj_ansi_strcpy(tcp_lis->base.info, "TCP:");
+    pj_sockaddr_print(&tcp_lis->base.addr, tcp_lis->base.info+4,
+		      sizeof(tcp_lis->base.info)-4, 3);
+
+    /* Bind socket */
+    status = pj_sock_bind(tcp_lis->base.sock, &tcp_lis->base.addr,
+			  pj_sockaddr_get_len(&tcp_lis->base.addr));
+    if (status != PJ_SUCCESS)
+	goto on_error;
+
+    /* Listen() */
+    status = pj_sock_listen(tcp_lis->base.sock, 5);
+    if (status != PJ_SUCCESS)
+	goto on_error;
+
+    /* Register to ioqueue. The status was previously ignored here; a
+     * registration failure must abort creation, otherwise accepts would
+     * be posted on an invalid key below.
+     */
+    pj_bzero(&ioqueue_cb, sizeof(ioqueue_cb));
+    ioqueue_cb.on_accept_complete = &lis_on_accept_complete;
+    status = pj_ioqueue_register_sock(pool, srv->core.ioqueue, tcp_lis->base.sock,
+				      tcp_lis, &ioqueue_cb, &tcp_lis->key);
+    if (status != PJ_SUCCESS)
+	goto on_error;
+
+    /* Create op keys */
+    tcp_lis->accept_op = (struct accept_op*)pj_pool_calloc(pool, concurrency_cnt,
+							   sizeof(struct accept_op));
+
+    /* Kick off each accept operation; calling the completion callback
+     * with PJ_EPENDING makes it post the initial asynchronous accept().
+     */
+    for (i=0; i<concurrency_cnt; ++i) {
+	lis_on_accept_complete(tcp_lis->key, &tcp_lis->accept_op[i].op_key,
+			       PJ_INVALID_SOCKET, PJ_EPENDING);
+    }
+
+    /* Done */
+    PJ_LOG(4,(tcp_lis->base.obj_name, "Listener %s created",
+	      tcp_lis->base.info));
+
+    *p_listener = &tcp_lis->base;
+    return PJ_SUCCESS;
+
+
+on_error:
+    lis_destroy(&tcp_lis->base);
+    return status;
+}
+
+
+/*
+ * Destroy listener. Also serves as the error-cleanup path of
+ * pj_turn_listener_create_tcp(), so it must tolerate a partially
+ * initialized listener.
+ */
+static pj_status_t lis_destroy(pj_turn_listener *listener)
+{
+    struct tcp_listener *tcp_lis = (struct tcp_listener *)listener;
+    unsigned i;
+
+    /* NOTE(review): assumes pj_ioqueue_unregister() closes the socket;
+     * the socket is only closed explicitly when it was never registered.
+     */
+    if (tcp_lis->key) {
+	pj_ioqueue_unregister(tcp_lis->key);
+	tcp_lis->key = NULL;
+	tcp_lis->base.sock = PJ_INVALID_SOCKET;
+    } else if (tcp_lis->base.sock != PJ_INVALID_SOCKET) {
+	pj_sock_close(tcp_lis->base.sock);
+	tcp_lis->base.sock = PJ_INVALID_SOCKET;
+    }
+
+    /* accept_op entries live in the listener pool, so there is no
+     * per-operation cleanup to perform.
+     */
+    for (i=0; i<tcp_lis->accept_cnt; ++i) {
+	/* Nothing to do */
+    }
+
+    if (tcp_lis->base.pool) {
+	pj_pool_t *pool = tcp_lis->base.pool;
+
+	PJ_LOG(4,(tcp_lis->base.obj_name, "Listener %s destroyed",
+		  tcp_lis->base.info));
+
+	/* Clear the pointer before releasing to avoid dangling access */
+	tcp_lis->base.pool = NULL;
+	pj_pool_release(pool);
+    }
+    return PJ_SUCCESS;
+}
+
+
+/*
+ * Callback on new TCP connection. Also invoked directly at startup with
+ * status==PJ_EPENDING to post the initial asynchronous accept() for
+ * each accept_op.
+ */
+static void lis_on_accept_complete(pj_ioqueue_key_t *key,
+				   pj_ioqueue_op_key_t *op_key,
+				   pj_sock_t sock,
+				   pj_status_t status)
+{
+    struct tcp_listener *tcp_lis;
+    /* op_key is the first member of struct accept_op, so it can be
+     * cast back to recover the whole operation state.
+     */
+    struct accept_op *accept_op = (struct accept_op*) op_key;
+
+    tcp_lis = (struct tcp_listener*) pj_ioqueue_get_user_data(key);
+
+    /* The accepted socket is taken from accept_op->sock, not "sock" */
+    PJ_UNUSED_ARG(sock);
+
+    do {
+	/* Report new connection. */
+	if (status == PJ_SUCCESS) {
+	    char addr[PJ_INET6_ADDRSTRLEN+8];
+	    PJ_LOG(5,(tcp_lis->base.obj_name, "Incoming TCP from %s",
+		      pj_sockaddr_print(&accept_op->src_addr, addr,
+					sizeof(addr), 3)));
+	    /* Hand the accepted socket over to a per-connection
+	     * transport; on failure it cleans up silently.
+	     */
+	    transport_create(accept_op->sock, &tcp_lis->base,
+			     &accept_op->src_addr, accept_op->src_addr_len);
+	} else if (status != PJ_EPENDING) {
+	    show_err(tcp_lis->base.obj_name, "accept()", status);
+	}
+
+	/* Prepare next accept() */
+	accept_op->src_addr_len = sizeof(accept_op->src_addr);
+	status = pj_ioqueue_accept(key, op_key, &accept_op->sock,
+				   NULL,
+				   &accept_op->src_addr,
+				   &accept_op->src_addr_len);
+
+	/* Loop while accept() completes synchronously; stop once the
+	 * operation becomes asynchronous, is cancelled, or would block.
+	 */
+    } while (status != PJ_EPENDING && status != PJ_ECANCELLED &&
+	     status != PJ_STATUS_FROM_OS(PJ_BLOCKING_ERROR_VAL));
+}
+
+
+/****************************************************************************/
+/*
+ * Transport
+ */
+enum
+{
+ TIMER_NONE,
+ TIMER_DESTROY
+};
+
+/* The delay in seconds to be applied before TCP transport is destroyed when
+ * no allocation is referencing it. This also means the initial time to wait
+ * after the initial TCP connection establishment to receive a valid STUN
+ * message in the transport.
+ */
+#define SHUTDOWN_DELAY 10
+
+/* State of the single pending read operation plus the packet buffer it
+ * fills. op_key must be the first member: tcp_on_read_complete() casts
+ * op_key back to this struct.
+ */
+struct recv_op
+{
+    pj_ioqueue_op_key_t	op_key;		/* ioqueue operation key	*/
+    pj_turn_pkt		pkt;		/* Incoming packet being built	*/
+};
+
+/* Per-connection TCP transport. Lifetime is governed by ref_cnt plus a
+ * delayed-destroy timer (see tcp_add_ref(), tcp_dec_ref() and
+ * timer_callback()). base must be the first member: the transport
+ * callbacks cast pj_turn_transport* back to this struct.
+ */
+struct tcp_transport
+{
+    pj_turn_transport	base;		/* Base transport, must be first */
+    pj_pool_t		*pool;		/* Pool owning this struct	 */
+    pj_timer_entry	timer;		/* Delayed-destroy timer	 */
+
+    pj_turn_allocation	*alloc;		/* Bound allocation, or NULL	 */
+    int			ref_cnt;	/* Active reference count	 */
+
+    pj_sock_t		sock;		/* Connected client socket	 */
+    pj_ioqueue_key_t	*key;		/* ioqueue key of sock		 */
+    struct recv_op	recv_op;	/* The single pending read	 */
+    pj_ioqueue_op_key_t	send_op;	/* Key for pj_ioqueue_send()	 */
+};
+
+
+static void tcp_on_read_complete(pj_ioqueue_key_t *key,
+ pj_ioqueue_op_key_t *op_key,
+ pj_ssize_t bytes_read);
+
+static pj_status_t tcp_sendto(pj_turn_transport *tp,
+ const void *packet,
+ pj_size_t size,
+ unsigned flag,
+ const pj_sockaddr_t *addr,
+ int addr_len);
+static void tcp_destroy(struct tcp_transport *tcp);
+static void tcp_add_ref(pj_turn_transport *tp,
+ pj_turn_allocation *alloc);
+static void tcp_dec_ref(pj_turn_transport *tp,
+ pj_turn_allocation *alloc);
+static void timer_callback(pj_timer_heap_t *timer_heap,
+ pj_timer_entry *entry);
+
+/* Create a TCP transport for a newly accepted client connection and
+ * start reading from it. On failure the half-constructed transport is
+ * silently destroyed; there is no caller to report the error to.
+ */
+static void transport_create(pj_sock_t sock, pj_turn_listener *lis,
+			     pj_sockaddr_t *src_addr, int src_addr_len)
+{
+    pj_pool_t *pool;
+    struct tcp_transport *tcp;
+    pj_ioqueue_callback cb;
+    pj_status_t status;
+
+    /* Each transport owns a pool independent of the listener's */
+    pool = pj_pool_create(lis->server->core.pf, "tcp%p", 1000, 1000, NULL);
+
+    tcp = PJ_POOL_ZALLOC_T(pool, struct tcp_transport);
+    tcp->base.obj_name = pool->obj_name;
+    tcp->base.listener = lis;
+    tcp->base.info = lis->info;
+    tcp->base.sendto = &tcp_sendto;
+    tcp->base.add_ref = &tcp_add_ref;
+    tcp->base.dec_ref = &tcp_dec_ref;
+    tcp->pool = pool;
+    tcp->sock = sock;
+
+    /* Timer stays idle (TIMER_NONE) until tcp_dec_ref() arms it */
+    pj_timer_entry_init(&tcp->timer, TIMER_NONE, tcp, &timer_callback);
+
+    /* Register to ioqueue */
+    pj_bzero(&cb, sizeof(cb));
+    cb.on_read_complete = &tcp_on_read_complete;
+    status = pj_ioqueue_register_sock(pool, lis->server->core.ioqueue, sock,
+				      tcp, &cb, &tcp->key);
+    if (status != PJ_SUCCESS) {
+	tcp_destroy(tcp);
+	return;
+    }
+
+    /* Init pkt: the packet keeps a separate pool which is reset (not
+     * released) after each packet is processed.
+     */
+    tcp->recv_op.pkt.pool = pj_pool_create(lis->server->core.pf, "tcpkt%p",
+					   1000, 1000, NULL);
+    tcp->recv_op.pkt.transport = &tcp->base;
+    tcp->recv_op.pkt.src.tp_type = PJ_TURN_TP_TCP;
+    tcp->recv_op.pkt.src_addr_len = src_addr_len;
+    pj_memcpy(&tcp->recv_op.pkt.src.clt_addr, src_addr, src_addr_len);
+
+    /* Kick off the first read; -PJ_EPENDING tells the callback this is
+     * the initial invocation rather than a completed transfer.
+     */
+    tcp_on_read_complete(tcp->key, &tcp->recv_op.op_key, -PJ_EPENDING);
+    /* Should not access transport from now, it may have been destroyed */
+}
+
+
+/* Tear down a TCP transport: cancel the shutdown timer, unregister or
+ * close the socket, and release both pools. Safe to call on a partially
+ * constructed transport (the zeroed fields are all guarded).
+ */
+static void tcp_destroy(struct tcp_transport *tcp)
+{
+    /* Cancel a still-pending delayed-destroy timer. Without this, a
+     * destroy triggered by the read-error path while the timer is armed
+     * would let the timer later fire on freed memory.
+     */
+    if (tcp->timer.id != TIMER_NONE) {
+	pj_timer_heap_cancel(tcp->base.listener->server->core.timer_heap,
+			     &tcp->timer);
+	tcp->timer.id = TIMER_NONE;
+    }
+
+    if (tcp->key) {
+	pj_ioqueue_unregister(tcp->key);
+	tcp->key = NULL;
+	tcp->sock = 0;
+    } else if (tcp->sock) {
+	pj_sock_close(tcp->sock);
+	tcp->sock = 0;
+    }
+
+    /* Release the rx packet pool; it was created separately from the
+     * transport pool in transport_create() and would otherwise leak.
+     */
+    if (tcp->recv_op.pkt.pool) {
+	pj_pool_release(tcp->recv_op.pkt.pool);
+	tcp->recv_op.pkt.pool = NULL;
+    }
+
+    if (tcp->pool) {
+	pj_pool_release(tcp->pool);
+    }
+}
+
+
+/* Delayed-shutdown timer expired: tear the idle transport down. */
+static void timer_callback(pj_timer_heap_t *timer_heap,
+			   pj_timer_entry *entry)
+{
+    struct tcp_transport *transport;
+
+    PJ_UNUSED_ARG(timer_heap);
+
+    transport = (struct tcp_transport*) entry->user_data;
+    tcp_destroy(transport);
+}
+
+
+/* ioqueue callback invoked when data (or EOF/error) arrives on the TCP
+ * connection. Also called directly from transport_create() with
+ * bytes_read == -PJ_EPENDING to start the first read.
+ */
+static void tcp_on_read_complete(pj_ioqueue_key_t *key,
+				 pj_ioqueue_op_key_t *op_key,
+				 pj_ssize_t bytes_read)
+{
+    struct tcp_transport *tcp;
+    /* op_key is the first member of struct recv_op */
+    struct recv_op *recv_op = (struct recv_op*) op_key;
+    pj_status_t status;
+
+    tcp = (struct tcp_transport*) pj_ioqueue_get_user_data(key);
+
+    do {
+	/* Report to server or allocation, if we have allocation */
+	if (bytes_read > 0) {
+
+	    recv_op->pkt.len = bytes_read;
+	    pj_gettimeofday(&recv_op->pkt.rx_time);
+
+	    /* Hold a reference while upper layers process the packet so
+	     * they cannot destroy the transport under our feet.
+	     */
+	    tcp_add_ref(&tcp->base, NULL);
+
+	    if (tcp->alloc) {
+		pj_turn_allocation_on_rx_client_pkt(tcp->alloc, &recv_op->pkt);
+	    } else {
+		pj_turn_srv_on_rx_pkt(tcp->base.listener->server, &recv_op->pkt);
+	    }
+
+	    pj_assert(tcp->ref_cnt > 0);
+	    tcp_dec_ref(&tcp->base, NULL);
+
+	} else if (bytes_read != -PJ_EPENDING) {
+	    /* TCP connection closed/error. Notify client and then destroy
+	     * ourselves.
+	     * Note: the -PJ_EPENDING is the value passed during init.
+	     */
+	    /* Bare increment (not tcp_add_ref) so an armed shutdown
+	     * timer is left untouched while tearing down.
+	     */
+	    ++tcp->ref_cnt;
+
+	    if (tcp->alloc) {
+		if (bytes_read != 0) {
+		    /* bytes_read < 0 carries the negated status code */
+		    show_err(tcp->base.obj_name, "TCP socket error",
+			     -bytes_read);
+		} else {
+		    PJ_LOG(5,(tcp->base.obj_name, "TCP socket closed"));
+		}
+		pj_turn_allocation_on_transport_closed(tcp->alloc, &tcp->base);
+		tcp->alloc = NULL;
+	    }
+
+	    pj_assert(tcp->ref_cnt > 0);
+	    if (--tcp->ref_cnt == 0) {
+		tcp_destroy(tcp);
+		return;
+	    }
+	}
+
+	/* Reset pool */
+	pj_pool_reset(recv_op->pkt.pool);
+
+	/* If packet is full discard it */
+	if (recv_op->pkt.len == sizeof(recv_op->pkt.pkt)) {
+	    PJ_LOG(4,(tcp->base.obj_name, "Buffer discarded"));
+	    recv_op->pkt.len = 0;
+	}
+
+	/* Read next chunk after any leftover partial data; TCP is a
+	 * stream, so a STUN message may span several reads.
+	 * NOTE(review): assumes pkt.len holds the unconsumed length on
+	 * entry to this point — confirm against the parsing layer.
+	 */
+	bytes_read = sizeof(recv_op->pkt.pkt) - recv_op->pkt.len;
+	status = pj_ioqueue_recv(tcp->key, op_key,
+				 recv_op->pkt.pkt + recv_op->pkt.len,
+				 &bytes_read, 0);
+
+	/* Synchronous failure is folded into bytes_read as a negative
+	 * status, handled by the error branch on the next iteration.
+	 */
+	if (status != PJ_EPENDING && status != PJ_SUCCESS)
+	    bytes_read = -status;
+
+	/* Loop while recv() completes (or fails) synchronously */
+    } while (status != PJ_EPENDING && status != PJ_ECANCELLED &&
+	     status != PJ_STATUS_FROM_OS(PJ_BLOCKING_ERROR_VAL));
+
+}
+
+
+/* pj_turn_transport callback to send a packet to the client. The
+ * destination address arguments are ignored: TCP is connection
+ * oriented, so the data always goes to the connected peer.
+ */
+static pj_status_t tcp_sendto(pj_turn_transport *tp,
+			      const void *packet,
+			      pj_size_t size,
+			      unsigned flag,
+			      const pj_sockaddr_t *addr,
+			      int addr_len)
+{
+    struct tcp_transport *transport = (struct tcp_transport*) tp;
+    pj_ssize_t len = (pj_ssize_t)size;
+
+    PJ_UNUSED_ARG(addr);
+    PJ_UNUSED_ARG(addr_len);
+
+    return pj_ioqueue_send(transport->key, &transport->send_op, packet,
+			   &len, flag);
+}
+
+
+/* Increment the transport reference counter, optionally binding it to
+ * an allocation, and disarm any pending shutdown timer.
+ */
+static void tcp_add_ref(pj_turn_transport *tp,
+			pj_turn_allocation *alloc)
+{
+    struct tcp_transport *tcp = (struct tcp_transport*) tp;
+
+    ++tcp->ref_cnt;
+
+    /* Attach to the allocation on first use */
+    if (alloc && tcp->alloc == NULL)
+	tcp->alloc = alloc;
+
+    /* A live reference means the shutdown timer must not fire */
+    if (tcp->timer.id != TIMER_NONE) {
+	pj_timer_heap_cancel(tcp->base.listener->server->core.timer_heap,
+			     &tcp->timer);
+	tcp->timer.id = TIMER_NONE;
+    }
+}
+
+
+/* Decrement the transport reference counter, optionally detaching the
+ * given allocation. When the last reference goes away the transport is
+ * not destroyed immediately; a SHUTDOWN_DELAY timer is scheduled so a
+ * quickly re-arriving reference can cancel it (see tcp_add_ref()).
+ */
+static void tcp_dec_ref(pj_turn_transport *tp,
+			pj_turn_allocation *alloc)
+{
+    struct tcp_transport *tcp = (struct tcp_transport*) tp;
+
+    --tcp->ref_cnt;
+
+    /* Detach the allocation if it is the one bound to this transport */
+    if (alloc && alloc == tcp->alloc) {
+	tcp->alloc = NULL;
+    }
+
+    /* Last reference gone: arm delayed destruction */
+    if (tcp->ref_cnt == 0 && tcp->timer.id == TIMER_NONE) {
+	pj_time_val delay = { SHUTDOWN_DELAY, 0 };
+	tcp->timer.id = TIMER_DESTROY;
+	pj_timer_heap_schedule(tcp->base.listener->server->core.timer_heap,
+			       &tcp->timer, &delay);
+    }
+}
+
+#else /* PJ_HAS_TCP */
+
+/* To avoid empty translation unit warning */
+int listener_tcp_dummy = 0;
+
+#endif /* PJ_HAS_TCP */
+
diff --git a/pjnath/src/pjturn-srv/listener_udp.c b/pjnath/src/pjturn-srv/listener_udp.c
new file mode 100644
index 0000000..c4d2171
--- /dev/null
+++ b/pjnath/src/pjturn-srv/listener_udp.c
@@ -0,0 +1,266 @@
+/* $Id: listener_udp.c 3553 2011-05-05 06:14:19Z nanang $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include "turn.h"
+#include <pj/compat/socket.h>
+
+/* State of one pending asynchronous recvfrom() operation plus the
+ * packet buffer it fills. op_key must be the first member: the
+ * completion callback casts op_key back to this struct.
+ */
+struct read_op
+{
+    pj_ioqueue_op_key_t	op_key;		/* ioqueue operation key	*/
+    pj_turn_pkt		pkt;		/* Received packet		*/
+};
+
+/* UDP listener. Unlike TCP, the same socket serves every client, so the
+ * listener embeds one shared pj_turn_transport instance ("tp"). base
+ * must be the first member (udp_destroy() casts back to this struct).
+ */
+struct udp_listener
+{
+    pj_turn_listener	     base;	/* Base listener, must be first	*/
+
+    pj_ioqueue_key_t	     *key;	/* ioqueue key of base.sock	*/
+    unsigned		     read_cnt;	/* Entries in read_op array	*/
+    struct read_op	     **read_op;	/* Array of read_op's */
+
+    pj_turn_transport	     tp;	/* Transport instance */
+};
+
+
+static pj_status_t udp_destroy(pj_turn_listener *udp);
+static void on_read_complete(pj_ioqueue_key_t *key,
+ pj_ioqueue_op_key_t *op_key,
+ pj_ssize_t bytes_read);
+
+static pj_status_t udp_sendto(pj_turn_transport *tp,
+ const void *packet,
+ pj_size_t size,
+ unsigned flag,
+ const pj_sockaddr_t *addr,
+ int addr_len);
+static void udp_add_ref(pj_turn_transport *tp,
+ pj_turn_allocation *alloc);
+static void udp_dec_ref(pj_turn_transport *tp,
+ pj_turn_allocation *alloc);
+
+
+/*
+ * Create a new UDP listener on the specified address/port and start
+ * "concurrency_cnt" simultaneous asynchronous read operations.
+ *
+ * On failure the partially constructed listener is destroyed and the
+ * error status is returned; *p_listener is left untouched.
+ */
+PJ_DEF(pj_status_t) pj_turn_listener_create_udp( pj_turn_srv *srv,
+						 int af,
+						 const pj_str_t *bound_addr,
+						 unsigned port,
+						 unsigned concurrency_cnt,
+						 unsigned flags,
+						 pj_turn_listener **p_listener)
+{
+    pj_pool_t *pool;
+    struct udp_listener *udp;
+    pj_ioqueue_callback ioqueue_cb;
+    unsigned i;
+    pj_status_t status;
+
+    /* Create structure */
+    pool = pj_pool_create(srv->core.pf, "udp%p", 1000, 1000, NULL);
+    udp = PJ_POOL_ZALLOC_T(pool, struct udp_listener);
+    udp->base.pool = pool;
+    udp->base.obj_name = pool->obj_name;
+    udp->base.server = srv;
+    udp->base.tp_type = PJ_TURN_TP_UDP;
+    udp->base.sock = PJ_INVALID_SOCKET;
+    udp->base.destroy = &udp_destroy;
+    udp->read_cnt = concurrency_cnt;
+    udp->base.flags = flags;
+
+    /* The single transport instance shared by all clients */
+    udp->tp.obj_name = udp->base.obj_name;
+    udp->tp.info = udp->base.info;
+    udp->tp.listener = &udp->base;
+    udp->tp.sendto = &udp_sendto;
+    udp->tp.add_ref = &udp_add_ref;
+    udp->tp.dec_ref = &udp_dec_ref;
+
+    /* Create socket */
+    status = pj_sock_socket(af, pj_SOCK_DGRAM(), 0, &udp->base.sock);
+    if (status != PJ_SUCCESS)
+	goto on_error;
+
+    /* Init bind address */
+    status = pj_sockaddr_init(af, &udp->base.addr, bound_addr,
+			      (pj_uint16_t)port);
+    if (status != PJ_SUCCESS)
+	goto on_error;
+
+    /* Create info string, e.g. "UDP:1.2.3.4:3478", for logging */
+    pj_ansi_strcpy(udp->base.info, "UDP:");
+    pj_sockaddr_print(&udp->base.addr, udp->base.info+4,
+		      sizeof(udp->base.info)-4, 3);
+
+    /* Bind socket */
+    status = pj_sock_bind(udp->base.sock, &udp->base.addr,
+			  pj_sockaddr_get_len(&udp->base.addr));
+    if (status != PJ_SUCCESS)
+	goto on_error;
+
+    /* Register to ioqueue. The status was previously ignored here; a
+     * registration failure must abort creation, otherwise reads would
+     * be posted on an invalid key below.
+     */
+    pj_bzero(&ioqueue_cb, sizeof(ioqueue_cb));
+    ioqueue_cb.on_read_complete = on_read_complete;
+    status = pj_ioqueue_register_sock(pool, srv->core.ioqueue, udp->base.sock,
+				      udp, &ioqueue_cb, &udp->key);
+    if (status != PJ_SUCCESS)
+	goto on_error;
+
+    /* Create op keys */
+    udp->read_op = (struct read_op**)pj_pool_calloc(pool, concurrency_cnt,
+						    sizeof(struct read_op*));
+
+    /* Create each read_op and kick off its read; the completion callback
+     * posts the initial asynchronous recvfrom() when called directly.
+     */
+    for (i=0; i<concurrency_cnt; ++i) {
+	/* Per-packet pool, reset after each packet is processed */
+	pj_pool_t *rpool = pj_pool_create(srv->core.pf, "rop%p",
+					  1000, 1000, NULL);
+
+	udp->read_op[i] = PJ_POOL_ZALLOC_T(pool, struct read_op);
+	udp->read_op[i]->pkt.pool = rpool;
+
+	on_read_complete(udp->key, &udp->read_op[i]->op_key, 0);
+    }
+
+    /* Done */
+    PJ_LOG(4,(udp->base.obj_name, "Listener %s created", udp->base.info));
+
+    *p_listener = &udp->base;
+    return PJ_SUCCESS;
+
+
+on_error:
+    udp_destroy(&udp->base);
+    return status;
+}
+
+
+/*
+ * Destroy listener. Also serves as the error-cleanup path of
+ * pj_turn_listener_create_udp(), so it must tolerate a partially
+ * initialized listener.
+ */
+static pj_status_t udp_destroy(pj_turn_listener *listener)
+{
+    struct udp_listener *udp = (struct udp_listener *)listener;
+    unsigned i;
+
+    if (udp->key) {
+	pj_ioqueue_unregister(udp->key);
+	udp->key = NULL;
+	udp->base.sock = PJ_INVALID_SOCKET;
+    } else if (udp->base.sock != PJ_INVALID_SOCKET) {
+	pj_sock_close(udp->base.sock);
+	udp->base.sock = PJ_INVALID_SOCKET;
+    }
+
+    /* The read_op array may not exist yet if initialization failed
+     * before it was allocated (e.g. socket/bind error); dereferencing
+     * it unguarded would crash on that error path.
+     */
+    if (udp->read_op) {
+	for (i=0; i<udp->read_cnt; ++i) {
+	    if (udp->read_op[i]->pkt.pool) {
+		pj_pool_t *rpool = udp->read_op[i]->pkt.pool;
+		udp->read_op[i]->pkt.pool = NULL;
+		pj_pool_release(rpool);
+	    }
+	}
+    }
+
+    if (udp->base.pool) {
+	pj_pool_t *pool = udp->base.pool;
+
+	PJ_LOG(4,(udp->base.obj_name, "Listener %s destroyed",
+		  udp->base.info));
+
+	udp->base.pool = NULL;
+	pj_pool_release(pool);
+    }
+    return PJ_SUCCESS;
+}
+
+/*
+ * pj_turn_transport callback: transmit a packet to the given address
+ * through the listener's shared UDP socket.
+ */
+static pj_status_t udp_sendto(pj_turn_transport *tp,
+			      const void *packet,
+			      pj_size_t size,
+			      unsigned flag,
+			      const pj_sockaddr_t *addr,
+			      int addr_len)
+{
+    pj_ssize_t sent = (pj_ssize_t)size;
+
+    return pj_sock_sendto(tp->listener->sock, packet, &sent, flag,
+			  addr, addr_len);
+}
+
+
+/* pj_turn_transport add_ref callback. The UDP transport is a single
+ * instance embedded in (and owned by) the listener, so reference
+ * counting is a no-op.
+ */
+static void udp_add_ref(pj_turn_transport *tp,
+			pj_turn_allocation *alloc)
+{
+    /* Do nothing */
+    PJ_UNUSED_ARG(tp);
+    PJ_UNUSED_ARG(alloc);
+}
+
+/* pj_turn_transport dec_ref callback; no-op for the same reason as
+ * udp_add_ref() above.
+ */
+static void udp_dec_ref(pj_turn_transport *tp,
+			pj_turn_allocation *alloc)
+{
+    /* Do nothing */
+    PJ_UNUSED_ARG(tp);
+    PJ_UNUSED_ARG(alloc);
+}
+
+
+/*
+ * Callback on received packet. Also invoked directly at startup with
+ * bytes_read==0 to post the initial asynchronous recvfrom() for each
+ * read_op.
+ */
+static void on_read_complete(pj_ioqueue_key_t *key,
+			     pj_ioqueue_op_key_t *op_key,
+			     pj_ssize_t bytes_read)
+{
+    struct udp_listener *udp;
+    /* op_key is the first member of struct read_op */
+    struct read_op *read_op = (struct read_op*) op_key;
+    pj_status_t status;
+
+    udp = (struct udp_listener*) pj_ioqueue_get_user_data(key);
+
+    do {
+	pj_pool_t *rpool;
+
+	/* Report to server */
+	if (bytes_read > 0) {
+	    read_op->pkt.len = bytes_read;
+	    pj_gettimeofday(&read_op->pkt.rx_time);
+
+	    pj_turn_srv_on_rx_pkt(udp->base.server, &read_op->pkt);
+	}
+
+	/* Reset the per-packet pool for reuse; the pkt struct itself is
+	 * not allocated from rpool, so restoring the pointer is safe.
+	 */
+	rpool = read_op->pkt.pool;
+	pj_pool_reset(rpool);
+	read_op->pkt.pool = rpool;
+	read_op->pkt.transport = &udp->tp;
+	read_op->pkt.src.tp_type = udp->base.tp_type;
+
+	/* Read next packet */
+	bytes_read = sizeof(read_op->pkt.pkt);
+	read_op->pkt.src_addr_len = sizeof(read_op->pkt.src.clt_addr);
+	pj_bzero(&read_op->pkt.src.clt_addr, sizeof(read_op->pkt.src.clt_addr));
+
+	status = pj_ioqueue_recvfrom(udp->key, op_key,
+				     read_op->pkt.pkt, &bytes_read, 0,
+				     &read_op->pkt.src.clt_addr,
+				     &read_op->pkt.src_addr_len);
+
+	/* Synchronous failure is folded into bytes_read as a negative
+	 * status, which skips the report branch on the next iteration.
+	 */
+	if (status != PJ_EPENDING && status != PJ_SUCCESS)
+	    bytes_read = -status;
+
+	/* Loop while recvfrom() completes (or fails) synchronously */
+    } while (status != PJ_EPENDING && status != PJ_ECANCELLED &&
+	     status != PJ_STATUS_FROM_OS(PJ_BLOCKING_ERROR_VAL));
+}
+
diff --git a/pjnath/src/pjturn-srv/main.c b/pjnath/src/pjturn-srv/main.c
new file mode 100644
index 0000000..5bf5bbb
--- /dev/null
+++ b/pjnath/src/pjturn-srv/main.c
@@ -0,0 +1,174 @@
+/* $Id: main.c 3553 2011-05-05 06:14:19Z nanang $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include "turn.h"
+#include "auth.h"
+
+#define REALM "pjsip.org"
+//#define TURN_PORT PJ_STUN_TURN_PORT
+#define TURN_PORT 34780
+#define LOG_LEVEL 4
+
+
+static pj_caching_pool g_cp;
+
+int err(const char *title, pj_status_t status)
+{
+ char errmsg[PJ_ERR_MSG_SIZE];
+ pj_strerror(status, errmsg, sizeof(errmsg));
+
+ printf("%s: %s\n", title, errmsg);
+ return 1;
+}
+
+/* Print server statistics and a table of active client allocations to
+ * stdout (handler for the console "d" command).
+ */
+static void dump_status(pj_turn_srv *srv)
+{
+    char addr[80];
+    pj_hash_iterator_t itbuf, *it;
+    pj_time_val now;
+    unsigned i;
+
+    for (i=0; i<srv->core.lis_cnt; ++i) {
+	pj_turn_listener *lis = srv->core.listener[i];
+	printf("Server address : %s\n", lis->info);
+    }
+
+    printf("Worker threads : %d\n", srv->core.thread_cnt);
+    printf("Total mem usage: %u.%03uMB\n", (unsigned)(g_cp.used_size / 1000000),
+	   (unsigned)((g_cp.used_size % 1000000)/1000));
+    printf("UDP port range : %u %u %u (next/min/max)\n", srv->ports.next_udp,
+	   srv->ports.min_udp, srv->ports.max_udp);
+    printf("TCP port range : %u %u %u (next/min/max)\n", srv->ports.next_tcp,
+	   srv->ports.min_tcp, srv->ports.max_tcp);
+    printf("Clients # : %u\n", pj_hash_count(srv->tables.alloc));
+
+    puts("");
+
+    /* No allocations: skip the table entirely */
+    if (pj_hash_count(srv->tables.alloc)==0) {
+	return;
+    }
+
+    puts("# Client addr. Alloc addr. Username Lftm Expy #prm #chl");
+    puts("------------------------------------------------------------------------------");
+
+    pj_gettimeofday(&now);
+
+    /* Walk the allocation hash table and print one row per client */
+    it = pj_hash_first(srv->tables.alloc, &itbuf);
+    i=1;
+    while (it) {
+	pj_turn_allocation *alloc = (pj_turn_allocation*)
+				    pj_hash_this(srv->tables.alloc, it);
+	printf("%-3d %-22s %-22s %-8.*s %-4d %-4ld %-4d %-4d\n",
+	       i,
+	       alloc->info,
+	       pj_sockaddr_print(&alloc->relay.hkey.addr, addr, sizeof(addr), 3),
+	       (int)alloc->cred.data.static_cred.username.slen,
+	       alloc->cred.data.static_cred.username.ptr,
+	       alloc->relay.lifetime,
+	       alloc->relay.expiry.sec - now.sec,	/* secs to expiry */
+	       pj_hash_count(alloc->peer_table),
+	       pj_hash_count(alloc->ch_table));
+
+	it = pj_hash_next(srv->tables.alloc, it);
+	++i;
+    }
+}
+
+/* Print the interactive console menu and the input prompt (no trailing
+ * newline after the prompt, so the cursor stays on the same line).
+ */
+static void menu(void)
+{
+    puts("");
+    puts("Menu:");
+    puts(" d     Dump status");
+    puts(" q     Quit");
+    printf(">> ");
+}
+
+/* Blocking console loop: show the menu, read one command per line, and
+ * dispatch it. Returns when the user enters 'q' or stdin is closed.
+ */
+static void console_main(pj_turn_srv *srv)
+{
+    for (;;) {
+	char line[10];
+
+	menu();
+
+	if (fgets(line, sizeof(line), stdin) == NULL)
+	    break;
+
+	if (line[0] == 'd') {
+	    dump_status(srv);
+	} else if (line[0] == 'q') {
+	    break;
+	}
+    }
+}
+
+/*
+ * TURN server entry point: initialize the libraries, create the server
+ * with a UDP listener (and a TCP listener when PJ_HAS_TCP), then run
+ * the interactive console until the user quits.
+ */
+int main()
+{
+    pj_turn_srv *srv;
+    pj_turn_listener *listener;
+    pj_status_t status;
+
+    status = pj_init();
+    if (status != PJ_SUCCESS)
+	return err("pj_init() error", status);
+
+    pjlib_util_init();
+    pjnath_init();
+
+    pj_caching_pool_init(&g_cp, NULL, 0);
+
+    pj_turn_auth_init(REALM);
+
+    status = pj_turn_srv_create(&g_cp.factory, &srv);
+    if (status != PJ_SUCCESS)
+	return err("Error creating server", status);
+
+    status = pj_turn_listener_create_udp(srv, pj_AF_INET(), NULL,
+					 TURN_PORT, 1, 0, &listener);
+    if (status != PJ_SUCCESS)
+	return err("Error creating UDP listener", status);
+
+    /* Register the UDP listener before "listener" is reused for TCP;
+     * previously only one add_listener call existed after both
+     * creations, so the UDP listener was never added when TCP was
+     * enabled.
+     */
+    status = pj_turn_srv_add_listener(srv, listener);
+    if (status != PJ_SUCCESS)
+	return err("Error adding UDP listener", status);
+
+#if PJ_HAS_TCP
+    status = pj_turn_listener_create_tcp(srv, pj_AF_INET(), NULL,
+					 TURN_PORT, 1, 0, &listener);
+    if (status != PJ_SUCCESS)
+	return err("Error creating listener", status);
+
+    status = pj_turn_srv_add_listener(srv, listener);
+    if (status != PJ_SUCCESS)
+	return err("Error adding listener", status);
+#endif
+
+    puts("Server is running");
+
+    pj_log_set_level(LOG_LEVEL);
+
+    console_main(srv);
+
+    pj_turn_srv_destroy(srv);
+    pj_caching_pool_destroy(&g_cp);
+    pj_shutdown();
+
+    return 0;
+}
+
diff --git a/pjnath/src/pjturn-srv/server.c b/pjnath/src/pjturn-srv/server.c
new file mode 100644
index 0000000..3732898
--- /dev/null
+++ b/pjnath/src/pjturn-srv/server.c
@@ -0,0 +1,699 @@
+/* $Id: server.c 3553 2011-05-05 06:14:19Z nanang $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
#include "turn.h"
#include "auth.h"

/* Server capacity and tuning constants. */
#define MAX_CLIENTS 32
#define MAX_PEERS_PER_CLIENT 8
//#define MAX_HANDLES (MAX_CLIENTS*MAX_PEERS_PER_CLIENT+MAX_LISTENERS)
#define MAX_HANDLES PJ_IOQUEUE_MAX_HANDLES
#define MAX_TIMER (MAX_HANDLES * 2)
/* Relay ports are taken from the IANA dynamic/ephemeral port range. */
#define MIN_PORT 49152
#define MAX_PORT 65535
#define MAX_LISTENERS 16
#define MAX_THREADS 2
/* Cap on network events handled per polling round, so a flood of I/O
 * cannot starve the timer heap (see srv_handle_events()).
 */
#define MAX_NET_EVENTS 1000

/* Prototypes */
static int server_thread_proc(void *arg);
static pj_status_t on_tx_stun_msg( pj_stun_session *sess,
				   void *token,
				   const void *pkt,
				   pj_size_t pkt_size,
				   const pj_sockaddr_t *dst_addr,
				   unsigned addr_len);
static pj_status_t on_rx_stun_request(pj_stun_session *sess,
				      const pj_uint8_t *pkt,
				      unsigned pkt_len,
				      const pj_stun_rx_data *rdata,
				      void *user_data,
				      const pj_sockaddr_t *src_addr,
				      unsigned src_addr_len);

/* A snapshot of long-term credential fields (realm/username/nonce and
 * the password data), kept together so they can be saved and restored
 * as one unit.
 */
struct saved_cred
{
    pj_str_t realm;
    pj_str_t username;
    pj_str_t nonce;
    int data_type;
    pj_str_t data;
};
+
+
+/*
+ * Get transport type name, normally for logging purpose only.
+ */
+PJ_DEF(const char*) pj_turn_tp_type_name(int tp_type)
+{
+ /* Must be 3 characters long! */
+ if (tp_type == PJ_TURN_TP_UDP) {
+ return "UDP";
+ } else if (tp_type == PJ_TURN_TP_TCP) {
+ return "TCP";
+ } else {
+ pj_assert(!"Unsupported transport");
+ return "???";
+ }
+}
+
+/*
+ * Create server.
+ */
+PJ_DEF(pj_status_t) pj_turn_srv_create(pj_pool_factory *pf,
+ pj_turn_srv **p_srv)
+{
+ pj_pool_t *pool;
+ pj_stun_session_cb sess_cb;
+ pj_turn_srv *srv;
+ unsigned i;
+ pj_status_t status;
+
+ PJ_ASSERT_RETURN(pf && p_srv, PJ_EINVAL);
+
+ /* Create server and init core settings */
+ pool = pj_pool_create(pf, "srv%p", 1000, 1000, NULL);
+ srv = PJ_POOL_ZALLOC_T(pool, pj_turn_srv);
+ srv->obj_name = pool->obj_name;
+ srv->core.pf = pf;
+ srv->core.pool = pool;
+ srv->core.tls_key = srv->core.tls_data = -1;
+
+ /* Create ioqueue */
+ status = pj_ioqueue_create(pool, MAX_HANDLES, &srv->core.ioqueue);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ /* Server mutex */
+ status = pj_lock_create_recursive_mutex(pool, srv->obj_name,
+ &srv->core.lock);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ /* Allocate TLS */
+ status = pj_thread_local_alloc(&srv->core.tls_key);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ status = pj_thread_local_alloc(&srv->core.tls_data);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ /* Create timer heap */
+ status = pj_timer_heap_create(pool, MAX_TIMER, &srv->core.timer_heap);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ /* Configure lock for the timer heap */
+ pj_timer_heap_set_lock(srv->core.timer_heap, srv->core.lock, PJ_FALSE);
+
+ /* Array of listeners */
+ srv->core.listener = (pj_turn_listener**)
+ pj_pool_calloc(pool, MAX_LISTENERS,
+ sizeof(srv->core.listener[0]));
+
+ /* Create hash tables */
+ srv->tables.alloc = pj_hash_create(pool, MAX_CLIENTS);
+ srv->tables.res = pj_hash_create(pool, MAX_CLIENTS);
+
+ /* Init ports settings */
+ srv->ports.min_udp = srv->ports.next_udp = MIN_PORT;
+ srv->ports.max_udp = MAX_PORT;
+ srv->ports.min_tcp = srv->ports.next_tcp = MIN_PORT;
+ srv->ports.max_tcp = MAX_PORT;
+
+ /* Init STUN config */
+ pj_stun_config_init(&srv->core.stun_cfg, pf, 0, srv->core.ioqueue,
+ srv->core.timer_heap);
+
+ /* Init STUN credential */
+ srv->core.cred.type = PJ_STUN_AUTH_CRED_DYNAMIC;
+ srv->core.cred.data.dyn_cred.user_data = srv;
+ srv->core.cred.data.dyn_cred.get_auth = &pj_turn_get_auth;
+ srv->core.cred.data.dyn_cred.get_password = &pj_turn_get_password;
+ srv->core.cred.data.dyn_cred.verify_nonce = &pj_turn_verify_nonce;
+
+ /* Create STUN session to handle new allocation */
+ pj_bzero(&sess_cb, sizeof(sess_cb));
+ sess_cb.on_rx_request = &on_rx_stun_request;
+ sess_cb.on_send_msg = &on_tx_stun_msg;
+
+ status = pj_stun_session_create(&srv->core.stun_cfg, srv->obj_name,
+ &sess_cb, PJ_FALSE, &srv->core.stun_sess);
+ if (status != PJ_SUCCESS) {
+ goto on_error;
+ }
+
+ pj_stun_session_set_user_data(srv->core.stun_sess, srv);
+ pj_stun_session_set_credential(srv->core.stun_sess, PJ_STUN_AUTH_LONG_TERM,
+ &srv->core.cred);
+
+
+ /* Array of worker threads */
+ srv->core.thread_cnt = MAX_THREADS;
+ srv->core.thread = (pj_thread_t**)
+ pj_pool_calloc(pool, srv->core.thread_cnt,
+ sizeof(pj_thread_t*));
+
+ /* Start the worker threads */
+ for (i=0; i<srv->core.thread_cnt; ++i) {
+ status = pj_thread_create(pool, srv->obj_name, &server_thread_proc,
+ srv, 0, 0, &srv->core.thread[i]);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+ }
+
+ /* We're done. Application should add listeners now */
+ PJ_LOG(4,(srv->obj_name, "TURN server v%s is running",
+ pj_get_version()));
+
+ *p_srv = srv;
+ return PJ_SUCCESS;
+
+on_error:
+ pj_turn_srv_destroy(srv);
+ return status;
+}
+
+
+/*
+ * Handle timer and network events
+ */
+static void srv_handle_events(pj_turn_srv *srv, const pj_time_val *max_timeout)
+{
+ /* timeout is 'out' var. This just to make compiler happy. */
+ pj_time_val timeout = { 0, 0};
+ unsigned net_event_count = 0;
+ int c;
+
+ /* Poll the timer. The timer heap has its own mutex for better
+ * granularity, so we don't need to lock the server.
+ */
+ timeout.sec = timeout.msec = 0;
+ c = pj_timer_heap_poll( srv->core.timer_heap, &timeout );
+
+ /* timer_heap_poll should never ever returns negative value, or otherwise
+ * ioqueue_poll() will block forever!
+ */
+ pj_assert(timeout.sec >= 0 && timeout.msec >= 0);
+ if (timeout.msec >= 1000) timeout.msec = 999;
+
+ /* If caller specifies maximum time to wait, then compare the value with
+ * the timeout to wait from timer, and use the minimum value.
+ */
+ if (max_timeout && PJ_TIME_VAL_GT(timeout, *max_timeout)) {
+ timeout = *max_timeout;
+ }
+
+ /* Poll ioqueue.
+ * Repeat polling the ioqueue while we have immediate events, because
+ * timer heap may process more than one events, so if we only process
+ * one network events at a time (such as when IOCP backend is used),
+ * the ioqueue may have trouble keeping up with the request rate.
+ *
+ * For example, for each send() request, one network event will be
+ * reported by ioqueue for the send() completion. If we don't poll
+ * the ioqueue often enough, the send() completion will not be
+ * reported in timely manner.
+ */
+ do {
+ c = pj_ioqueue_poll( srv->core.ioqueue, &timeout);
+ if (c < 0) {
+ pj_thread_sleep(PJ_TIME_VAL_MSEC(timeout));
+ return;
+ } else if (c == 0) {
+ break;
+ } else {
+ net_event_count += c;
+ timeout.sec = timeout.msec = 0;
+ }
+ } while (c > 0 && net_event_count < MAX_NET_EVENTS);
+
+}
+
+/*
+ * Server worker thread proc.
+ */
+static int server_thread_proc(void *arg)
+{
+ pj_turn_srv *srv = (pj_turn_srv*)arg;
+
+ while (!srv->core.quit) {
+ pj_time_val timeout_max = {0, 100};
+ srv_handle_events(srv, &timeout_max);
+ }
+
+ return 0;
+}
+
+/*
+ * Destroy the server.
+ */
+PJ_DEF(pj_status_t) pj_turn_srv_destroy(pj_turn_srv *srv)
+{
+ pj_hash_iterator_t itbuf, *it;
+ unsigned i;
+
+ /* Stop all worker threads */
+ srv->core.quit = PJ_TRUE;
+ for (i=0; i<srv->core.thread_cnt; ++i) {
+ if (srv->core.thread[i]) {
+ pj_thread_join(srv->core.thread[i]);
+ pj_thread_destroy(srv->core.thread[i]);
+ srv->core.thread[i] = NULL;
+ }
+ }
+
+ /* Destroy all allocations FIRST */
+ if (srv->tables.alloc) {
+ it = pj_hash_first(srv->tables.alloc, &itbuf);
+ while (it != NULL) {
+ pj_turn_allocation *alloc = (pj_turn_allocation*)
+ pj_hash_this(srv->tables.alloc, it);
+ pj_hash_iterator_t *next = pj_hash_next(srv->tables.alloc, it);
+ pj_turn_allocation_destroy(alloc);
+ it = next;
+ }
+ }
+
+ /* Destroy all listeners. */
+ for (i=0; i<srv->core.lis_cnt; ++i) {
+ if (srv->core.listener[i]) {
+ pj_turn_listener_destroy(srv->core.listener[i]);
+ srv->core.listener[i] = NULL;
+ }
+ }
+
+ /* Destroy STUN session */
+ if (srv->core.stun_sess) {
+ pj_stun_session_destroy(srv->core.stun_sess);
+ srv->core.stun_sess = NULL;
+ }
+
+ /* Destroy hash tables (well, sort of) */
+ if (srv->tables.alloc) {
+ srv->tables.alloc = NULL;
+ srv->tables.res = NULL;
+ }
+
+ /* Destroy timer heap */
+ if (srv->core.timer_heap) {
+ pj_timer_heap_destroy(srv->core.timer_heap);
+ srv->core.timer_heap = NULL;
+ }
+
+ /* Destroy ioqueue */
+ if (srv->core.ioqueue) {
+ pj_ioqueue_destroy(srv->core.ioqueue);
+ srv->core.ioqueue = NULL;
+ }
+
+ /* Destroy thread local IDs */
+ if (srv->core.tls_key != -1) {
+ pj_thread_local_free(srv->core.tls_key);
+ srv->core.tls_key = -1;
+ }
+ if (srv->core.tls_data != -1) {
+ pj_thread_local_free(srv->core.tls_data);
+ srv->core.tls_data = -1;
+ }
+
+ /* Destroy server lock */
+ if (srv->core.lock) {
+ pj_lock_destroy(srv->core.lock);
+ srv->core.lock = NULL;
+ }
+
+ /* Release pool */
+ if (srv->core.pool) {
+ pj_pool_t *pool = srv->core.pool;
+ srv->core.pool = NULL;
+ pj_pool_release(pool);
+ }
+
+ /* Done */
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Add listener.
+ */
+PJ_DEF(pj_status_t) pj_turn_srv_add_listener(pj_turn_srv *srv,
+ pj_turn_listener *lis)
+{
+ unsigned index;
+
+ PJ_ASSERT_RETURN(srv && lis, PJ_EINVAL);
+ PJ_ASSERT_RETURN(srv->core.lis_cnt < MAX_LISTENERS, PJ_ETOOMANY);
+
+ /* Add to array */
+ index = srv->core.lis_cnt;
+ srv->core.listener[index] = lis;
+ lis->server = srv;
+ lis->id = index;
+ srv->core.lis_cnt++;
+
+ PJ_LOG(4,(srv->obj_name, "Listener %s/%s added at index %d",
+ lis->obj_name, lis->info, lis->id));
+
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Destroy listener.
+ */
+PJ_DEF(pj_status_t) pj_turn_listener_destroy(pj_turn_listener *listener)
+{
+ pj_turn_srv *srv = listener->server;
+ unsigned i;
+
+ /* Remove from our listener list */
+ pj_lock_acquire(srv->core.lock);
+ for (i=0; i<srv->core.lis_cnt; ++i) {
+ if (srv->core.listener[i] == listener) {
+ srv->core.listener[i] = NULL;
+ srv->core.lis_cnt--;
+ listener->id = PJ_TURN_INVALID_LIS_ID;
+ break;
+ }
+ }
+ pj_lock_release(srv->core.lock);
+
+ /* Destroy */
+ return listener->destroy(listener);
+}
+
+
+/**
+ * Add a reference to a transport.
+ */
+PJ_DEF(void) pj_turn_transport_add_ref( pj_turn_transport *transport,
+ pj_turn_allocation *alloc)
+{
+ transport->add_ref(transport, alloc);
+}
+
+
+/**
+ * Decrement transport reference counter.
+ */
+PJ_DEF(void) pj_turn_transport_dec_ref( pj_turn_transport *transport,
+ pj_turn_allocation *alloc)
+{
+ transport->dec_ref(transport, alloc);
+}
+
+
+/*
+ * Register an allocation to the hash tables.
+ */
+PJ_DEF(pj_status_t) pj_turn_srv_register_allocation(pj_turn_srv *srv,
+ pj_turn_allocation *alloc)
+{
+ /* Add to hash tables */
+ pj_lock_acquire(srv->core.lock);
+ pj_hash_set(alloc->pool, srv->tables.alloc,
+ &alloc->hkey, sizeof(alloc->hkey), 0, alloc);
+ pj_hash_set(alloc->pool, srv->tables.res,
+ &alloc->relay.hkey, sizeof(alloc->relay.hkey), 0,
+ &alloc->relay);
+ pj_lock_release(srv->core.lock);
+
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Unregister an allocation from the hash tables.
+ */
+PJ_DEF(pj_status_t) pj_turn_srv_unregister_allocation(pj_turn_srv *srv,
+ pj_turn_allocation *alloc)
+{
+ /* Unregister from hash tables */
+ pj_lock_acquire(srv->core.lock);
+ pj_hash_set(alloc->pool, srv->tables.alloc,
+ &alloc->hkey, sizeof(alloc->hkey), 0, NULL);
+ pj_hash_set(alloc->pool, srv->tables.res,
+ &alloc->relay.hkey, sizeof(alloc->relay.hkey), 0, NULL);
+ pj_lock_release(srv->core.lock);
+
+ return PJ_SUCCESS;
+}
+
+
+/* Callback from our own STUN session whenever it needs to send
+ * outgoing STUN packet.
+ */
+static pj_status_t on_tx_stun_msg( pj_stun_session *sess,
+ void *token,
+ const void *pdu,
+ pj_size_t pdu_size,
+ const pj_sockaddr_t *dst_addr,
+ unsigned addr_len)
+{
+ pj_turn_transport *transport = (pj_turn_transport*) token;
+
+ PJ_ASSERT_RETURN(transport!=NULL, PJ_EINVALIDOP);
+
+ PJ_UNUSED_ARG(sess);
+
+ return transport->sendto(transport, pdu, pdu_size, 0,
+ dst_addr, addr_len);
+}
+
+
+/* Respond to STUN request */
+static pj_status_t stun_respond(pj_stun_session *sess,
+ pj_turn_transport *transport,
+ const pj_stun_rx_data *rdata,
+ unsigned code,
+ const char *errmsg,
+ pj_bool_t cache,
+ const pj_sockaddr_t *dst_addr,
+ unsigned addr_len)
+{
+ pj_status_t status;
+ pj_str_t reason;
+ pj_stun_tx_data *tdata;
+
+ /* Create response */
+ status = pj_stun_session_create_res(sess, rdata, code,
+ (errmsg?pj_cstr(&reason,errmsg):NULL),
+ &tdata);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ /* Send the response */
+ return pj_stun_session_send_msg(sess, transport, cache, PJ_FALSE,
+ dst_addr, addr_len, tdata);
+}
+
+
+/* Callback from our own STUN session when incoming request arrives.
+ * This function is triggered by pj_stun_session_on_rx_pkt() call in
+ * pj_turn_srv_on_rx_pkt() function below.
+ */
+static pj_status_t on_rx_stun_request(pj_stun_session *sess,
+ const pj_uint8_t *pdu,
+ unsigned pdu_len,
+ const pj_stun_rx_data *rdata,
+ void *token,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len)
+{
+ pj_turn_transport *transport;
+ const pj_stun_msg *msg = rdata->msg;
+ pj_turn_srv *srv;
+ pj_turn_allocation *alloc;
+ pj_status_t status;
+
+ PJ_UNUSED_ARG(pdu);
+ PJ_UNUSED_ARG(pdu_len);
+
+ transport = (pj_turn_transport*) token;
+ srv = transport->listener->server;
+
+ /* Respond any requests other than ALLOCATE with 437 response */
+ if (msg->hdr.type != PJ_STUN_ALLOCATE_REQUEST) {
+ stun_respond(sess, transport, rdata, PJ_STUN_SC_ALLOCATION_MISMATCH,
+ NULL, PJ_FALSE, src_addr, src_addr_len);
+ return PJ_SUCCESS;
+ }
+
+ /* Create new allocation. The relay resource will be allocated
+ * in this function.
+ */
+ status = pj_turn_allocation_create(transport, src_addr, src_addr_len,
+ rdata, sess, &alloc);
+ if (status != PJ_SUCCESS) {
+ /* STUN response has been sent, no need to reply here */
+ return PJ_SUCCESS;
+ }
+
+ /* Done. */
+ return PJ_SUCCESS;
+}
+
+/* Handle STUN Binding request */
+static void handle_binding_request(pj_turn_pkt *pkt,
+ unsigned options)
+{
+ pj_stun_msg *request, *response;
+ pj_uint8_t pdu[200];
+ pj_size_t len;
+ pj_status_t status;
+
+ /* Decode request */
+ status = pj_stun_msg_decode(pkt->pool, pkt->pkt, pkt->len, options,
+ &request, NULL, NULL);
+ if (status != PJ_SUCCESS)
+ return;
+
+ /* Create response */
+ status = pj_stun_msg_create_response(pkt->pool, request, 0, NULL,
+ &response);
+ if (status != PJ_SUCCESS)
+ return;
+
+ /* Add XOR-MAPPED-ADDRESS */
+ pj_stun_msg_add_sockaddr_attr(pkt->pool, response,
+ PJ_STUN_ATTR_XOR_MAPPED_ADDR,
+ PJ_TRUE,
+ &pkt->src.clt_addr,
+ pkt->src_addr_len);
+
+ /* Encode */
+ status = pj_stun_msg_encode(response, pdu, sizeof(pdu), 0, NULL, &len);
+ if (status != PJ_SUCCESS)
+ return;
+
+ /* Send response */
+ pkt->transport->sendto(pkt->transport, pdu, len, 0,
+ &pkt->src.clt_addr, pkt->src_addr_len);
+}
+
+/*
+ * This callback is called by UDP listener on incoming packet. This is
+ * the first entry for incoming packet (from client) to the server. From
+ * here, the packet may be handed over to an allocation if an allocation
+ * is found for the client address, or handed over to owned STUN session
+ * if an allocation is not found.
+ */
+PJ_DEF(void) pj_turn_srv_on_rx_pkt(pj_turn_srv *srv,
+ pj_turn_pkt *pkt)
+{
+ pj_turn_allocation *alloc;
+
+ /* Get TURN allocation from the source address */
+ pj_lock_acquire(srv->core.lock);
+ alloc = (pj_turn_allocation*)
+ pj_hash_get(srv->tables.alloc, &pkt->src, sizeof(pkt->src), NULL);
+ pj_lock_release(srv->core.lock);
+
+ /* If allocation is found, just hand over the packet to the
+ * allocation.
+ */
+ if (alloc) {
+ pj_turn_allocation_on_rx_client_pkt(alloc, pkt);
+ } else {
+ /* Otherwise this is a new client */
+ unsigned options;
+ pj_size_t parsed_len;
+ pj_status_t status;
+
+ /* Check that this is a STUN message */
+ options = PJ_STUN_CHECK_PACKET | PJ_STUN_NO_FINGERPRINT_CHECK;
+ if (pkt->transport->listener->tp_type == PJ_TURN_TP_UDP)
+ options |= PJ_STUN_IS_DATAGRAM;
+
+ status = pj_stun_msg_check(pkt->pkt, pkt->len, options);
+ if (status != PJ_SUCCESS) {
+ /* If the first byte are not STUN, drop the packet. First byte
+ * of STUN message is always 0x00 or 0x01. Otherwise wait for
+ * more data as the data might have come from TCP.
+ *
+ * Also drop packet if it's unreasonably too big, as this might
+ * indicate invalid data that's building up in the buffer.
+ *
+ * Or if packet is a datagram.
+ */
+ if ((*pkt->pkt != 0x00 && *pkt->pkt != 0x01) ||
+ pkt->len > 1600 ||
+ (options & PJ_STUN_IS_DATAGRAM))
+ {
+ char errmsg[PJ_ERR_MSG_SIZE];
+ char ip[PJ_INET6_ADDRSTRLEN+10];
+
+ pkt->len = 0;
+
+ pj_strerror(status, errmsg, sizeof(errmsg));
+ PJ_LOG(5,(srv->obj_name,
+ "Non-STUN packet from %s is dropped: %s",
+ pj_sockaddr_print(&pkt->src.clt_addr, ip, sizeof(ip), 3),
+ errmsg));
+ }
+ return;
+ }
+
+ /* Special handling for Binding Request. We won't give it to the
+ * STUN session since this request is not authenticated.
+ */
+ if (pkt->pkt[1] == 1) {
+ handle_binding_request(pkt, options);
+ return;
+ }
+
+ /* Hand over processing to STUN session. This will trigger
+ * on_rx_stun_request() callback to be called if the STUN
+ * message is a request.
+ */
+ options &= ~PJ_STUN_CHECK_PACKET;
+ parsed_len = 0;
+ status = pj_stun_session_on_rx_pkt(srv->core.stun_sess, pkt->pkt,
+ pkt->len, options, pkt->transport,
+ &parsed_len, &pkt->src.clt_addr,
+ pkt->src_addr_len);
+ if (status != PJ_SUCCESS) {
+ char errmsg[PJ_ERR_MSG_SIZE];
+ char ip[PJ_INET6_ADDRSTRLEN+10];
+
+ pj_strerror(status, errmsg, sizeof(errmsg));
+ PJ_LOG(5,(srv->obj_name,
+ "Error processing STUN packet from %s: %s",
+ pj_sockaddr_print(&pkt->src.clt_addr, ip, sizeof(ip), 3),
+ errmsg));
+ }
+
+ if (pkt->transport->listener->tp_type == PJ_TURN_TP_UDP) {
+ pkt->len = 0;
+ } else if (parsed_len > 0) {
+ if (parsed_len == pkt->len) {
+ pkt->len = 0;
+ } else {
+ pj_memmove(pkt->pkt, pkt->pkt+parsed_len,
+ pkt->len - parsed_len);
+ pkt->len -= parsed_len;
+ }
+ }
+ }
+}
+
+
diff --git a/pjnath/src/pjturn-srv/turn.h b/pjnath/src/pjturn-srv/turn.h
new file mode 100644
index 0000000..2fe9f9d
--- /dev/null
+++ b/pjnath/src/pjturn-srv/turn.h
@@ -0,0 +1,508 @@
+/* $Id: turn.h 3553 2011-05-05 06:14:19Z nanang $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __PJ_TURN_SRV_TURN_H__
+#define __PJ_TURN_SRV_TURN_H__
+
+#include <pjlib.h>
+#include <pjnath.h>
+
/* Forward declarations of the server's core object types. */
typedef struct pj_turn_relay_res pj_turn_relay_res;
typedef struct pj_turn_listener pj_turn_listener;
typedef struct pj_turn_transport pj_turn_transport;
typedef struct pj_turn_permission pj_turn_permission;
typedef struct pj_turn_allocation pj_turn_allocation;
typedef struct pj_turn_srv pj_turn_srv;
typedef struct pj_turn_pkt pj_turn_pkt;


/* Listener id value meaning "not attached to the server's array". */
#define PJ_TURN_INVALID_LIS_ID ((unsigned)-1)

/**
 * Get transport type name string (e.g. "UDP", "TCP") for logging.
 */
PJ_DECL(const char*) pj_turn_tp_type_name(int tp_type);
+
+/**
+ * This structure describes TURN relay resource. An allocation allocates
+ * one relay resource, and optionally it may reserve another resource.
+ */
+struct pj_turn_relay_res
+{
+ /** Hash table key */
+ struct {
+ /** Transport type. */
+ int tp_type;
+
+ /** Transport/relay address */
+ pj_sockaddr addr;
+ } hkey;
+
+ /** Allocation who requested or reserved this resource. */
+ pj_turn_allocation *allocation;
+
+ /** Username used in credential */
+ pj_str_t user;
+
+ /** Realm used in credential. */
+ pj_str_t realm;
+
+ /** Lifetime, in seconds. */
+ unsigned lifetime;
+
+ /** Relay/allocation expiration time */
+ pj_time_val expiry;
+
+ /** Timeout timer entry */
+ pj_timer_entry timer;
+
+ /** Transport. */
+ struct {
+ /** Transport/relay socket */
+ pj_sock_t sock;
+
+ /** Transport/relay ioqueue */
+ pj_ioqueue_key_t *key;
+
+ /** Read operation key. */
+ pj_ioqueue_op_key_t read_key;
+
+ /** The incoming packet buffer */
+ char rx_pkt[PJ_TURN_MAX_PKT_LEN];
+
+ /** Source address of the packet. */
+ pj_sockaddr src_addr;
+
+ /** Source address length */
+ int src_addr_len;
+
+ /** The outgoing packet buffer. This must be 3wbit aligned. */
+ char tx_pkt[PJ_TURN_MAX_PKT_LEN+4];
+ } tp;
+};
+
+
+/****************************************************************************/
+/*
+ * TURN Allocation API
+ */
+
+/**
+ * This structure describes key to lookup TURN allocations in the
+ * allocation hash table.
+ */
+typedef struct pj_turn_allocation_key
+{
+ int tp_type; /**< Transport type. */
+ pj_sockaddr clt_addr; /**< Client's address. */
+} pj_turn_allocation_key;
+
+
+/**
+ * This structure describes TURN pj_turn_allocation session.
+ */
+struct pj_turn_allocation
+{
+ /** Hash table key to identify client. */
+ pj_turn_allocation_key hkey;
+
+ /** Pool for this allocation. */
+ pj_pool_t *pool;
+
+ /** Object name for logging identification */
+ char *obj_name;
+
+ /** Client info (IP address and port) */
+ char info[80];
+
+ /** Mutex */
+ pj_lock_t *lock;
+
+ /** Server instance. */
+ pj_turn_srv *server;
+
+ /** Transport to send/receive packets to/from client. */
+ pj_turn_transport *transport;
+
+ /** The relay resource for this allocation. */
+ pj_turn_relay_res relay;
+
+ /** Relay resource reserved by this allocation, if any */
+ pj_turn_relay_res *resv;
+
+ /** Requested bandwidth */
+ unsigned bandwidth;
+
+ /** STUN session for this client */
+ pj_stun_session *sess;
+
+ /** Credential for this STUN session. */
+ pj_stun_auth_cred cred;
+
+ /** Peer hash table (keyed by peer address) */
+ pj_hash_table_t *peer_table;
+
+ /** Channel hash table (keyed by channel number) */
+ pj_hash_table_t *ch_table;
+};
+
+
+/**
+ * This structure describes the hash table key to lookup TURN
+ * permission.
+ */
+typedef struct pj_turn_permission_key
+{
+ /** Peer address. */
+ pj_sockaddr peer_addr;
+
+} pj_turn_permission_key;
+
+
+/**
+ * This structure describes TURN pj_turn_permission or channel.
+ */
+struct pj_turn_permission
+{
+ /** Hash table key */
+ pj_turn_permission_key hkey;
+
+ /** TURN allocation that owns this permission/channel */
+ pj_turn_allocation *allocation;
+
+ /** Optional channel number, or PJ_TURN_INVALID_CHANNEL if channel number
+ * is not requested for this permission.
+ */
+ pj_uint16_t channel;
+
+ /** Permission expiration time. */
+ pj_time_val expiry;
+};
+
+/**
+ * Create new allocation.
+ */
+PJ_DECL(pj_status_t) pj_turn_allocation_create(pj_turn_transport *transport,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len,
+ const pj_stun_rx_data *rdata,
+ pj_stun_session *srv_sess,
+ pj_turn_allocation **p_alloc);
+/**
+ * Destroy allocation.
+ */
+PJ_DECL(void) pj_turn_allocation_destroy(pj_turn_allocation *alloc);
+
+
+/**
+ * Handle incoming packet from client.
+ */
+PJ_DECL(void) pj_turn_allocation_on_rx_client_pkt(pj_turn_allocation *alloc,
+ pj_turn_pkt *pkt);
+
+/**
+ * Handle transport closure.
+ */
+PJ_DECL(void) pj_turn_allocation_on_transport_closed(pj_turn_allocation *alloc,
+ pj_turn_transport *tp);
+
+/****************************************************************************/
+/*
+ * TURN Listener API
+ */
+
+/**
+ * This structure describes TURN listener socket. A TURN listener socket
+ * listens for incoming connections from clients.
+ */
+struct pj_turn_listener
+{
+ /** Object name/identification */
+ char *obj_name;
+
+ /** Slightly longer info about this listener */
+ char info[80];
+
+ /** TURN server instance. */
+ pj_turn_srv *server;
+
+ /** Listener index in the server */
+ unsigned id;
+
+ /** Pool for this listener. */
+ pj_pool_t *pool;
+
+ /** Transport type. */
+ int tp_type;
+
+ /** Bound address of this listener. */
+ pj_sockaddr addr;
+
+ /** Socket. */
+ pj_sock_t sock;
+
+ /** Flags. */
+ unsigned flags;
+
+ /** Destroy handler */
+ pj_status_t (*destroy)(pj_turn_listener*);
+};
+
+
+/**
+ * This structure describes TURN transport socket which is used to send and
+ * receive packets towards client.
+ */
+struct pj_turn_transport
+{
+ /** Object name/identification */
+ char *obj_name;
+
+ /** Slightly longer info about this listener */
+ char *info;
+
+ /** Listener instance */
+ pj_turn_listener *listener;
+
+ /** Sendto handler */
+ pj_status_t (*sendto)(pj_turn_transport *tp,
+ const void *packet,
+ pj_size_t size,
+ unsigned flag,
+ const pj_sockaddr_t *addr,
+ int addr_len);
+
+ /** Addref handler */
+ void (*add_ref)(pj_turn_transport *tp,
+ pj_turn_allocation *alloc);
+
+ /** Decref handler */
+ void (*dec_ref)(pj_turn_transport *tp,
+ pj_turn_allocation *alloc);
+
+};
+
+
+/**
+ * An incoming packet.
+ */
+struct pj_turn_pkt
+{
+ /** Pool for this packet */
+ pj_pool_t *pool;
+
+ /** Transport where the packet was received. */
+ pj_turn_transport *transport;
+
+ /** Packet buffer (must be 32bit aligned). */
+ pj_uint8_t pkt[PJ_TURN_MAX_PKT_LEN];
+
+ /** Size of the packet */
+ pj_size_t len;
+
+ /** Arrival time. */
+ pj_time_val rx_time;
+
+ /** Source transport type and source address. */
+ pj_turn_allocation_key src;
+
+ /** Source address length. */
+ int src_addr_len;
+};
+
+
+/**
+ * Create a UDP listener on the specified port.
+ */
+PJ_DECL(pj_status_t) pj_turn_listener_create_udp(pj_turn_srv *srv,
+ int af,
+ const pj_str_t *bound_addr,
+ unsigned port,
+ unsigned concurrency_cnt,
+ unsigned flags,
+ pj_turn_listener **p_lis);
+
+/**
+ * Create a TCP listener on the specified port.
+ */
+PJ_DECL(pj_status_t) pj_turn_listener_create_tcp(pj_turn_srv *srv,
+ int af,
+ const pj_str_t *bound_addr,
+ unsigned port,
+ unsigned concurrency_cnt,
+ unsigned flags,
+ pj_turn_listener **p_lis);
+
+/**
+ * Destroy listener.
+ */
+PJ_DECL(pj_status_t) pj_turn_listener_destroy(pj_turn_listener *listener);
+
+
+/**
+ * Add a reference to a transport.
+ */
+PJ_DECL(void) pj_turn_transport_add_ref(pj_turn_transport *transport,
+ pj_turn_allocation *alloc);
+
+
+/**
+ * Decrement transport reference counter.
+ */
+PJ_DECL(void) pj_turn_transport_dec_ref(pj_turn_transport *transport,
+ pj_turn_allocation *alloc);
+
+
+
+/****************************************************************************/
+/*
+ * TURN Server API
+ */
+/**
+ * This structure describes TURN pj_turn_srv instance.
+ */
+struct pj_turn_srv
+{
+ /** Object name */
+ char *obj_name;
+
+ /** Core settings */
+ struct {
+ /** Pool factory */
+ pj_pool_factory *pf;
+
+ /** Pool for this server instance. */
+ pj_pool_t *pool;
+
+ /** Global Ioqueue */
+ pj_ioqueue_t *ioqueue;
+
+ /** Mutex */
+ pj_lock_t *lock;
+
+ /** Global timer heap instance. */
+ pj_timer_heap_t *timer_heap;
+
+ /** Number of listeners */
+ unsigned lis_cnt;
+
+ /** Array of listeners. */
+ pj_turn_listener **listener;
+
+ /** STUN session to handle initial Allocate request. */
+ pj_stun_session *stun_sess;
+
+ /** Number of worker threads. */
+ unsigned thread_cnt;
+
+ /** Array of worker threads. */
+ pj_thread_t **thread;
+
+ /** Thread quit signal */
+ pj_bool_t quit;
+
+ /** STUN config. */
+ pj_stun_config stun_cfg;
+
+ /** STUN auth credential. */
+ pj_stun_auth_cred cred;
+
+ /** Thread local ID for storing credential */
+ long tls_key, tls_data;
+
+ } core;
+
+
+ /** Hash tables */
+ struct {
+ /** Allocations hash table, indexed by transport type and
+ * client address.
+ */
+ pj_hash_table_t *alloc;
+
+ /** Relay resource hash table, indexed by transport type and
+ * relay address.
+ */
+ pj_hash_table_t *res;
+
+ } tables;
+
+ /** Ports settings */
+ struct {
+ /** Minimum UDP port number. */
+ pj_uint16_t min_udp;
+
+ /** Maximum UDP port number. */
+ pj_uint16_t max_udp;
+
+ /** Next UDP port number. */
+ pj_uint16_t next_udp;
+
+
+ /** Minimum TCP port number. */
+ pj_uint16_t min_tcp;
+
+ /** Maximum TCP port number. */
+ pj_uint16_t max_tcp;
+
+ /** Next TCP port number. */
+ pj_uint16_t next_tcp;
+
+ } ports;
+};
+
+
+/**
+ * Create server.
+ */
+PJ_DECL(pj_status_t) pj_turn_srv_create(pj_pool_factory *pf,
+ pj_turn_srv **p_srv);
+
+/**
+ * Destroy server.
+ */
+PJ_DECL(pj_status_t) pj_turn_srv_destroy(pj_turn_srv *srv);
+
+/**
+ * Add listener.
+ */
+PJ_DECL(pj_status_t) pj_turn_srv_add_listener(pj_turn_srv *srv,
+ pj_turn_listener *lis);
+
+/**
+ * Register an allocation.
+ */
+PJ_DECL(pj_status_t) pj_turn_srv_register_allocation(pj_turn_srv *srv,
+ pj_turn_allocation *alloc);
+
+/**
+ * Unregister an allocation.
+ */
+PJ_DECL(pj_status_t) pj_turn_srv_unregister_allocation(pj_turn_srv *srv,
+ pj_turn_allocation *alloc);
+
+/**
+ * This callback is called by UDP listener on incoming packet.
+ */
+PJ_DECL(void) pj_turn_srv_on_rx_pkt(pj_turn_srv *srv,
+ pj_turn_pkt *pkt);
+
+
+#endif /* __PJ_TURN_SRV_TURN_H__ */
+