summaryrefslogtreecommitdiff
path: root/pjnath/src/pjnath
diff options
context:
space:
mode:
Diffstat (limited to 'pjnath/src/pjnath')
-rw-r--r--pjnath/src/pjnath/errno.c216
-rw-r--r--pjnath/src/pjnath/ice_session.c2968
-rw-r--r--pjnath/src/pjnath/ice_strans.c1757
-rw-r--r--pjnath/src/pjnath/nat_detect.c911
-rw-r--r--pjnath/src/pjnath/stun_auth.c631
-rw-r--r--pjnath/src/pjnath/stun_msg.c2827
-rw-r--r--pjnath/src/pjnath/stun_msg_dump.c298
-rw-r--r--pjnath/src/pjnath/stun_session.c1436
-rw-r--r--pjnath/src/pjnath/stun_sock.c856
-rw-r--r--pjnath/src/pjnath/stun_transaction.c448
-rw-r--r--pjnath/src/pjnath/turn_session.c2040
-rw-r--r--pjnath/src/pjnath/turn_sock.c808
12 files changed, 15196 insertions, 0 deletions
diff --git a/pjnath/src/pjnath/errno.c b/pjnath/src/pjnath/errno.c
new file mode 100644
index 0000000..389e9ad
--- /dev/null
+++ b/pjnath/src/pjnath/errno.c
@@ -0,0 +1,216 @@
+/* $Id: errno.c 3553 2011-05-05 06:14:19Z nanang $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <pjnath/errno.h>
+#include <pjnath/stun_msg.h>
+#include <pj/assert.h>
+#include <pj/log.h>
+#include <pj/string.h>
+
+
+
/* PJNATH's own error codes/messages.
 * MUST KEEP THIS ARRAY SORTED by error code, because pjnath_strerror()
 * locates entries with a binary search.
 * Message must be limited to 64 chars!
 */
#if defined(PJ_HAS_ERROR_STRING) && PJ_HAS_ERROR_STRING!=0
static const struct
{
    int code;
    const char *msg;
} err_str[] =
{
    /* STUN related error codes */
    PJ_BUILD_ERR( PJNATH_EINSTUNMSG,        "Invalid STUN message"),
    PJ_BUILD_ERR( PJNATH_EINSTUNMSGLEN,     "Invalid STUN message length"),
    PJ_BUILD_ERR( PJNATH_EINSTUNMSGTYPE,    "Invalid or unexpected STUN message type"),
    PJ_BUILD_ERR( PJNATH_ESTUNTIMEDOUT,     "STUN transaction has timed out"),

    PJ_BUILD_ERR( PJNATH_ESTUNTOOMANYATTR,  "Too many STUN attributes"),
    PJ_BUILD_ERR( PJNATH_ESTUNINATTRLEN,    "Invalid STUN attribute length"),
    PJ_BUILD_ERR( PJNATH_ESTUNDUPATTR,      "Found duplicate STUN attribute"),

    PJ_BUILD_ERR( PJNATH_ESTUNFINGERPRINT,  "STUN FINGERPRINT verification failed"),
    PJ_BUILD_ERR( PJNATH_ESTUNMSGINTPOS,    "Invalid STUN attribute after MESSAGE-INTEGRITY"),
    PJ_BUILD_ERR( PJNATH_ESTUNFINGERPOS,    "Invalid STUN attribute after FINGERPRINT"),

    PJ_BUILD_ERR( PJNATH_ESTUNNOMAPPEDADDR, "STUN (XOR-)MAPPED-ADDRESS attribute not found"),
    PJ_BUILD_ERR( PJNATH_ESTUNIPV6NOTSUPP,  "STUN IPv6 attribute not supported"),
    PJ_BUILD_ERR( PJNATH_EINVAF,            "Invalid STUN address family value"),
    PJ_BUILD_ERR( PJNATH_ESTUNINSERVER,     "Invalid STUN server or server not configured"),

    /* Typo fix: message previously read "destoyed" */
    PJ_BUILD_ERR( PJNATH_ESTUNDESTROYED,    "STUN object has been destroyed"),

    /* ICE related errors */
    PJ_BUILD_ERR( PJNATH_ENOICE,            "ICE session not available"),
    PJ_BUILD_ERR( PJNATH_EICEINPROGRESS,    "ICE check is in progress"),
    PJ_BUILD_ERR( PJNATH_EICEFAILED,        "All ICE checklists failed"),
    PJ_BUILD_ERR( PJNATH_EICEMISMATCH,      "Default target doesn't match any ICE candidates"),
    PJ_BUILD_ERR( PJNATH_EICEINCOMPID,      "Invalid ICE component ID"),
    PJ_BUILD_ERR( PJNATH_EICEINCANDID,      "Invalid ICE candidate ID"),
    PJ_BUILD_ERR( PJNATH_EICEINSRCADDR,     "Source address mismatch"),
    PJ_BUILD_ERR( PJNATH_EICEMISSINGSDP,    "Missing ICE SDP attribute"),
    PJ_BUILD_ERR( PJNATH_EICEINCANDSDP,     "Invalid SDP \"candidate\" attribute"),
    PJ_BUILD_ERR( PJNATH_EICENOHOSTCAND,    "No host candidate associated with srflx"),
    PJ_BUILD_ERR( PJNATH_EICENOMTIMEOUT,    "Controlled agent timed out waiting for nomination"),

    /* TURN related errors */
    PJ_BUILD_ERR( PJNATH_ETURNINTP,         "Invalid/unsupported transport"),

};
#endif	/* PJ_HAS_ERROR_STRING */
+
+
/*
 * pjnath_strerror()
 *
 * Error message handler registered with pjlib for the PJNATH error
 * space. Looks up the message for statcode in err_str[] (binary
 * search; the table must stay sorted) and copies it into buf.
 * Falls back to "Unknown pjnath error %d" for unknown codes.
 */
static pj_str_t pjnath_strerror(pj_status_t statcode,
				char *buf, pj_size_t bufsize )
{
    pj_str_t errstr;

#if defined(PJ_HAS_ERROR_STRING) && (PJ_HAS_ERROR_STRING != 0)

    if (statcode >= PJNATH_ERRNO_START &&
	statcode < PJNATH_ERRNO_START + PJ_ERRNO_SPACE_SIZE)
    {
	/* Find the error in the table.
	 * Use binary search!
	 */
	int first = 0;
	int n = PJ_ARRAY_SIZE(err_str);

	while (n > 0) {
	    int half = n/2;
	    int mid = first + half;

	    if (err_str[mid].code < statcode) {
		/* Target is in the upper half */
		first = mid+1;
		n -= (half+1);
	    } else if (err_str[mid].code > statcode) {
		/* Target is in the lower half */
		n = half;
	    } else {
		/* Exact match */
		first = mid;
		break;
	    }
	}


	if (PJ_ARRAY_SIZE(err_str) && err_str[first].code == statcode) {
	    pj_str_t msg;

	    msg.ptr = (char*)err_str[first].msg;
	    msg.slen = pj_ansi_strlen(err_str[first].msg);

	    errstr.ptr = buf;
	    /* Bounded copy; always NUL-terminates within bufsize */
	    pj_strncpy_with_null(&errstr, &msg, bufsize);
	    return errstr;

	}
    }

#endif	/* PJ_HAS_ERROR_STRING */


    /* Error not found. */
    errstr.ptr = buf;
    errstr.slen = pj_ansi_snprintf(buf, bufsize,
				   "Unknown pjnath error %d",
				   statcode);
    /* Clamp: snprintf may return negative on error, or the would-be
     * length (> bufsize) on truncation.
     */
    if (errstr.slen < 0) errstr.slen = 0;
    else if (errstr.slen > (int)bufsize) errstr.slen = bufsize;

    return errstr;
}
+
+
/*
 * pjnath_strerror2()
 *
 * Error message handler for pj_status_t values derived from STUN
 * error codes (PJ_STATUS_FROM_STUN_CODE). Recovers the STUN code and
 * copies the standard reason phrase into buf; falls back to
 * "Unknown STUN err-code %d" when no reason phrase is defined.
 */
static pj_str_t pjnath_strerror2(pj_status_t statcode,
				 char *buf, pj_size_t bufsize )
{
    /* Map the pj_status_t back to the raw STUN error code */
    int stun_code = statcode - PJ_STATUS_FROM_STUN_CODE(0);
    const pj_str_t cmsg = pj_stun_get_err_reason(stun_code);
    pj_str_t errstr;

    /* Guarantee termination regardless of which branch runs below */
    buf[bufsize-1] = '\0';

    if (cmsg.slen == 0) {
	/* Not found */
	errstr.ptr = buf;
	errstr.slen = pj_ansi_snprintf(buf, bufsize,
				       "Unknown STUN err-code %d",
				       stun_code);
    } else {
	errstr.ptr = buf;
	/* pj_strncpy() does not NUL-terminate; do it manually */
	pj_strncpy(&errstr, &cmsg, bufsize);
	if (errstr.slen < (int)bufsize)
	    buf[errstr.slen] = '\0';
	else
	    buf[bufsize-1] = '\0';
    }

    /* Clamp snprintf error / truncation results */
    if (errstr.slen < 0) errstr.slen = 0;
    else if (errstr.slen > (int)bufsize) errstr.slen = bufsize;

    return errstr;
}
+
+
/* Register the PJNATH error message handlers with pjlib so that
 * pj_strerror() can resolve both PJNATH's own error space and
 * STUN-derived error codes (300..699).
 */
PJ_DEF(pj_status_t) pjnath_init(void)
{
    pj_status_t status;

    /* PJNATH's own error space: 299 codes from PJNATH_ERRNO_START */
    status = pj_register_strerror(PJNATH_ERRNO_START, 299,
				  &pjnath_strerror);
    /* NOTE(review): failures are only caught by pj_assert, which is
     * a no-op in release builds; 'status' is otherwise unused.
     */
    pj_assert(status == PJ_SUCCESS);

    /* STUN error-code space: codes 300..699 */
    status = pj_register_strerror(PJ_STATUS_FROM_STUN_CODE(300),
				  699 - 300,
				  &pjnath_strerror2);
    pj_assert(status == PJ_SUCCESS);

    return PJ_SUCCESS;
}
+
+
#if PJNATH_ERROR_LEVEL <= PJ_LOG_MAX_LEVEL

/* Log "title: <error message>" for the given status at the log level
 * selected at compile time by PJNATH_ERROR_LEVEL (1..5). The whole
 * function is compiled out when the level exceeds PJ_LOG_MAX_LEVEL.
 */
PJ_DEF(void) pjnath_perror(const char *sender, const char *title,
			   pj_status_t status)
{
    char errmsg[PJ_ERR_MSG_SIZE];

    /* Resolve the status code into a human-readable message */
    pj_strerror(status, errmsg, sizeof(errmsg));

    /* PJ_LOG requires a literal level, hence the #if chain */
#if PJNATH_ERROR_LEVEL==1
    PJ_LOG(1,(sender, "%s: %s", title, errmsg));
#elif PJNATH_ERROR_LEVEL==2
    PJ_LOG(2,(sender, "%s: %s", title, errmsg));
#elif PJNATH_ERROR_LEVEL==3
    PJ_LOG(3,(sender, "%s: %s", title, errmsg));
#elif PJNATH_ERROR_LEVEL==4
    PJ_LOG(4,(sender, "%s: %s", title, errmsg));
#elif PJNATH_ERROR_LEVEL==5
    PJ_LOG(5,(sender, "%s: %s", title, errmsg));
#else
# error Invalid PJNATH_ERROR_LEVEL value
#endif
}

#endif	/* PJNATH_ERROR_LEVEL <= PJ_LOG_MAX_LEVEL */
+
diff --git a/pjnath/src/pjnath/ice_session.c b/pjnath/src/pjnath/ice_session.c
new file mode 100644
index 0000000..05f39bc
--- /dev/null
+++ b/pjnath/src/pjnath/ice_session.c
@@ -0,0 +1,2968 @@
+/* $Id: ice_session.c 3999 2012-03-30 07:10:13Z bennylp $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <pjnath/ice_session.h>
+#include <pj/addr_resolv.h>
+#include <pj/array.h>
+#include <pj/assert.h>
+#include <pj/guid.h>
+#include <pj/hash.h>
+#include <pj/log.h>
+#include <pj/os.h>
+#include <pj/pool.h>
+#include <pj/rand.h>
+#include <pj/string.h>
+
/* String names for candidate types, indexed by pj_ice_cand_type */
static const char *cand_type_names[] =
{
    "host",
    "srflx",
    "prflx",
    "relay"

};

/* String names for pj_ice_sess_check_state, indexed by state value.
 * Only needed when level-4 logging is compiled in.
 */
#if PJ_LOG_MAX_LEVEL >= 4
static const char *check_state_name[] =
{
    "Frozen",
    "Waiting",
    "In Progress",
    "Succeeded",
    "Failed"
};

/* String names for pj_ice_sess_checklist_state, indexed by state */
static const char *clist_state_name[] =
{
    "Idle",
    "Running",
    "Completed"
};
#endif	/* PJ_LOG_MAX_LEVEL >= 4 */

/* String names for pj_ice_sess_role, indexed by role value */
static const char *role_names[] =
{
    "Unknown",
    "Controlled",
    "Controlling"
};
+
/* Timer purposes multiplexed on the session's single timer entry
 * (ice->timer); the active purpose is stored in the timer's id field.
 */
enum timer_type
{
    TIMER_NONE,			/**< Timer not active		    */
    TIMER_COMPLETION_CALLBACK,	/**< Call on_ice_complete() callback */
    TIMER_CONTROLLED_WAIT_NOM,	/**< Controlled agent is waiting for
				     controlling agent to send connectivity
				     check with nominated flag after it has
				     valid check for every components.	*/
    TIMER_START_NOMINATED_CHECK,/**< Controlling agent start connectivity
				     checks with USE-CANDIDATE flag.	*/
    TIMER_KEEP_ALIVE		/**< ICE keep-alive timer.	    */

};
+
/* Candidate type preference table, indexed by pj_ice_cand_type.
 * This is the default for ice->prefs and feeds CALC_CAND_PRIO().
 */
static pj_uint8_t cand_type_prefs[4] =
{
#if PJ_ICE_CAND_TYPE_PREF_BITS < 8
    /* Keep it to 2 bits */
    3,	    /**< PJ_ICE_HOST_PREF    */
    1,	    /**< PJ_ICE_SRFLX_PREF.  */
    2,	    /**< PJ_ICE_PRFLX_PREF   */
    0	    /**< PJ_ICE_RELAYED_PREF */
#else
    /* Default ICE session preferences, according to draft-ice */
    126,    /**< PJ_ICE_HOST_PREF    */
    100,    /**< PJ_ICE_SRFLX_PREF.  */
    110,    /**< PJ_ICE_PRFLX_PREF   */
    0	    /**< PJ_ICE_RELAYED_PREF */
#endif
};

#define CHECK_NAME_LEN		128
#define LOG4(expr)		PJ_LOG(4,expr)
/* NOTE(review): LOG5 maps to log level 4, same as LOG4 -- confirm
 * this is intentional rather than a typo.
 */
#define LOG5(expr)		PJ_LOG(4,expr)
/* Index of a local candidate within ice->lcand[] (ptr arithmetic) */
#define GET_LCAND_ID(cand)	(cand - ice->lcand)
/* Index of a check within a checklist's checks[] array */
#define GET_CHECK_ID(cl, chk)	(chk - (cl)->checks)
+
+
/* The data that will be attached to the STUN session on each
 * component (see init_comp(); retrieved via
 * pj_stun_session_get_user_data() in the STUN callbacks).
 */
typedef struct stun_data
{
    pj_ice_sess		*ice;	    /* Owning ICE session	   */
    unsigned		 comp_id;   /* 1-based component ID	   */
    pj_ice_sess_comp	*comp;	    /* The component itself	   */
} stun_data;


/* The data that will be attached to the timer to perform
 * periodic check.
 */
typedef struct timer_data
{
    pj_ice_sess		    *ice;    /* Owning ICE session	   */
    pj_ice_sess_checklist   *clist;  /* Checklist being driven	   */
} timer_data;
+
+
+/* This is the data that will be attached as token to outgoing
+ * STUN messages.
+ */
+
+
+/* Forward declarations */
+static void on_timer(pj_timer_heap_t *th, pj_timer_entry *te);
+static void on_ice_complete(pj_ice_sess *ice, pj_status_t status);
+static void ice_keep_alive(pj_ice_sess *ice, pj_bool_t send_now);
+static void destroy_ice(pj_ice_sess *ice,
+ pj_status_t reason);
+static pj_status_t start_periodic_check(pj_timer_heap_t *th,
+ pj_timer_entry *te);
+static void start_nominated_check(pj_ice_sess *ice);
+static void periodic_timer(pj_timer_heap_t *th,
+ pj_timer_entry *te);
+static void handle_incoming_check(pj_ice_sess *ice,
+ const pj_ice_rx_check *rcheck);
+
+/* These are the callbacks registered to the STUN sessions */
+static pj_status_t on_stun_send_msg(pj_stun_session *sess,
+ void *token,
+ const void *pkt,
+ pj_size_t pkt_size,
+ const pj_sockaddr_t *dst_addr,
+ unsigned addr_len);
+static pj_status_t on_stun_rx_request(pj_stun_session *sess,
+ const pj_uint8_t *pkt,
+ unsigned pkt_len,
+ const pj_stun_rx_data *rdata,
+ void *token,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len);
+static void on_stun_request_complete(pj_stun_session *stun_sess,
+ pj_status_t status,
+ void *token,
+ pj_stun_tx_data *tdata,
+ const pj_stun_msg *response,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len);
+static pj_status_t on_stun_rx_indication(pj_stun_session *sess,
+ const pj_uint8_t *pkt,
+ unsigned pkt_len,
+ const pj_stun_msg *msg,
+ void *token,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len);
+
+/* These are the callbacks for performing STUN authentication */
+static pj_status_t stun_auth_get_auth(void *user_data,
+ pj_pool_t *pool,
+ pj_str_t *realm,
+ pj_str_t *nonce);
+static pj_status_t stun_auth_get_cred(const pj_stun_msg *msg,
+ void *user_data,
+ pj_pool_t *pool,
+ pj_str_t *realm,
+ pj_str_t *username,
+ pj_str_t *nonce,
+ pj_stun_passwd_type *data_type,
+ pj_str_t *data);
+static pj_status_t stun_auth_get_password(const pj_stun_msg *msg,
+ void *user_data,
+ const pj_str_t *realm,
+ const pj_str_t *username,
+ pj_pool_t *pool,
+ pj_stun_passwd_type *data_type,
+ pj_str_t *data);
+
+
/* Return the descriptive name for a candidate type ("host", "srflx",
 * "prflx" or "relay"); returns "???" for out-of-range values.
 */
PJ_DEF(const char*) pj_ice_get_cand_type_name(pj_ice_cand_type type)
{
    PJ_ASSERT_RETURN(type <= PJ_ICE_CAND_TYPE_RELAYED, "???");
    return cand_type_names[type];
}
+
+
+PJ_DEF(const char*) pj_ice_sess_role_name(pj_ice_sess_role role)
+{
+ switch (role) {
+ case PJ_ICE_SESS_ROLE_UNKNOWN:
+ return "Unknown";
+ case PJ_ICE_SESS_ROLE_CONTROLLED:
+ return "Controlled";
+ case PJ_ICE_SESS_ROLE_CONTROLLING:
+ return "Controlling";
+ default:
+ return "??";
+ }
+}
+
+
+/* Get the prefix for the foundation */
+static int get_type_prefix(pj_ice_cand_type type)
+{
+ switch (type) {
+ case PJ_ICE_CAND_TYPE_HOST: return 'H';
+ case PJ_ICE_CAND_TYPE_SRFLX: return 'S';
+ case PJ_ICE_CAND_TYPE_PRFLX: return 'P';
+ case PJ_ICE_CAND_TYPE_RELAYED: return 'R';
+ default:
+ pj_assert(!"Invalid type");
+ return 'U';
+ }
+}
+
/* Calculate foundation:
 * Two candidates have the same foundation when they are "similar" - of
 * the same type and obtained from the same host candidate and STUN
 * server using the same protocol. Otherwise, their foundation is
 * different.
 */
PJ_DEF(void) pj_ice_calc_foundation(pj_pool_t *pool,
				    pj_str_t *foundation,
				    pj_ice_cand_type type,
				    const pj_sockaddr *base_addr)
{
#if PJNATH_ICE_PRIO_STD
    char buf[64];
    pj_uint32_t val;

    /* Derive a 32-bit value from the base address: host-order IPv4
     * address directly, or a hash of the raw address otherwise.
     */
    if (base_addr->addr.sa_family == pj_AF_INET()) {
	val = pj_ntohl(base_addr->ipv4.sin_addr.s_addr);
    } else {
	val = pj_hash_calc(0, pj_sockaddr_get_addr(base_addr),
			   pj_sockaddr_get_addr_len(base_addr));
    }
    /* Foundation is "<type prefix><hex value>", e.g. "Hc0a80001" */
    pj_ansi_snprintf(buf, sizeof(buf), "%c%x",
		     get_type_prefix(type), val);
    pj_strdup2(pool, foundation, buf);
#else
    /* Much shorter version, valid for candidates added by
     * pj_ice_strans: just the one-character type prefix.
     */
    foundation->ptr = (char*) pj_pool_alloc(pool, 1);
    *foundation->ptr = (char)get_type_prefix(type);
    foundation->slen = 1;

    PJ_UNUSED_ARG(base_addr);
#endif
}
+
+
/* Init component: create the component's STUN session, attach the
 * per-component stun_data, and install the short-term credential
 * callbacks. Called once per component from pj_ice_sess_create().
 */
static pj_status_t init_comp(pj_ice_sess *ice,
			     unsigned comp_id,
			     pj_ice_sess_comp *comp)
{
    pj_stun_session_cb sess_cb;
    pj_stun_auth_cred auth_cred;
    stun_data *sd;
    pj_status_t status;

    /* Init STUN callbacks */
    pj_bzero(&sess_cb, sizeof(sess_cb));
    sess_cb.on_request_complete = &on_stun_request_complete;
    sess_cb.on_rx_indication = &on_stun_rx_indication;
    sess_cb.on_rx_request = &on_stun_rx_request;
    sess_cb.on_send_msg = &on_stun_send_msg;

    /* Create STUN session for this candidate */
    status = pj_stun_session_create(&ice->stun_cfg, NULL,
			            &sess_cb, PJ_TRUE,
				    &comp->stun_sess);
    if (status != PJ_SUCCESS)
	return status;

    /* Associate data with this STUN session, so the STUN callbacks
     * can find their way back to the ICE session and component.
     */
    sd = PJ_POOL_ZALLOC_T(ice->pool, struct stun_data);
    sd->ice = ice;
    sd->comp_id = comp_id;
    sd->comp = comp;
    pj_stun_session_set_user_data(comp->stun_sess, sd);

    /* Init STUN authentication credential: dynamic short-term
     * credentials resolved via the stun_auth_* callbacks below.
     */
    pj_bzero(&auth_cred, sizeof(auth_cred));
    auth_cred.type = PJ_STUN_AUTH_CRED_DYNAMIC;
    auth_cred.data.dyn_cred.get_auth = &stun_auth_get_auth;
    auth_cred.data.dyn_cred.get_cred = &stun_auth_get_cred;
    auth_cred.data.dyn_cred.get_password = &stun_auth_get_password;
    auth_cred.data.dyn_cred.user_data = comp->stun_sess;
    pj_stun_session_set_credential(comp->stun_sess, PJ_STUN_AUTH_SHORT_TERM,
				   &auth_cred);

    return PJ_SUCCESS;
}
+
+
/* Init options with default values: aggressive nomination on, and
 * library-default timeouts for nominated-check delay and the
 * controlled agent's wait-for-nomination timer.
 */
PJ_DEF(void) pj_ice_sess_options_default(pj_ice_sess_options *opt)
{
    opt->aggressive = PJ_TRUE;
    opt->nominated_check_delay = PJ_ICE_NOMINATED_CHECK_DELAY;
    opt->controlled_agent_want_nom_timeout =
	ICE_CONTROLLED_AGENT_WAIT_NOMINATION_TIMEOUT;
}
+
/*
 * Create ICE session.
 *
 * Allocates the session pool, initializes role/tie-breaker/options,
 * creates one STUN session per component, and generates (or copies)
 * the local ufrag/password. On any failure the partially-built
 * session is torn down via destroy_ice() and the error returned.
 */
PJ_DEF(pj_status_t) pj_ice_sess_create(pj_stun_config *stun_cfg,
				       const char *name,
				       pj_ice_sess_role role,
				       unsigned comp_cnt,
				       const pj_ice_sess_cb *cb,
				       const pj_str_t *local_ufrag,
				       const pj_str_t *local_passwd,
				       pj_ice_sess **p_ice)
{
    pj_pool_t *pool;
    pj_ice_sess *ice;
    unsigned i;
    pj_status_t status;

    PJ_ASSERT_RETURN(stun_cfg && cb && p_ice, PJ_EINVAL);

    /* Default object name is a printf template filled with the
     * session pointer below.
     */
    if (name == NULL)
	name = "icess%p";

    pool = pj_pool_create(stun_cfg->pf, name, PJNATH_POOL_LEN_ICE_SESS,
			  PJNATH_POOL_INC_ICE_SESS, NULL);
    ice = PJ_POOL_ZALLOC_T(pool, pj_ice_sess);
    ice->pool = pool;
    ice->role = role;
    /* Random 64-bit tie-breaker for role conflict resolution */
    ice->tie_breaker.u32.hi = pj_rand();
    ice->tie_breaker.u32.lo = pj_rand();
    ice->prefs = cand_type_prefs;
    pj_ice_sess_options_default(&ice->opt);

    pj_timer_entry_init(&ice->timer, TIMER_NONE, (void*)ice, &on_timer);

    pj_ansi_snprintf(ice->obj_name, sizeof(ice->obj_name),
		     name, ice);

    status = pj_mutex_create_recursive(pool, ice->obj_name,
				       &ice->mutex);
    if (status != PJ_SUCCESS) {
	destroy_ice(ice, status);
	return status;
    }

    pj_memcpy(&ice->cb, cb, sizeof(*cb));
    pj_memcpy(&ice->stun_cfg, stun_cfg, sizeof(*stun_cfg));

    /* Create a STUN session for every component */
    ice->comp_cnt = comp_cnt;
    for (i=0; i<comp_cnt; ++i) {
	pj_ice_sess_comp *comp;
	comp = &ice->comp[i];
	comp->valid_check = NULL;
	comp->nominated_check = NULL;

	status = init_comp(ice, i+1, comp);
	if (status != PJ_SUCCESS) {
	    destroy_ice(ice, status);
	    return status;
	}
    }

    /* Initialize transport datas */
    for (i=0; i<PJ_ARRAY_SIZE(ice->tp_data); ++i) {
	ice->tp_data[i].transport_id = i;
	ice->tp_data[i].has_req_data = PJ_FALSE;
    }

    /* Local ufrag: random if not supplied by caller */
    if (local_ufrag == NULL) {
	ice->rx_ufrag.ptr = (char*) pj_pool_alloc(ice->pool, PJ_ICE_UFRAG_LEN);
	pj_create_random_string(ice->rx_ufrag.ptr, PJ_ICE_UFRAG_LEN);
	ice->rx_ufrag.slen = PJ_ICE_UFRAG_LEN;
    } else {
	pj_strdup(ice->pool, &ice->rx_ufrag, local_ufrag);
    }

    /* Local password: random if not supplied by caller.
     * NOTE(review): the password length reuses PJ_ICE_UFRAG_LEN --
     * looks intentional, but confirm there is no separate constant.
     */
    if (local_passwd == NULL) {
	ice->rx_pass.ptr = (char*) pj_pool_alloc(ice->pool, PJ_ICE_UFRAG_LEN);
	pj_create_random_string(ice->rx_pass.ptr, PJ_ICE_UFRAG_LEN);
	ice->rx_pass.slen = PJ_ICE_UFRAG_LEN;
    } else {
	pj_strdup(ice->pool, &ice->rx_pass, local_passwd);
    }

    pj_list_init(&ice->early_check);

    /* Done */
    *p_ice = ice;

    LOG4((ice->obj_name,
	 "ICE session created, comp_cnt=%d, role is %s agent",
	 comp_cnt, role_names[ice->role]));

    return PJ_SUCCESS;
}
+
+
+/*
+ * Get the value of various options of the ICE session.
+ */
+PJ_DEF(pj_status_t) pj_ice_sess_get_options(pj_ice_sess *ice,
+ pj_ice_sess_options *opt)
+{
+ PJ_ASSERT_RETURN(ice, PJ_EINVAL);
+ pj_memcpy(opt, &ice->opt, sizeof(*opt));
+ return PJ_SUCCESS;
+}
+
+/*
+ * Specify various options for this ICE session.
+ */
+PJ_DEF(pj_status_t) pj_ice_sess_set_options(pj_ice_sess *ice,
+ const pj_ice_sess_options *opt)
+{
+ PJ_ASSERT_RETURN(ice && opt, PJ_EINVAL);
+ pj_memcpy(&ice->opt, opt, sizeof(*opt));
+ LOG5((ice->obj_name, "ICE nomination type set to %s",
+ (ice->opt.aggressive ? "aggressive" : "regular")));
+ return PJ_SUCCESS;
+}
+
+
/*
 * Destroy the session: cancel timers, destroy per-component STUN
 * sessions, then the mutex and finally the pool. Also used for
 * cleanup of a partially-constructed session (reason != PJ_SUCCESS),
 * hence the NULL checks on every member.
 */
static void destroy_ice(pj_ice_sess *ice,
			pj_status_t reason)
{
    unsigned i;

    if (reason == PJ_SUCCESS) {
	LOG4((ice->obj_name, "Destroying ICE session"));
    }

    /* Let other callbacks finish: briefly acquiring the mutex
     * ensures no other thread is still inside a locked section.
     */
    if (ice->mutex) {
	pj_mutex_lock(ice->mutex);
	pj_mutex_unlock(ice->mutex);
    }

    /* Cancel the session's multiplexed timer, if armed */
    if (ice->timer.id) {
	pj_timer_heap_cancel(ice->stun_cfg.timer_heap,
			     &ice->timer);
	ice->timer.id = PJ_FALSE;
    }

    /* Destroy each component's STUN session */
    for (i=0; i<ice->comp_cnt; ++i) {
	if (ice->comp[i].stun_sess) {
	    pj_stun_session_destroy(ice->comp[i].stun_sess);
	    ice->comp[i].stun_sess = NULL;
	}
    }

    /* Cancel the checklist's periodic-check timer, if armed */
    if (ice->clist.timer.id) {
	pj_timer_heap_cancel(ice->stun_cfg.timer_heap, &ice->clist.timer);
	ice->clist.timer.id = PJ_FALSE;
    }

    if (ice->mutex) {
	pj_mutex_destroy(ice->mutex);
	ice->mutex = NULL;
    }

    /* Release the pool last; it backs the session object itself */
    if (ice->pool) {
	pj_pool_t *pool = ice->pool;
	ice->pool = NULL;
	pj_pool_release(pool);
    }
}
+
+
/*
 * Destroy the ICE session (public API): thin wrapper over
 * destroy_ice() with a "normal shutdown" reason.
 */
PJ_DEF(pj_status_t) pj_ice_sess_destroy(pj_ice_sess *ice)
{
    PJ_ASSERT_RETURN(ice, PJ_EINVAL);
    destroy_ice(ice, PJ_SUCCESS);
    return PJ_SUCCESS;
}
+
+
+/*
+ * Change session role.
+ */
+PJ_DEF(pj_status_t) pj_ice_sess_change_role(pj_ice_sess *ice,
+ pj_ice_sess_role new_role)
+{
+ PJ_ASSERT_RETURN(ice, PJ_EINVAL);
+
+ if (new_role != ice->role) {
+ ice->role = new_role;
+ LOG4((ice->obj_name, "Role changed to %s", role_names[new_role]));
+ }
+
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Change type preference
+ */
+PJ_DEF(pj_status_t) pj_ice_sess_set_prefs(pj_ice_sess *ice,
+ const pj_uint8_t prefs[4])
+{
+ unsigned i;
+ PJ_ASSERT_RETURN(ice && prefs, PJ_EINVAL);
+ ice->prefs = (pj_uint8_t*) pj_pool_calloc(ice->pool, PJ_ARRAY_SIZE(prefs),
+ sizeof(pj_uint8_t));
+ for (i=0; i<4; ++i) {
+#if PJ_ICE_CAND_TYPE_PREF_BITS < 8
+ pj_assert(prefs[i] < (2 << PJ_ICE_CAND_TYPE_PREF_BITS));
+#endif
+ ice->prefs[i] = prefs[i];
+ }
+ return PJ_SUCCESS;
+}
+
+
/* Find component by ID. Component IDs are 1-based and must be in
 * 1..ice->comp_cnt (asserted). Constness is cast away so callers
 * holding a const session can still get a mutable component.
 */
static pj_ice_sess_comp *find_comp(const pj_ice_sess *ice, unsigned comp_id)
{
    pj_assert(comp_id > 0 && comp_id <= ice->comp_cnt);
    return (pj_ice_sess_comp*) &ice->comp[comp_id-1];
}
+
+
+/* Callback by STUN authentication when it needs to send 401 */
+static pj_status_t stun_auth_get_auth(void *user_data,
+ pj_pool_t *pool,
+ pj_str_t *realm,
+ pj_str_t *nonce)
+{
+ PJ_UNUSED_ARG(user_data);
+ PJ_UNUSED_ARG(pool);
+
+ realm->slen = 0;
+ nonce->slen = 0;
+
+ return PJ_SUCCESS;
+}
+
+
/* Get credential to be sent with outgoing message.
 * Responses echo the RX (local) credential of the request being
 * answered; requests carry the TX (remote-derived) credential.
 * user_data is the pj_stun_session set up in init_comp().
 */
static pj_status_t stun_auth_get_cred(const pj_stun_msg *msg,
				      void *user_data,
				      pj_pool_t *pool,
				      pj_str_t *realm,
				      pj_str_t *username,
				      pj_str_t *nonce,
				      pj_stun_passwd_type *data_type,
				      pj_str_t *data)
{
    pj_stun_session *sess = (pj_stun_session *)user_data;
    stun_data *sd = (stun_data*) pj_stun_session_get_user_data(sess);
    pj_ice_sess *ice = sd->ice;

    PJ_UNUSED_ARG(pool);
    /* Short-term credentials: no realm, no nonce */
    realm->slen = nonce->slen = 0;

    if (PJ_STUN_IS_RESPONSE(msg->hdr.type)) {
	/* Outgoing responses need to have the same credential as
	 * incoming requests.
	 */
	*username = ice->rx_uname;
	*data_type = PJ_STUN_PASSWD_PLAIN;
	*data = ice->rx_pass;
    }
    else {
	/* Outgoing requests use the TX credential */
	*username = ice->tx_uname;
	*data_type = PJ_STUN_PASSWD_PLAIN;
	*data = ice->tx_pass;
    }

    return PJ_SUCCESS;
}
+
/* Get password to be used to authenticate incoming message.
 * Responses are verified against the TX credential; requests against
 * the RX credential, where only the ufrag part of the
 * "remote-ufrag:local-ufrag" username is validated here.
 */
static pj_status_t stun_auth_get_password(const pj_stun_msg *msg,
					  void *user_data,
					  const pj_str_t *realm,
					  const pj_str_t *username,
					  pj_pool_t *pool,
					  pj_stun_passwd_type *data_type,
					  pj_str_t *data)
{
    pj_stun_session *sess = (pj_stun_session *)user_data;
    stun_data *sd = (stun_data*) pj_stun_session_get_user_data(sess);
    pj_ice_sess *ice = sd->ice;

    PJ_UNUSED_ARG(realm);
    PJ_UNUSED_ARG(pool);

    if (PJ_STUN_IS_SUCCESS_RESPONSE(msg->hdr.type) ||
	PJ_STUN_IS_ERROR_RESPONSE(msg->hdr.type))
    {
	/* Incoming response is authenticated with TX credential */
	/* Verify username */
	if (pj_strcmp(username, &ice->tx_uname) != 0)
	    return PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_UNAUTHORIZED);
	*data_type = PJ_STUN_PASSWD_PLAIN;
	*data = ice->tx_pass;

    } else {
	/* Incoming request is authenticated with RX credential */
	/* The agent MUST accept a credential if the username consists
	 * of two values separated by a colon, where the first value is
	 * equal to the username fragment generated by the agent in an offer
	 * or answer for a session in-progress, and the MESSAGE-INTEGRITY
	 * is the output of a hash of the password and the STUN packet's
	 * contents.
	 */
	const char *pos;
	pj_str_t ufrag;

	/* Split "ufrag:rest" at the first colon */
	pos = (const char*)pj_memchr(username->ptr, ':', username->slen);
	if (pos == NULL)
	    return PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_UNAUTHORIZED);

	ufrag.ptr = (char*)username->ptr;
	ufrag.slen = (pos - username->ptr);

	/* The first part must match our local ufrag */
	if (pj_strcmp(&ufrag, &ice->rx_ufrag) != 0)
	    return PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_UNAUTHORIZED);

	*data_type = PJ_STUN_PASSWD_PLAIN;
	*data = ice->rx_pass;

    }

    return PJ_SUCCESS;
}
+
+
/* Compute a candidate's 32-bit priority by packing type preference,
 * local preference and (inverted) component ID, highest to lowest
 * significance. (A function despite the macro-style name.)
 */
static pj_uint32_t CALC_CAND_PRIO(pj_ice_sess *ice,
				  pj_ice_cand_type type,
				  pj_uint32_t local_pref,
				  pj_uint32_t comp_id)
{
#if PJNATH_ICE_PRIO_STD
    /* Standard ICE layout: 8-bit type pref, 16-bit local pref,
     * 8-bit (256 - comp_id).
     */
    return ((ice->prefs[type] & 0xFF) << 24) +
	   ((local_pref & 0xFFFF) << 8) +
	   (((256 - comp_id) & 0xFF) << 0);
#else
    /* Compact layout using the compile-time bit widths */
    enum {
	type_mask   = ((2 << PJ_ICE_CAND_TYPE_PREF_BITS) - 1),
	local_mask  = ((2 << PJ_ICE_LOCAL_PREF_BITS) - 1),
	comp_mask   = ((2 << PJ_ICE_COMP_BITS) - 1),

	comp_shift  = 0,
	local_shift = (PJ_ICE_COMP_BITS),
	type_shift  = (comp_shift + local_shift),

	max_comp    = (2<<PJ_ICE_COMP_BITS),
    };

    return ((ice->prefs[type] & type_mask) << type_shift) +
	   ((local_pref & local_mask) << local_shift) +
	   (((max_comp - comp_id) & comp_mask) << comp_shift);
#endif
}
+
+
+/*
+ * Add ICE candidate
+ */
+PJ_DEF(pj_status_t) pj_ice_sess_add_cand(pj_ice_sess *ice,
+ unsigned comp_id,
+ unsigned transport_id,
+ pj_ice_cand_type type,
+ pj_uint16_t local_pref,
+ const pj_str_t *foundation,
+ const pj_sockaddr_t *addr,
+ const pj_sockaddr_t *base_addr,
+ const pj_sockaddr_t *rel_addr,
+ int addr_len,
+ unsigned *p_cand_id)
+{
+ pj_ice_sess_cand *lcand;
+ pj_status_t status = PJ_SUCCESS;
+
+ PJ_ASSERT_RETURN(ice && comp_id &&
+ foundation && addr && base_addr && addr_len,
+ PJ_EINVAL);
+ PJ_ASSERT_RETURN(comp_id <= ice->comp_cnt, PJ_EINVAL);
+
+ pj_mutex_lock(ice->mutex);
+
+ if (ice->lcand_cnt >= PJ_ARRAY_SIZE(ice->lcand)) {
+ status = PJ_ETOOMANY;
+ goto on_error;
+ }
+
+ lcand = &ice->lcand[ice->lcand_cnt];
+ lcand->comp_id = (pj_uint8_t)comp_id;
+ lcand->transport_id = (pj_uint8_t)transport_id;
+ lcand->type = type;
+ pj_strdup(ice->pool, &lcand->foundation, foundation);
+ lcand->prio = CALC_CAND_PRIO(ice, type, local_pref, lcand->comp_id);
+ pj_memcpy(&lcand->addr, addr, addr_len);
+ pj_memcpy(&lcand->base_addr, base_addr, addr_len);
+ if (rel_addr == NULL)
+ rel_addr = base_addr;
+ pj_memcpy(&lcand->rel_addr, rel_addr, addr_len);
+
+ pj_ansi_strcpy(ice->tmp.txt, pj_inet_ntoa(lcand->addr.ipv4.sin_addr));
+ LOG4((ice->obj_name,
+ "Candidate %d added: comp_id=%d, type=%s, foundation=%.*s, "
+ "addr=%s:%d, base=%s:%d, prio=0x%x (%u)",
+ ice->lcand_cnt,
+ lcand->comp_id,
+ cand_type_names[lcand->type],
+ (int)lcand->foundation.slen,
+ lcand->foundation.ptr,
+ ice->tmp.txt,
+ (int)pj_ntohs(lcand->addr.ipv4.sin_port),
+ pj_inet_ntoa(lcand->base_addr.ipv4.sin_addr),
+ (int)pj_htons(lcand->base_addr.ipv4.sin_port),
+ lcand->prio, lcand->prio));
+
+ if (p_cand_id)
+ *p_cand_id = ice->lcand_cnt;
+
+ ++ice->lcand_cnt;
+
+on_error:
+ pj_mutex_unlock(ice->mutex);
+ return status;
+}
+
+
/* Find default candidate ID for the component.
 * Preference order: (1) local candidate of a check in the valid
 * list, (2) relayed, (3) server/peer reflexive, (4) host. It is a
 * bug (PJ_EBUG) for a component to have no candidate at all.
 */
PJ_DEF(pj_status_t) pj_ice_sess_find_default_cand(pj_ice_sess *ice,
						  unsigned comp_id,
						  int *cand_id)
{
    unsigned i;

    PJ_ASSERT_RETURN(ice && comp_id && cand_id, PJ_EINVAL);
    PJ_ASSERT_RETURN(comp_id <= ice->comp_cnt, PJ_EINVAL);

    *cand_id = -1;

    pj_mutex_lock(ice->mutex);

    /* First find in valid list if we have nominated pair */
    for (i=0; i<ice->valid_list.count; ++i) {
	pj_ice_sess_check *check = &ice->valid_list.checks[i];

	if (check->lcand->comp_id == comp_id) {
	    *cand_id = GET_LCAND_ID(check->lcand);
	    pj_mutex_unlock(ice->mutex);
	    return PJ_SUCCESS;
	}
    }

    /* If there's no nominated pair, find relayed candidate */
    for (i=0; i<ice->lcand_cnt; ++i) {
	pj_ice_sess_cand *lcand = &ice->lcand[i];
	if (lcand->comp_id==comp_id &&
	    lcand->type == PJ_ICE_CAND_TYPE_RELAYED)
	{
	    *cand_id = GET_LCAND_ID(lcand);
	    pj_mutex_unlock(ice->mutex);
	    return PJ_SUCCESS;
	}
    }

    /* If there's no relayed candidate, find reflexive candidate */
    for (i=0; i<ice->lcand_cnt; ++i) {
	pj_ice_sess_cand *lcand = &ice->lcand[i];
	if (lcand->comp_id==comp_id &&
	    (lcand->type == PJ_ICE_CAND_TYPE_SRFLX ||
	     lcand->type == PJ_ICE_CAND_TYPE_PRFLX))
	{
	    *cand_id = GET_LCAND_ID(lcand);
	    pj_mutex_unlock(ice->mutex);
	    return PJ_SUCCESS;
	}
    }

    /* Otherwise return host candidate */
    for (i=0; i<ice->lcand_cnt; ++i) {
	pj_ice_sess_cand *lcand = &ice->lcand[i];
	if (lcand->comp_id==comp_id &&
	    lcand->type == PJ_ICE_CAND_TYPE_HOST)
	{
	    *cand_id = GET_LCAND_ID(lcand);
	    pj_mutex_unlock(ice->mutex);
	    return PJ_SUCCESS;
	}
    }

    /* Still no candidate is found! :( */
    pj_mutex_unlock(ice->mutex);

    pj_assert(!"Should have a candidate by now");
    return PJ_EBUG;
}
+
+
/* Fully-parenthesized min/max helpers.
 * BUG FIX: the original macros did not parenthesize their arguments,
 * so operands with lower precedence than the comparison (e.g.
 * MIN(x & 1, y)) parsed incorrectly.
 * NOTE: arguments are still evaluated more than once; do not pass
 * expressions with side effects (e.g. MIN(i++, j)).
 */
#ifndef MIN
#   define MIN(a,b) ((a) < (b) ? (a) : (b))
#endif

#ifndef MAX
#   define MAX(a,b) ((a) > (b) ? (a) : (b))
#endif
+
/* Compute a check (candidate pair) priority as a 64-bit value held
 * in a pj_timestamp: hi word = MIN(O,A), lo word = 2*MAX(O,A) +
 * (O>A?1:0), where O/A are the controlling/controlled candidate
 * priorities per the ICE pair priority formula.
 */
static pj_timestamp CALC_CHECK_PRIO(const pj_ice_sess *ice,
				    const pj_ice_sess_cand *lcand,
				    const pj_ice_sess_cand *rcand)
{
    pj_uint32_t O, A;
    pj_timestamp prio;

    /* Original formula:
     *   pair priority = 2^32*MIN(O,A) + 2*MAX(O,A) + (O>A?1:0)
     */

    /* O = controlling agent's candidate prio, A = controlled's */
    if (ice->role == PJ_ICE_SESS_ROLE_CONTROLLING) {
	O = lcand->prio;
	A = rcand->prio;
    } else {
	O = rcand->prio;
	A = lcand->prio;
    }

    /*
    return ((pj_uint64_t)1 << 32) * MIN(O, A) +
	   (pj_uint64_t)2 * MAX(O, A) + (O>A ? 1 : 0);
    */

    prio.u32.hi = MIN(O,A);
    /* NOTE(review): the shift below discards MAX's top bit instead
     * of carrying it into hi (2*MAX can exceed 32 bits) -- confirm
     * candidate priorities never have bit 31 set.
     */
    prio.u32.lo = (MAX(O, A) << 1) + (O>A ? 1 : 0);

    return prio;
}
+
+
/* Compare two checks by pair priority; <0, 0, >0 like memcmp,
 * delegating to the 64-bit timestamp comparison.
 */
PJ_INLINE(int) CMP_CHECK_PRIO(const pj_ice_sess_check *c1,
			      const pj_ice_sess_check *c2)
{
    return pj_cmp_timestamp(&c1->prio, &c2->prio);
}
+
+
+#if PJ_LOG_MAX_LEVEL >= 4
/* Format a check as "id: [comp] laddr:lport-->raddr:rport" into the
 * caller's buffer (returned for convenience). IPv6 pairs are only
 * rendered as a placeholder. For logging; compiled at level >= 4.
 */
static const char *dump_check(char *buffer, unsigned bufsize,
			      const pj_ice_sess_checklist *clist,
			      const pj_ice_sess_check *check)
{
    const pj_ice_sess_cand *lcand = check->lcand;
    const pj_ice_sess_cand *rcand = check->rcand;
    char laddr[PJ_INET6_ADDRSTRLEN];
    int len;

    PJ_CHECK_STACK();

    /* Buffer the local address first: pj_inet_ntoa is called again
     * for the remote address inside the snprintf below (presumably
     * it returns a static buffer -- hence the copy).
     */
    pj_ansi_strcpy(laddr, pj_inet_ntoa(lcand->addr.ipv4.sin_addr));

    if (lcand->addr.addr.sa_family == pj_AF_INET()) {
	len = pj_ansi_snprintf(buffer, bufsize,
			       "%d: [%d] %s:%d-->%s:%d",
			       (int)GET_CHECK_ID(clist, check),
			       check->lcand->comp_id,
			       laddr, (int)pj_ntohs(lcand->addr.ipv4.sin_port),
			       pj_inet_ntoa(rcand->addr.ipv4.sin_addr),
			       (int)pj_ntohs(rcand->addr.ipv4.sin_port));
    } else {
	len = pj_ansi_snprintf(buffer, bufsize, "IPv6->IPv6");
    }


    /* Clamp snprintf error/truncation and terminate */
    if (len < 0)
	len = 0;
    else if (len >= (int)bufsize)
	len = bufsize - 1;

    buffer[len] = '\0';
    return buffer;
}
+
/* Log a titled dump of every check in the checklist, with its
 * nomination flag and state. For logging; compiled at level >= 4.
 */
static void dump_checklist(const char *title, pj_ice_sess *ice,
			   const pj_ice_sess_checklist *clist)
{
    unsigned i;

    LOG4((ice->obj_name, "%s", title));
    for (i=0; i<clist->count; ++i) {
	const pj_ice_sess_check *c = &clist->checks[i];
	LOG4((ice->obj_name, " %s (%s, state=%s)",
	     dump_check(ice->tmp.txt, sizeof(ice->tmp.txt), clist, c),
	     (c->nominated ? "nominated" : "not nominated"),
	     check_state_name[c->state]));
    }
}
+
+#else
+#define dump_checklist(title, ice, clist)
+#endif
+
/* Transition a check to a new state, recording the error code and
 * logging the transition. Asserts that the check has not already
 * reached a terminal state (Succeeded/Failed).
 */
static void check_set_state(pj_ice_sess *ice, pj_ice_sess_check *check,
			    pj_ice_sess_check_state st,
			    pj_status_t err_code)
{
    pj_assert(check->state < PJ_ICE_SESS_CHECK_STATE_SUCCEEDED);

    LOG5((ice->obj_name, "Check %s: state changed from %s to %s",
	 dump_check(ice->tmp.txt, sizeof(ice->tmp.txt), &ice->clist, check),
	 check_state_name[check->state],
	 check_state_name[st]));
    check->state = st;
    check->err_code = err_code;
}
+
/* Transition a checklist to a new state, logging the transition;
 * no-op (and no log) when the state is unchanged.
 */
static void clist_set_state(pj_ice_sess *ice, pj_ice_sess_checklist *clist,
			    pj_ice_sess_checklist_state st)
{
    if (clist->state != st) {
	LOG5((ice->obj_name, "Checklist: state changed from %s to %s",
	     clist_state_name[clist->state],
	     clist_state_name[st]));
	clist->state = st;
    }
}
+
/* Sort checklist based on priority (selection sort, descending by
 * CMP_CHECK_PRIO). Because checks are moved by value, any component's
 * valid_check/nominated_check pointer that refers to a moved entry is
 * patched afterwards so it keeps pointing at the same logical check.
 *
 * NOTE(review): the outer loop condition `i < clist->count-1` underflows
 * when count==0 (unsigned arithmetic); assumes callers never sort an
 * empty checklist — TODO confirm.
 */
static void sort_checklist(pj_ice_sess *ice, pj_ice_sess_checklist *clist)
{
    unsigned i;
    /* Addresses of the per-component check pointers that may need
     * fixing up after a swap (at most two per component).
     */
    pj_ice_sess_check **check_ptr[PJ_ICE_MAX_COMP*2];
    unsigned check_ptr_cnt = 0;

    /* Collect the component pointers that reference checklist entries */
    for (i=0; i<ice->comp_cnt; ++i) {
        if (ice->comp[i].valid_check) {
            check_ptr[check_ptr_cnt++] = &ice->comp[i].valid_check;
        }
        if (ice->comp[i].nominated_check) {
            check_ptr[check_ptr_cnt++] = &ice->comp[i].nominated_check;
        }
    }

    for (i=0; i<clist->count-1; ++i) {
        unsigned j, highest = i;

        /* Find the highest-priority check in checks[i..count) */
        for (j=i+1; j<clist->count; ++j) {
            if (CMP_CHECK_PRIO(&clist->checks[j], &clist->checks[highest]) > 0) {
                highest = j;
            }
        }

        if (highest != i) {
            pj_ice_sess_check tmp;
            unsigned k;

            /* Swap checks[i] and checks[highest] by value */
            pj_memcpy(&tmp, &clist->checks[i], sizeof(pj_ice_sess_check));
            pj_memcpy(&clist->checks[i], &clist->checks[highest],
                      sizeof(pj_ice_sess_check));
            pj_memcpy(&clist->checks[highest], &tmp,
                      sizeof(pj_ice_sess_check));

            /* Update valid and nominated check pointers, since we're moving
             * around checks
             */
            for (k=0; k<check_ptr_cnt; ++k) {
                if (*check_ptr[k] == &clist->checks[highest])
                    *check_ptr[k] = &clist->checks[i];
                else if (*check_ptr[k] == &clist->checks[i])
                    *check_ptr[k] = &clist->checks[highest];
            }
        }
    }
}
+
/* Symbolic results for sockaddr_cmp() below. Zero means the two
 * addresses compare equal, mirroring memcmp()-style semantics.
 */
enum
{
    SOCKADDR_EQUAL = 0,
    SOCKADDR_NOT_EQUAL = 1
};
+
+/* Utility: compare sockaddr.
+ * Returns 0 if equal.
+ */
+static int sockaddr_cmp(const pj_sockaddr *a1, const pj_sockaddr *a2)
+{
+ if (a1->addr.sa_family != a2->addr.sa_family)
+ return SOCKADDR_NOT_EQUAL;
+
+ if (a1->addr.sa_family == pj_AF_INET()) {
+ return !(a1->ipv4.sin_addr.s_addr == a2->ipv4.sin_addr.s_addr &&
+ a1->ipv4.sin_port == a2->ipv4.sin_port);
+ } else if (a1->addr.sa_family == pj_AF_INET6()) {
+ return pj_memcmp(&a1->ipv6, &a2->ipv6, sizeof(a1->ipv6));
+ } else {
+ pj_assert(!"Invalid address family!");
+ return SOCKADDR_NOT_EQUAL;
+ }
+}
+
+
/* Prune the checklist. This must be done after the checklist has been
 * sorted (see sort_checklist()).
 *
 * Two passes are made:
 *  1. Every local SRFLX candidate is replaced by its base (host)
 *     candidate; if no matching host candidate exists this is an error.
 *  2. Duplicate pairs (same local+remote candidate, or same remote
 *     candidate with equal local base) are removed, keeping the
 *     higher-priority (earlier) pair.
 *
 * Returns PJ_SUCCESS, or PJNATH_EICENOHOSTCAND when an SRFLX candidate
 * has no base among the local host candidates.
 */
static pj_status_t prune_checklist(pj_ice_sess *ice,
                                   pj_ice_sess_checklist *clist)
{
    unsigned i;

    /* Since an agent cannot send requests directly from a reflexive
     * candidate, but only from its base, the agent next goes through the
     * sorted list of candidate pairs. For each pair where the local
     * candidate is server reflexive, the server reflexive candidate MUST be
     * replaced by its base. Once this has been done, the agent MUST prune
     * the list. This is done by removing a pair if its local and remote
     * candidates are identical to the local and remote candidates of a pair
     * higher up on the priority list. The result is a sequence of ordered
     * candidate pairs, called the check list for that media stream.
     */
    /* First replace SRFLX candidates with their base */
    for (i=0; i<clist->count; ++i) {
        pj_ice_sess_cand *srflx = clist->checks[i].lcand;

        if (clist->checks[i].lcand->type == PJ_ICE_CAND_TYPE_SRFLX) {
            /* Find the base (host candidate) for this candidate */
            unsigned j;
            for (j=0; j<ice->lcand_cnt; ++j) {
                pj_ice_sess_cand *host = &ice->lcand[j];

                if (host->type != PJ_ICE_CAND_TYPE_HOST)
                    continue;

                if (sockaddr_cmp(&srflx->base_addr, &host->addr) == 0) {
                    /* Replace this SRFLX with its BASE */
                    clist->checks[i].lcand = host;
                    break;
                }
            }

            if (j==ice->lcand_cnt) {
                /* Host candidate not found for this srflx! */
                LOG4((ice->obj_name,
                      "Base candidate %s:%d not found for srflx candidate %d",
                      pj_inet_ntoa(srflx->base_addr.ipv4.sin_addr),
                      pj_ntohs(srflx->base_addr.ipv4.sin_port),
                      GET_LCAND_ID(clist->checks[i].lcand)));
                return PJNATH_EICENOHOSTCAND;
            }
        }
    }

    /* Next remove a pair if its local and remote candidates are identical
     * to the local and remote candidates of a pair higher up on the priority
     * list
     */
    /*
     * Not in ICE!
     * Also remove host candidate pairs whose bases are the same!
     */
    for (i=0; i<clist->count; ++i) {
        pj_ice_sess_cand *licand = clist->checks[i].lcand;
        pj_ice_sess_cand *ricand = clist->checks[i].rcand;
        unsigned j;

        /* Note: j is only advanced when nothing was erased, because
         * pj_array_erase() shifts the remaining entries down to slot j.
         */
        for (j=i+1; j<clist->count;) {
            pj_ice_sess_cand *ljcand = clist->checks[j].lcand;
            pj_ice_sess_cand *rjcand = clist->checks[j].rcand;
            const char *reason = NULL;

            if ((licand == ljcand) && (ricand == rjcand)) {
                reason = "duplicate found";
            } else if ((rjcand == ricand) &&
                       (sockaddr_cmp(&ljcand->base_addr,
                                     &licand->base_addr)==0))
            {
                reason = "equal base";
            }

            if (reason != NULL) {
                /* Found duplicate, remove it */
                LOG5((ice->obj_name, "Check %s pruned (%s)",
                      dump_check(ice->tmp.txt, sizeof(ice->tmp.txt),
                                 &ice->clist, &clist->checks[j]),
                      reason));

                pj_array_erase(clist->checks, sizeof(clist->checks[0]),
                               clist->count, j);
                --clist->count;

            } else {
                ++j;
            }
        }
    }

    return PJ_SUCCESS;
}
+
/* Timer callback, dispatching on the timer type stored in te->id.
 * The ICE mutex is held for the whole callback, except for the
 * completion-callback case where it is released before invoking the
 * application callback (which may destroy the session).
 */
static void on_timer(pj_timer_heap_t *th, pj_timer_entry *te)
{
    pj_ice_sess *ice = (pj_ice_sess*) te->user_data;
    enum timer_type type = (enum timer_type)te->id;
    pj_bool_t has_mutex = PJ_TRUE;

    PJ_UNUSED_ARG(th);

    pj_mutex_lock(ice->mutex);

    /* Mark timer as no longer running before handling it */
    te->id = TIMER_NONE;

    switch (type) {
    case TIMER_CONTROLLED_WAIT_NOM:
        LOG4((ice->obj_name,
              "Controlled agent timed-out in waiting for the controlling "
              "agent to send nominated check. Setting state to fail now.."));
        on_ice_complete(ice, PJNATH_EICENOMTIMEOUT);
        break;
    case TIMER_COMPLETION_CALLBACK:
        {
            void (*on_ice_complete)(pj_ice_sess *ice, pj_status_t status);
            pj_status_t ice_status;

            /* Start keep-alive timer but don't send any packets yet.
             * Need to do it here just in case app destroy the session
             * in the callback.
             */
            if (ice->ice_status == PJ_SUCCESS)
                ice_keep_alive(ice, PJ_FALSE);

            /* Release mutex in case app destroy us in the callback.
             * Copy everything we still need to locals first.
             */
            ice_status = ice->ice_status;
            on_ice_complete = ice->cb.on_ice_complete;
            has_mutex = PJ_FALSE;
            pj_mutex_unlock(ice->mutex);

            /* Notify app about ICE completion*/
            if (on_ice_complete)
                (*on_ice_complete)(ice, ice_status);
        }
        break;
    case TIMER_START_NOMINATED_CHECK:
        start_nominated_check(ice);
        break;
    case TIMER_KEEP_ALIVE:
        ice_keep_alive(ice, PJ_TRUE);
        break;
    case TIMER_NONE:
        /* Nothing to do, just to get rid of gcc warning */
        break;
    }

    if (has_mutex)
        pj_mutex_unlock(ice->mutex);
}
+
/* Send keep-alive.
 *
 * When send_now is PJ_TRUE, send a STUN Binding Indication on the
 * nominated pair of the component indexed by ice->comp_ka, then advance
 * comp_ka round-robin so every component gets serviced in turn.
 * In all cases, (re)schedule the keep-alive timer with a randomized
 * delay divided by the component count.
 */
static void ice_keep_alive(pj_ice_sess *ice, pj_bool_t send_now)
{
    if (send_now) {
        /* Send Binding Indication for the component */
        pj_ice_sess_comp *comp = &ice->comp[ice->comp_ka];
        pj_stun_tx_data *tdata;
        pj_ice_sess_check *the_check;
        pj_ice_msg_data *msg_data;
        int addr_len;
        pj_bool_t saved;
        pj_status_t status;

        /* Must have nominated check by now */
        pj_assert(comp->nominated_check != NULL);
        the_check = comp->nominated_check;

        /* Create the Binding Indication */
        status = pj_stun_session_create_ind(comp->stun_sess,
                                            PJ_STUN_BINDING_INDICATION,
                                            &tdata);
        if (status != PJ_SUCCESS)
            goto done;

        /* Need the transport_id */
        msg_data = PJ_POOL_ZALLOC_T(tdata->pool, pj_ice_msg_data);
        msg_data->transport_id = the_check->lcand->transport_id;

        /* Temporarily disable FINGERPRINT. The Binding Indication
         * SHOULD NOT contain any attributes.
         */
        saved = pj_stun_session_use_fingerprint(comp->stun_sess, PJ_FALSE);

        /* Send to session. Failure is deliberately ignored: keep-alive
         * is best-effort and will be retried on the next timer tick.
         */
        addr_len = pj_sockaddr_get_len(&the_check->rcand->addr);
        status = pj_stun_session_send_msg(comp->stun_sess, msg_data,
                                          PJ_FALSE, PJ_FALSE,
                                          &the_check->rcand->addr,
                                          addr_len, tdata);

        /* Restore FINGERPRINT usage */
        pj_stun_session_use_fingerprint(comp->stun_sess, saved);

done:
        /* Move on to the next component for the next keep-alive */
        ice->comp_ka = (ice->comp_ka + 1) % ice->comp_cnt;
    }

    if (ice->timer.id == TIMER_NONE) {
        pj_time_val delay = { 0, 0 };

        /* Randomize the interval so keep-alives don't synchronize */
        delay.msec = (PJ_ICE_SESS_KEEP_ALIVE_MIN +
                      (pj_rand() % PJ_ICE_SESS_KEEP_ALIVE_MAX_RAND)) * 1000 /
                     ice->comp_cnt;
        pj_time_val_normalize(&delay);

        ice->timer.id = TIMER_KEEP_ALIVE;
        pj_timer_heap_schedule(ice->stun_cfg.timer_heap, &ice->timer, &delay);

    } else {
        pj_assert(!"Not expected any timer active");
    }
}
+
+/* This function is called when ICE processing completes */
+static void on_ice_complete(pj_ice_sess *ice, pj_status_t status)
+{
+ if (!ice->is_complete) {
+ ice->is_complete = PJ_TRUE;
+ ice->ice_status = status;
+
+ if (ice->timer.id != TIMER_NONE) {
+ pj_timer_heap_cancel(ice->stun_cfg.timer_heap, &ice->timer);
+ ice->timer.id = TIMER_NONE;
+ }
+
+ /* Log message */
+ LOG4((ice->obj_name, "ICE process complete, status=%s",
+ pj_strerror(status, ice->tmp.errmsg,
+ sizeof(ice->tmp.errmsg)).ptr));
+
+ dump_checklist("Valid list", ice, &ice->valid_list);
+
+ /* Call callback */
+ if (ice->cb.on_ice_complete) {
+ pj_time_val delay = {0, 0};
+
+ ice->timer.id = TIMER_COMPLETION_CALLBACK;
+ pj_timer_heap_schedule(ice->stun_cfg.timer_heap,
+ &ice->timer, &delay);
+ }
+ }
+}
+
+/* Update valid check and nominated check for the candidate */
+static void update_comp_check(pj_ice_sess *ice, unsigned comp_id,
+ pj_ice_sess_check *check)
+{
+ pj_ice_sess_comp *comp;
+
+ comp = find_comp(ice, comp_id);
+ if (comp->valid_check == NULL) {
+ comp->valid_check = check;
+ } else {
+ if (CMP_CHECK_PRIO(comp->valid_check, check) < 0)
+ comp->valid_check = check;
+ }
+
+ if (check->nominated) {
+ /* Update the nominated check for the component */
+ if (comp->nominated_check == NULL) {
+ comp->nominated_check = check;
+ } else {
+ if (CMP_CHECK_PRIO(comp->nominated_check, check) < 0)
+ comp->nominated_check = check;
+ }
+ }
+}
+
/* This function is called when one check completes (either with success
 * or failure, as indicated by check->err_code).
 *
 * Implements RFC 5245 7.1.2.2.2 (unfreezing pairs with the same
 * foundation) and 8.2 (updating ICE processing state once nominated
 * pairs exist or the whole checklist has completed).
 *
 * Returns PJ_TRUE if the ICE session has reached a terminal state
 * (success or failure, on_ice_complete() has been invoked), PJ_FALSE
 * if checks are still in progress or more checks are being started.
 */
static pj_bool_t on_check_complete(pj_ice_sess *ice,
                                   pj_ice_sess_check *check)
{
    pj_ice_sess_comp *comp;
    unsigned i;

    pj_assert(check->state >= PJ_ICE_SESS_CHECK_STATE_SUCCEEDED);

    comp = find_comp(ice, check->lcand->comp_id);

    /* 7.1.2.2.2. Updating Pair States
     *
     * The agent sets the state of the pair that generated the check to
     * Succeeded. The success of this check might also cause the state of
     * other checks to change as well. The agent MUST perform the following
     * two steps:
     *
     * 1. The agent changes the states for all other Frozen pairs for the
     *    same media stream and same foundation to Waiting. Typically
     *    these other pairs will have different component IDs but not
     *    always.
     */
    if (check->err_code==PJ_SUCCESS) {

        for (i=0; i<ice->clist.count; ++i) {
            pj_ice_sess_check *c = &ice->clist.checks[i];
            if (pj_strcmp(&c->lcand->foundation, &check->lcand->foundation)==0
                 && c->state == PJ_ICE_SESS_CHECK_STATE_FROZEN)
            {
                check_set_state(ice, c, PJ_ICE_SESS_CHECK_STATE_WAITING, 0);
            }
        }

        LOG5((ice->obj_name, "Check %d is successful%s",
             GET_CHECK_ID(&ice->clist, check),
             (check->nominated ? "  and nominated" : "")));

    }

    /* 8.2. Updating States
     *
     * For both controlling and controlled agents, the state of ICE
     * processing depends on the presence of nominated candidate pairs in
     * the valid list and on the state of the check list:
     *
     * o If there are no nominated pairs in the valid list for a media
     *   stream and the state of the check list is Running, ICE processing
     *   continues.
     *
     * o If there is at least one nominated pair in the valid list:
     *
     *   - The agent MUST remove all Waiting and Frozen pairs in the check
     *     list for the same component as the nominated pairs for that
     *     media stream
     *
     *   - If an In-Progress pair in the check list is for the same
     *     component as a nominated pair, the agent SHOULD cease
     *     retransmissions for its check if its pair priority is lower
     *     than the lowest priority nominated pair for that component
     */
    if (check->err_code==PJ_SUCCESS && check->nominated) {

        for (i=0; i<ice->clist.count; ++i) {

            pj_ice_sess_check *c = &ice->clist.checks[i];

            if (c->lcand->comp_id == check->lcand->comp_id) {

                if (c->state < PJ_ICE_SESS_CHECK_STATE_IN_PROGRESS) {

                    /* Just fail Frozen/Waiting check */
                    LOG5((ice->obj_name,
                         "Check %s to be failed because state is %s",
                         dump_check(ice->tmp.txt, sizeof(ice->tmp.txt),
                                    &ice->clist, c),
                         check_state_name[c->state]));
                    check_set_state(ice, c, PJ_ICE_SESS_CHECK_STATE_FAILED,
                                    PJ_ECANCELLED);

                } else if (c->state == PJ_ICE_SESS_CHECK_STATE_IN_PROGRESS
                           && (PJ_ICE_CANCEL_ALL ||
                                CMP_CHECK_PRIO(c, check) < 0)) {

                    /* State is IN_PROGRESS, cancel transaction */
                    if (c->tdata) {
                        LOG5((ice->obj_name,
                             "Cancelling check %s (In Progress)",
                             dump_check(ice->tmp.txt, sizeof(ice->tmp.txt),
                                        &ice->clist, c)));
                        pj_stun_session_cancel_req(comp->stun_sess,
                                                   c->tdata, PJ_FALSE, 0);
                        c->tdata = NULL;
                        check_set_state(ice, c, PJ_ICE_SESS_CHECK_STATE_FAILED,
                                        PJ_ECANCELLED);
                    }
                }
            }
        }
    }


    /* Still in 8.2. Updating States
     *
     * o Once there is at least one nominated pair in the valid list for
     *   every component of at least one media stream and the state of the
     *   check list is Running:
     *
     *   * The agent MUST change the state of processing for its check
     *     list for that media stream to Completed.
     *
     *   * The agent MUST continue to respond to any checks it may still
     *     receive for that media stream, and MUST perform triggered
     *     checks if required by the processing of Section 7.2.
     *
     *   * The agent MAY begin transmitting media for this media stream as
     *     described in Section 11.1
     */

    /* See if all components have nominated pair. If they do, then mark
     * ICE processing as success, otherwise wait.
     */
    for (i=0; i<ice->comp_cnt; ++i) {
        if (ice->comp[i].nominated_check == NULL)
            break;
    }
    if (i == ice->comp_cnt) {
        /* All components have nominated pair */
        on_ice_complete(ice, PJ_SUCCESS);
        return PJ_TRUE;
    }

    /* Note: this is the stuffs that we don't do in 7.1.2.2.2, since our
     *       ICE session only supports one media stream for now:
     *
     * 7.1.2.2.2. Updating Pair States
     *
     * 2.  If there is a pair in the valid list for every component of this
     *     media stream (where this is the actual number of components being
     *     used, in cases where the number of components signaled in the SDP
     *     differs from offerer to answerer), the success of this check may
     *     unfreeze checks for other media streams.
     */

    /* 7.1.2.3. Check List and Timer State Updates
     * Regardless of whether the check was successful or failed, the
     * completion of the transaction may require updating of check list and
     * timer states.
     *
     * If all of the pairs in the check list are now either in the Failed or
     * Succeeded state, and there is not a pair in the valid list for each
     * component of the media stream, the state of the check list is set to
     * Failed.
     */

    /*
     * See if all checks in the checklist have completed. If we do,
     * then mark ICE processing as failed.
     */
    for (i=0; i<ice->clist.count; ++i) {
        pj_ice_sess_check *c = &ice->clist.checks[i];
        if (c->state < PJ_ICE_SESS_CHECK_STATE_SUCCEEDED) {
            break;
        }
    }

    if (i == ice->clist.count) {
        /* All checks have completed, but we don't have nominated pair.
         * If agent's role is controlled, check if all components have
         * valid pair. If it does, this means the controlled agent has
         * finished the check list and it's waiting for controlling
         * agent to send checks with USE-CANDIDATE flag set.
         */
        if (ice->role == PJ_ICE_SESS_ROLE_CONTROLLED) {
            for (i=0; i < ice->comp_cnt; ++i) {
                if (ice->comp[i].valid_check == NULL)
                    break;
            }

            if (i < ice->comp_cnt) {
                /* This component ID doesn't have valid pair.
                 * Mark ICE as failed.
                 */
                on_ice_complete(ice, PJNATH_EICEFAILED);
                return PJ_TRUE;
            } else {
                /* All components have a valid pair.
                 * We should wait until we receive nominated checks.
                 * Arm the wait-nomination timeout, unless the app has
                 * disabled it (negative controlled_agent_want_nom_timeout)
                 * or another timer is already running.
                 */
                if (ice->timer.id == TIMER_NONE &&
                    ice->opt.controlled_agent_want_nom_timeout >= 0)
                {
                    pj_time_val delay;

                    delay.sec = 0;
                    delay.msec = ice->opt.controlled_agent_want_nom_timeout;
                    pj_time_val_normalize(&delay);

                    ice->timer.id = TIMER_CONTROLLED_WAIT_NOM;
                    pj_timer_heap_schedule(ice->stun_cfg.timer_heap,
                                           &ice->timer,
                                           &delay);

                    LOG5((ice->obj_name,
                          "All checks have completed. Controlled agent now "
                          "waits for nomination from controlling agent "
                          "(timeout=%d msec)",
                          ice->opt.controlled_agent_want_nom_timeout));
                }
                return PJ_FALSE;
            }

            /* Unreached */

        } else if (ice->is_nominating) {
            /* We are controlling agent and all checks have completed but
             * there's at least one component without nominated pair (or
             * more likely we don't have any nominated pairs at all).
             */
            on_ice_complete(ice, PJNATH_EICEFAILED);
            return PJ_TRUE;

        } else {
            /* We are controlling agent and all checks have completed. If
             * we have valid list for every component, then move on to
             * sending nominated check, otherwise we have failed.
             */
            for (i=0; i<ice->comp_cnt; ++i) {
                if (ice->comp[i].valid_check == NULL)
                    break;
            }

            if (i < ice->comp_cnt) {
                /* At least one component doesn't have a valid check. Mark
                 * ICE as failed.
                 */
                on_ice_complete(ice, PJNATH_EICEFAILED);
                return PJ_TRUE;
            }

            /* Now it's time to send connectivity check with nomination
             * flag set.
             */
            LOG4((ice->obj_name,
                  "All checks have completed, starting nominated checks now"));
            start_nominated_check(ice);
            return PJ_FALSE;
        }
    }

    /* If this connectivity check has been successful, scan all components
     * and see if they have a valid pair, if we are controlling and we haven't
     * started our nominated check yet.
     */
    if (check->err_code == PJ_SUCCESS &&
        ice->role==PJ_ICE_SESS_ROLE_CONTROLLING &&
        !ice->is_nominating &&
        ice->timer.id == TIMER_NONE)
    {
        pj_time_val delay;

        for (i=0; i<ice->comp_cnt; ++i) {
            if (ice->comp[i].valid_check == NULL)
                break;
        }

        if (i < ice->comp_cnt) {
            /* Some components still don't have valid pair, continue
             * processing.
             */
            return PJ_FALSE;
        }

        LOG4((ice->obj_name,
              "Scheduling nominated check in %d ms",
              ice->opt.nominated_check_delay));

        /* Defensive: cancel any running timer before rescheduling
         * (timer.id was checked to be TIMER_NONE above).
         */
        if (ice->timer.id != TIMER_NONE) {
            pj_timer_heap_cancel(ice->stun_cfg.timer_heap, &ice->timer);
            ice->timer.id = TIMER_NONE;
        }

        /* All components have valid pair. Let connectivity checks run for
         * a little bit more time, then start our nominated check.
         */
        delay.sec = 0;
        delay.msec = ice->opt.nominated_check_delay;
        pj_time_val_normalize(&delay);

        ice->timer.id = TIMER_START_NOMINATED_CHECK;
        pj_timer_heap_schedule(ice->stun_cfg.timer_heap, &ice->timer, &delay);
        return PJ_FALSE;
    }

    /* We still have checks to perform */
    return PJ_FALSE;
}
+
+
/* Create checklist by pairing local candidates with remote candidates.
 *
 * Saves the remote credentials and candidates, builds the check list by
 * pairing every local candidate with every remote candidate of the same
 * component ID and address family, sorts and prunes the list, destroys
 * local components beyond the highest remote component ID, and sets up
 * the periodic-check timer entry.
 *
 * Returns PJ_SUCCESS, PJ_ETOOMANY if the candidate/check limits would
 * be exceeded, or the error from prune_checklist().
 */
PJ_DEF(pj_status_t) pj_ice_sess_create_check_list(
                              pj_ice_sess *ice,
                              const pj_str_t *rem_ufrag,
                              const pj_str_t *rem_passwd,
                              unsigned rcand_cnt,
                              const pj_ice_sess_cand rcand[])
{
    pj_ice_sess_checklist *clist;
    char buf[128];
    pj_str_t username;
    timer_data *td;
    unsigned i, j;
    unsigned highest_comp = 0;
    pj_status_t status;

    PJ_ASSERT_RETURN(ice && rem_ufrag && rem_passwd && rcand_cnt && rcand,
                     PJ_EINVAL);
    PJ_ASSERT_RETURN(rcand_cnt + ice->rcand_cnt <= PJ_ICE_MAX_CAND,
                     PJ_ETOOMANY);

    pj_mutex_lock(ice->mutex);

    /* Save credentials.
     * NOTE(review): username is assembled in a 128-byte stack buffer via
     * unbounded pj_strcpy/pj_strcat; assumes ufrag values are short
     * enough to fit — TODO confirm upstream length limits on ufrag.
     */
    username.ptr = buf;

    /* TX username is "remote-ufrag:local-ufrag" */
    pj_strcpy(&username, rem_ufrag);
    pj_strcat2(&username, ":");
    pj_strcat(&username, &ice->rx_ufrag);

    pj_strdup(ice->pool, &ice->tx_uname, &username);
    pj_strdup(ice->pool, &ice->tx_ufrag, rem_ufrag);
    pj_strdup(ice->pool, &ice->tx_pass, rem_passwd);

    /* RX username is "local-ufrag:remote-ufrag" */
    pj_strcpy(&username, &ice->rx_ufrag);
    pj_strcat2(&username, ":");
    pj_strcat(&username, rem_ufrag);

    pj_strdup(ice->pool, &ice->rx_uname, &username);


    /* Save remote candidates */
    ice->rcand_cnt = 0;
    for (i=0; i<rcand_cnt; ++i) {
        pj_ice_sess_cand *cn = &ice->rcand[ice->rcand_cnt];

        /* Ignore candidate which has no matching component ID */
        if (rcand[i].comp_id==0 || rcand[i].comp_id > ice->comp_cnt) {
            continue;
        }

        if (rcand[i].comp_id > highest_comp)
            highest_comp = rcand[i].comp_id;

        pj_memcpy(cn, &rcand[i], sizeof(pj_ice_sess_cand));
        pj_strdup(ice->pool, &cn->foundation, &rcand[i].foundation);
        ice->rcand_cnt++;
    }

    /* Generate checklist */
    clist = &ice->clist;
    for (i=0; i<ice->lcand_cnt; ++i) {
        for (j=0; j<ice->rcand_cnt; ++j) {

            pj_ice_sess_cand *lcand = &ice->lcand[i];
            pj_ice_sess_cand *rcand = &ice->rcand[j];
            pj_ice_sess_check *chk = &clist->checks[clist->count];

            if (clist->count >= PJ_ICE_MAX_CHECKS) {
                pj_mutex_unlock(ice->mutex);
                return PJ_ETOOMANY;
            }

            /* A local candidate is paired with a remote candidate if
             * and only if the two candidates have the same component ID
             * and have the same IP address version.
             */
            if ((lcand->comp_id != rcand->comp_id) ||
                (lcand->addr.addr.sa_family != rcand->addr.addr.sa_family))
            {
                continue;
            }


            chk->lcand = lcand;
            chk->rcand = rcand;
            chk->state = PJ_ICE_SESS_CHECK_STATE_FROZEN;

            chk->prio = CALC_CHECK_PRIO(ice, lcand, rcand);

            clist->count++;
        }
    }

    /* Sort checklist based on priority */
    sort_checklist(ice, clist);

    /* Prune the checklist */
    status = prune_checklist(ice, clist);
    if (status != PJ_SUCCESS) {
        pj_mutex_unlock(ice->mutex);
        return status;
    }

    /* Disable our components which don't have matching component */
    for (i=highest_comp; i<ice->comp_cnt; ++i) {
        if (ice->comp[i].stun_sess) {
            pj_stun_session_destroy(ice->comp[i].stun_sess);
            pj_bzero(&ice->comp[i], sizeof(ice->comp[i]));
        }
    }
    ice->comp_cnt = highest_comp;

    /* Init timer entry in the checklist. Initially the timer ID is FALSE
     * because timer is not running.
     */
    clist->timer.id = PJ_FALSE;
    td = PJ_POOL_ZALLOC_T(ice->pool, timer_data);
    td->ice = ice;
    td->clist = clist;
    clist->timer.user_data = (void*)td;
    clist->timer.cb = &periodic_timer;


    /* Log checklist */
    dump_checklist("Checklist created:", ice, clist);

    pj_mutex_unlock(ice->mutex);

    return PJ_SUCCESS;
}
+
+/* Perform check on the specified candidate pair. */
+static pj_status_t perform_check(pj_ice_sess *ice,
+ pj_ice_sess_checklist *clist,
+ unsigned check_id,
+ pj_bool_t nominate)
+{
+ pj_ice_sess_comp *comp;
+ pj_ice_msg_data *msg_data;
+ pj_ice_sess_check *check;
+ const pj_ice_sess_cand *lcand;
+ const pj_ice_sess_cand *rcand;
+ pj_uint32_t prio;
+ pj_status_t status;
+
+ check = &clist->checks[check_id];
+ lcand = check->lcand;
+ rcand = check->rcand;
+ comp = find_comp(ice, lcand->comp_id);
+
+ LOG5((ice->obj_name,
+ "Sending connectivity check for check %s",
+ dump_check(ice->tmp.txt, sizeof(ice->tmp.txt), clist, check)));
+ pj_log_push_indent();
+
+ /* Create request */
+ status = pj_stun_session_create_req(comp->stun_sess,
+ PJ_STUN_BINDING_REQUEST, PJ_STUN_MAGIC,
+ NULL, &check->tdata);
+ if (status != PJ_SUCCESS) {
+ pjnath_perror(ice->obj_name, "Error creating STUN request", status);
+ pj_log_pop_indent();
+ return status;
+ }
+
+ /* Attach data to be retrieved later when STUN request transaction
+ * completes and on_stun_request_complete() callback is called.
+ */
+ msg_data = PJ_POOL_ZALLOC_T(check->tdata->pool, pj_ice_msg_data);
+ msg_data->transport_id = lcand->transport_id;
+ msg_data->has_req_data = PJ_TRUE;
+ msg_data->data.req.ice = ice;
+ msg_data->data.req.clist = clist;
+ msg_data->data.req.ckid = check_id;
+
+ /* Add PRIORITY */
+#if PJNATH_ICE_PRIO_STD
+ prio = CALC_CAND_PRIO(ice, PJ_ICE_CAND_TYPE_PRFLX, 65535,
+ lcand->comp_id);
+#else
+ prio = CALC_CAND_PRIO(ice, PJ_ICE_CAND_TYPE_PRFLX, 0,
+ lcand->comp_id);
+#endif
+ pj_stun_msg_add_uint_attr(check->tdata->pool, check->tdata->msg,
+ PJ_STUN_ATTR_PRIORITY, prio);
+
+ /* Add USE-CANDIDATE and set this check to nominated.
+ * Also add ICE-CONTROLLING or ICE-CONTROLLED
+ */
+ if (ice->role == PJ_ICE_SESS_ROLE_CONTROLLING) {
+ if (nominate) {
+ pj_stun_msg_add_empty_attr(check->tdata->pool, check->tdata->msg,
+ PJ_STUN_ATTR_USE_CANDIDATE);
+ check->nominated = PJ_TRUE;
+ }
+
+ pj_stun_msg_add_uint64_attr(check->tdata->pool, check->tdata->msg,
+ PJ_STUN_ATTR_ICE_CONTROLLING,
+ &ice->tie_breaker);
+
+ } else {
+ pj_stun_msg_add_uint64_attr(check->tdata->pool, check->tdata->msg,
+ PJ_STUN_ATTR_ICE_CONTROLLED,
+ &ice->tie_breaker);
+ }
+
+
+ /* Note that USERNAME and MESSAGE-INTEGRITY will be added by the
+ * STUN session.
+ */
+
+ /* Initiate STUN transaction to send the request */
+ status = pj_stun_session_send_msg(comp->stun_sess, msg_data, PJ_FALSE,
+ PJ_TRUE, &rcand->addr,
+ sizeof(pj_sockaddr_in), check->tdata);
+ if (status != PJ_SUCCESS) {
+ check->tdata = NULL;
+ pjnath_perror(ice->obj_name, "Error sending STUN request", status);
+ pj_log_pop_indent();
+ return status;
+ }
+
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_IN_PROGRESS,
+ PJ_SUCCESS);
+ pj_log_pop_indent();
+ return PJ_SUCCESS;
+}
+
+
/* Start periodic check for the specified checklist.
 * This callback is called by timer on every Ta (20msec by default).
 *
 * Each tick performs at most one connectivity check: the first check
 * in Waiting state, or — if none — the first check in Frozen state.
 * The timer is rescheduled only when a check was actually started;
 * when no suitable pair remains the periodic timer simply stops.
 */
static pj_status_t start_periodic_check(pj_timer_heap_t *th,
                                        pj_timer_entry *te)
{
    timer_data *td;
    pj_ice_sess *ice;
    pj_ice_sess_checklist *clist;
    unsigned i, start_count=0;
    pj_status_t status;

    td = (struct timer_data*) te->user_data;
    ice = td->ice;
    clist = td->clist;

    pj_mutex_lock(ice->mutex);

    /* Set timer ID to FALSE first (timer no longer scheduled) */
    te->id = PJ_FALSE;

    /* Set checklist state to Running */
    clist_set_state(ice, clist, PJ_ICE_SESS_CHECKLIST_ST_RUNNING);

    LOG5((ice->obj_name, "Starting checklist periodic check"));
    pj_log_push_indent();

    /* Send STUN Binding request for check with highest priority on
     * Waiting state.
     */
    for (i=0; i<clist->count; ++i) {
        pj_ice_sess_check *check = &clist->checks[i];

        if (check->state == PJ_ICE_SESS_CHECK_STATE_WAITING) {
            status = perform_check(ice, clist, i, ice->is_nominating);
            if (status != PJ_SUCCESS) {
                pj_mutex_unlock(ice->mutex);
                pj_log_pop_indent();
                return status;
            }

            ++start_count;
            break;
        }
    }

    /* If we don't have anything in Waiting state, perform check to
     * highest priority pair that is in Frozen state.
     */
    if (start_count==0) {
        for (i=0; i<clist->count; ++i) {
            pj_ice_sess_check *check = &clist->checks[i];

            if (check->state == PJ_ICE_SESS_CHECK_STATE_FROZEN) {
                status = perform_check(ice, clist, i, ice->is_nominating);
                if (status != PJ_SUCCESS) {
                    pj_mutex_unlock(ice->mutex);
                    pj_log_pop_indent();
                    return status;
                }

                ++start_count;
                break;
            }
        }
    }

    /* If a check was started, schedule the next tick after Ta.
     * Otherwise no suitable candidate pair remains and the timer is
     * intentionally not rescheduled.
     */
    if (start_count!=0) {
        /* Schedule for next timer */
        pj_time_val timeout = {0, PJ_ICE_TA_VAL};

        te->id = PJ_TRUE;
        pj_time_val_normalize(&timeout);
        pj_timer_heap_schedule(th, te, &timeout);
    }

    pj_mutex_unlock(ice->mutex);
    pj_log_pop_indent();
    return PJ_SUCCESS;
}
+
+
/* Start sending connectivity check with USE-CANDIDATE.
 *
 * For every component, re-arms the checklist entry corresponding to
 * that component's valid check (matching transport and remote
 * candidate) back to Waiting state, then (re)starts the periodic
 * check timer with zero delay and marks the session as nominating.
 */
static void start_nominated_check(pj_ice_sess *ice)
{
    pj_time_val delay;
    unsigned i;
    pj_status_t status;

    LOG4((ice->obj_name, "Starting nominated check.."));
    pj_log_push_indent();

    pj_assert(ice->is_nominating == PJ_FALSE);

    /* Stop our timer if it's active */
    if (ice->timer.id == TIMER_START_NOMINATED_CHECK) {
        pj_timer_heap_cancel(ice->stun_cfg.timer_heap, &ice->timer);
        ice->timer.id = TIMER_NONE;
    }

    /* For each component, set the check state of valid check with
     * highest priority to Waiting (it should have Success state now).
     */
    for (i=0; i<ice->comp_cnt; ++i) {
        unsigned j;
        const pj_ice_sess_check *vc = ice->comp[i].valid_check;

        pj_assert(ice->comp[i].nominated_check == NULL);
        pj_assert(vc->err_code == PJ_SUCCESS);

        for (j=0; j<ice->clist.count; ++j) {
            pj_ice_sess_check *c = &ice->clist.checks[j];
            if (c->lcand->transport_id == vc->lcand->transport_id &&
                c->rcand == vc->rcand)
            {
                pj_assert(c->err_code == PJ_SUCCESS);
                /* Reset state directly to FROZEN first: check_set_state()
                 * asserts that the current state is below SUCCEEDED, and
                 * this check has already succeeded.
                 */
                c->state = PJ_ICE_SESS_CHECK_STATE_FROZEN;
                check_set_state(ice, c, PJ_ICE_SESS_CHECK_STATE_WAITING,
                                PJ_SUCCESS);
                break;
            }
        }
    }

    /* And (re)start the periodic check */
    if (ice->clist.timer.id) {
        pj_timer_heap_cancel(ice->stun_cfg.timer_heap, &ice->clist.timer);
        ice->clist.timer.id = PJ_FALSE;
    }

    ice->clist.timer.id = PJ_TRUE;
    delay.sec = delay.msec = 0;
    status = pj_timer_heap_schedule(ice->stun_cfg.timer_heap,
                                    &ice->clist.timer, &delay);
    if (status != PJ_SUCCESS) {
        ice->clist.timer.id = PJ_FALSE;
    } else {
        LOG5((ice->obj_name, "Periodic timer rescheduled.."));
    }

    ice->is_nominating = PJ_TRUE;
    pj_log_pop_indent();
}
+
/* Timer callback to perform periodic check.
 * Thin adapter: the timer heap expects a void callback, while
 * start_periodic_check() returns a pj_status_t that is ignored here.
 */
static void periodic_timer(pj_timer_heap_t *th,
                           pj_timer_entry *te)
{
    start_periodic_check(th, te);
}
+
+
+/* Utility: find string in string array */
+const pj_str_t *find_str(const pj_str_t *strlist[], unsigned count,
+ const pj_str_t *str)
+{
+ unsigned i;
+ for (i=0; i<count; ++i) {
+ if (pj_strcmp(strlist[i], str)==0)
+ return strlist[i];
+ }
+ return NULL;
+}
+
+
+/*
+ * Start ICE periodic check. This function will return immediately, and
+ * application will be notified about the connectivity check status in
+ * #pj_ice_sess_cb callback.
+ */
+PJ_DEF(pj_status_t) pj_ice_sess_start_check(pj_ice_sess *ice)
+{
+ pj_ice_sess_checklist *clist;
+ const pj_ice_sess_cand *cand0;
+ const pj_str_t *flist[PJ_ICE_MAX_CAND]; // XXX
+ pj_ice_rx_check *rcheck;
+ unsigned i, flist_cnt = 0;
+ pj_time_val delay;
+ pj_status_t status;
+
+ PJ_ASSERT_RETURN(ice, PJ_EINVAL);
+
+ /* Checklist must have been created */
+ PJ_ASSERT_RETURN(ice->clist.count > 0, PJ_EINVALIDOP);
+
+ /* Lock session */
+ pj_mutex_lock(ice->mutex);
+
+ LOG4((ice->obj_name, "Starting ICE check.."));
+ pj_log_push_indent();
+
+ /* If we are using aggressive nomination, set the is_nominating state */
+ if (ice->opt.aggressive)
+ ice->is_nominating = PJ_TRUE;
+
+ /* The agent examines the check list for the first media stream (a
+ * media stream is the first media stream when it is described by
+ * the first m-line in the SDP offer and answer). For that media
+ * stream, it:
+ *
+ * - Groups together all of the pairs with the same foundation,
+ *
+ * - For each group, sets the state of the pair with the lowest
+ * component ID to Waiting. If there is more than one such pair,
+ * the one with the highest priority is used.
+ */
+
+ clist = &ice->clist;
+
+ /* Pickup the first pair for component 1. */
+ for (i=0; i<clist->count; ++i) {
+ if (clist->checks[i].lcand->comp_id == 1)
+ break;
+ }
+ if (i == clist->count) {
+ pj_assert(!"Unable to find checklist for component 1");
+ pj_mutex_unlock(ice->mutex);
+ pj_log_pop_indent();
+ return PJNATH_EICEINCOMPID;
+ }
+
+ /* Set this check to WAITING only if state is frozen. It may be possible
+ * that this check has already been started by a trigger check
+ */
+ if (clist->checks[i].state == PJ_ICE_SESS_CHECK_STATE_FROZEN) {
+ check_set_state(ice, &clist->checks[i],
+ PJ_ICE_SESS_CHECK_STATE_WAITING, PJ_SUCCESS);
+ }
+
+ cand0 = clist->checks[i].lcand;
+ flist[flist_cnt++] = &clist->checks[i].lcand->foundation;
+
+ /* Find all of the other pairs in that check list with the same
+ * component ID, but different foundations, and sets all of their
+ * states to Waiting as well.
+ */
+ for (++i; i<clist->count; ++i) {
+ const pj_ice_sess_cand *cand1;
+
+ cand1 = clist->checks[i].lcand;
+
+ if (cand1->comp_id==cand0->comp_id &&
+ find_str(flist, flist_cnt, &cand1->foundation)==NULL)
+ {
+ if (clist->checks[i].state == PJ_ICE_SESS_CHECK_STATE_FROZEN) {
+ check_set_state(ice, &clist->checks[i],
+ PJ_ICE_SESS_CHECK_STATE_WAITING, PJ_SUCCESS);
+ }
+ flist[flist_cnt++] = &cand1->foundation;
+ }
+ }
+
+ /* First, perform all pending triggered checks, simultaneously. */
+ rcheck = ice->early_check.next;
+ while (rcheck != &ice->early_check) {
+ LOG4((ice->obj_name,
+ "Performing delayed triggerred check for component %d",
+ rcheck->comp_id));
+ pj_log_push_indent();
+ handle_incoming_check(ice, rcheck);
+ rcheck = rcheck->next;
+ pj_log_pop_indent();
+ }
+ pj_list_init(&ice->early_check);
+
+ /* Start periodic check */
+ /* We could start it immediately like below, but lets schedule timer
+ * instead to reduce stack usage:
+ * return start_periodic_check(ice->stun_cfg.timer_heap, &clist->timer);
+ */
+ clist->timer.id = PJ_TRUE;
+ delay.sec = delay.msec = 0;
+ status = pj_timer_heap_schedule(ice->stun_cfg.timer_heap,
+ &clist->timer, &delay);
+ if (status != PJ_SUCCESS) {
+ clist->timer.id = PJ_FALSE;
+ }
+
+ pj_mutex_unlock(ice->mutex);
+ pj_log_pop_indent();
+ return status;
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+
+/* Callback called by STUN session to send the STUN message.
+ * STUN session also doesn't have a transport, remember?!
+ */
+static pj_status_t on_stun_send_msg(pj_stun_session *sess,
+ void *token,
+ const void *pkt,
+ pj_size_t pkt_size,
+ const pj_sockaddr_t *dst_addr,
+ unsigned addr_len)
+{
+ /* The STUN session's user data is the per-component stun_data,
+ * which carries the owning ICE session and the component ID.
+ */
+ stun_data *sd = (stun_data*) pj_stun_session_get_user_data(sess);
+ pj_ice_sess *ice = sd->ice;
+ /* The token passed to the STUN session is our pj_ice_msg_data; its
+ * transport_id tells the application which transport to send on.
+ */
+ pj_ice_msg_data *msg_data = (pj_ice_msg_data*) token;
+
+ /* Delegate the actual transmission to the application's on_tx_pkt()
+ * callback; the return value is propagated back to the STUN session.
+ */
+ return (*ice->cb.on_tx_pkt)(ice, sd->comp_id, msg_data->transport_id,
+ pkt, pkt_size, dst_addr, addr_len);
+}
+
+
+/* This callback is called when outgoing STUN request completed.
+ *
+ * Implements RFC 5245 sec 7.1.2 (processing the response to a
+ * connectivity check): handles role-conflict (487) retries, validates
+ * the response source address, discovers peer reflexive candidates
+ * from XOR-MAPPED-ADDRESS, and adds/updates the pair in the valid list.
+ */
+static void on_stun_request_complete(pj_stun_session *stun_sess,
+ pj_status_t status,
+ void *token,
+ pj_stun_tx_data *tdata,
+ const pj_stun_msg *response,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len)
+{
+ pj_ice_msg_data *msg_data = (pj_ice_msg_data*) token;
+ pj_ice_sess *ice;
+ pj_ice_sess_check *check, *new_check;
+ pj_ice_sess_cand *lcand;
+ pj_ice_sess_checklist *clist;
+ pj_stun_xor_mapped_addr_attr *xaddr;
+ unsigned i;
+
+ PJ_UNUSED_ARG(stun_sess);
+ PJ_UNUSED_ARG(src_addr_len);
+
+ /* The request-side msg_data carries the ICE session, the checklist,
+ * and the index of the check that originated this transaction.
+ */
+ pj_assert(msg_data->has_req_data);
+
+ ice = msg_data->data.req.ice;
+ clist = msg_data->data.req.clist;
+ check = &clist->checks[msg_data->data.req.ckid];
+
+
+ /* Mark STUN transaction as complete.
+ * NOTE(review): check->tdata is cleared before ice->mutex is
+ * acquired below — presumably safe because this callback runs from
+ * the same context that manipulates the check, but worth confirming.
+ */
+ pj_assert(tdata == check->tdata);
+ check->tdata = NULL;
+
+ pj_mutex_lock(ice->mutex);
+
+ /* Init lcand to NULL. lcand will be found from the mapped address
+ * found in the response.
+ */
+ lcand = NULL;
+
+ if (status != PJ_SUCCESS) {
+ char errmsg[PJ_ERR_MSG_SIZE];
+
+ if (status==PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_ROLE_CONFLICT)) {
+
+ /* Role conflict response.
+ *
+ * 7.1.2.1. Failure Cases:
+ *
+ * If the request had contained the ICE-CONTROLLED attribute,
+ * the agent MUST switch to the controlling role if it has not
+ * already done so. If the request had contained the
+ * ICE-CONTROLLING attribute, the agent MUST switch to the
+ * controlled role if it has not already done so. Once it has
+ * switched, the agent MUST immediately retry the request with
+ * the ICE-CONTROLLING or ICE-CONTROLLED attribute reflecting
+ * its new role.
+ */
+ pj_ice_sess_role new_role = PJ_ICE_SESS_ROLE_UNKNOWN;
+ pj_stun_msg *req = tdata->msg;
+
+ /* Derive the new role from whichever role attribute we placed
+ * in the original request.
+ */
+ if (pj_stun_msg_find_attr(req, PJ_STUN_ATTR_ICE_CONTROLLING, 0)) {
+ new_role = PJ_ICE_SESS_ROLE_CONTROLLED;
+ } else if (pj_stun_msg_find_attr(req, PJ_STUN_ATTR_ICE_CONTROLLED,
+ 0)) {
+ new_role = PJ_ICE_SESS_ROLE_CONTROLLING;
+ } else {
+ pj_assert(!"We should have put CONTROLLING/CONTROLLED attr!");
+ new_role = PJ_ICE_SESS_ROLE_CONTROLLED;
+ }
+
+ if (new_role != ice->role) {
+ LOG4((ice->obj_name,
+ "Changing role because of role conflict response"));
+ pj_ice_sess_change_role(ice, new_role);
+ }
+
+ /* Resend request with the attribute reflecting the new role */
+ LOG4((ice->obj_name, "Resending check because of role conflict"));
+ pj_log_push_indent();
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_WAITING, 0);
+ perform_check(ice, clist, msg_data->data.req.ckid,
+ check->nominated || ice->is_nominating);
+ pj_log_pop_indent();
+ pj_mutex_unlock(ice->mutex);
+ return;
+ }
+
+ /* Any other error: the check has failed. on_check_complete() may
+ * conclude the whole ICE processing.
+ */
+ pj_strerror(status, errmsg, sizeof(errmsg));
+ LOG4((ice->obj_name,
+ "Check %s%s: connectivity check FAILED: %s",
+ dump_check(ice->tmp.txt, sizeof(ice->tmp.txt),
+ &ice->clist, check),
+ (check->nominated ? " (nominated)" : " (not nominated)"),
+ errmsg));
+ pj_log_push_indent();
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_FAILED, status);
+ on_check_complete(ice, check);
+ pj_log_pop_indent();
+ pj_mutex_unlock(ice->mutex);
+ return;
+ }
+
+
+ /* 7.1.2.1. Failure Cases
+ *
+ * The agent MUST check that the source IP address and port of the
+ * response equals the destination IP address and port that the Binding
+ * Request was sent to, and that the destination IP address and port of
+ * the response match the source IP address and port that the Binding
+ * Request was sent from.
+ */
+ if (sockaddr_cmp(&check->rcand->addr, (const pj_sockaddr*)src_addr) != 0) {
+ status = PJNATH_EICEINSRCADDR;
+ LOG4((ice->obj_name,
+ "Check %s%s: connectivity check FAILED: source address mismatch",
+ dump_check(ice->tmp.txt, sizeof(ice->tmp.txt),
+ &ice->clist, check),
+ (check->nominated ? " (nominated)" : " (not nominated)")));
+ pj_log_push_indent();
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_FAILED, status);
+ on_check_complete(ice, check);
+ pj_log_pop_indent();
+ pj_mutex_unlock(ice->mutex);
+ return;
+ }
+
+ /* 7.1.2.2. Success Cases
+ *
+ * A check is considered to be a success if all of the following are
+ * true:
+ *
+ * o the STUN transaction generated a success response
+ *
+ * o the source IP address and port of the response equals the
+ * destination IP address and port that the Binding Request was sent
+ * to
+ *
+ * o the destination IP address and port of the response match the
+ * source IP address and port that the Binding Request was sent from
+ */
+
+
+ LOG4((ice->obj_name,
+ "Check %s%s: connectivity check SUCCESS",
+ dump_check(ice->tmp.txt, sizeof(ice->tmp.txt),
+ &ice->clist, check),
+ (check->nominated ? " (nominated)" : " (not nominated)")));
+
+ /* Get the STUN XOR-MAPPED-ADDRESS attribute. */
+ xaddr = (pj_stun_xor_mapped_addr_attr*)
+ pj_stun_msg_find_attr(response, PJ_STUN_ATTR_XOR_MAPPED_ADDR,0);
+ if (!xaddr) {
+ /* A valid success response must carry XOR-MAPPED-ADDRESS */
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_FAILED,
+ PJNATH_ESTUNNOMAPPEDADDR);
+ on_check_complete(ice, check);
+ pj_mutex_unlock(ice->mutex);
+ return;
+ }
+
+ /* Find local candidate that matches the XOR-MAPPED-ADDRESS */
+ pj_assert(lcand == NULL);
+ for (i=0; i<ice->lcand_cnt; ++i) {
+ if (sockaddr_cmp(&xaddr->sockaddr, &ice->lcand[i].addr) == 0) {
+ /* Match */
+ lcand = &ice->lcand[i];
+ break;
+ }
+ }
+
+ /* 7.1.2.2.1. Discovering Peer Reflexive Candidates
+ * If the transport address returned in XOR-MAPPED-ADDRESS does not match
+ * any of the local candidates that the agent knows about, the mapped
+ * address represents a new candidate - a peer reflexive candidate.
+ */
+ if (lcand == NULL) {
+ unsigned cand_id;
+ pj_str_t foundation;
+
+ pj_ice_calc_foundation(ice->pool, &foundation, PJ_ICE_CAND_TYPE_PRFLX,
+ &check->lcand->base_addr);
+
+ /* Still in 7.1.2.2.1. Discovering Peer Reflexive Candidates
+ * Its priority is set equal to the value of the PRIORITY attribute
+ * in the Binding Request.
+ *
+ * I think the priority calculated by add_cand() should be the same
+ * as the one calculated in perform_check(), so there's no need to
+ * get the priority from the PRIORITY attribute.
+ */
+
+ /* Add new peer reflexive candidate.
+ * NOTE(review): 65535 here is the local preference argument and the
+ * address length is hard-coded to pj_sockaddr_in (IPv4 only) —
+ * presumably intentional for this version; verify for IPv6 support.
+ */
+ status = pj_ice_sess_add_cand(ice, check->lcand->comp_id,
+ msg_data->transport_id,
+ PJ_ICE_CAND_TYPE_PRFLX,
+ 65535, &foundation,
+ &xaddr->sockaddr,
+ &check->lcand->base_addr,
+ &check->lcand->base_addr,
+ sizeof(pj_sockaddr_in), &cand_id);
+ if (status != PJ_SUCCESS) {
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_FAILED,
+ status);
+ on_check_complete(ice, check);
+ pj_mutex_unlock(ice->mutex);
+ return;
+ }
+
+ /* Update local candidate */
+ lcand = &ice->lcand[cand_id];
+
+ }
+
+ /* 7.1.2.2.3. Constructing a Valid Pair
+ * Next, the agent constructs a candidate pair whose local candidate
+ * equals the mapped address of the response, and whose remote candidate
+ * equals the destination address to which the request was sent.
+ */
+
+ /* Add pair to valid list, if it's not there, otherwise just update
+ * nominated flag
+ */
+ for (i=0; i<ice->valid_list.count; ++i) {
+ if (ice->valid_list.checks[i].lcand == lcand &&
+ ice->valid_list.checks[i].rcand == check->rcand)
+ break;
+ }
+
+ if (i==ice->valid_list.count) {
+ pj_assert(ice->valid_list.count < PJ_ICE_MAX_CHECKS);
+ new_check = &ice->valid_list.checks[ice->valid_list.count++];
+ new_check->lcand = lcand;
+ new_check->rcand = check->rcand;
+ new_check->prio = CALC_CHECK_PRIO(ice, lcand, check->rcand);
+ new_check->state = PJ_ICE_SESS_CHECK_STATE_SUCCEEDED;
+ new_check->nominated = check->nominated;
+ new_check->err_code = PJ_SUCCESS;
+ } else {
+ new_check = &ice->valid_list.checks[i];
+ ice->valid_list.checks[i].nominated = check->nominated;
+ }
+
+ /* Update valid check and nominated check for the component */
+ update_comp_check(ice, new_check->lcand->comp_id, new_check);
+
+ /* Sort valid_list (must do so after update_comp_check(), otherwise
+ * new_check will point to something else (#953)
+ */
+ sort_checklist(ice, &ice->valid_list);
+
+ /* 7.1.2.2.2. Updating Pair States
+ *
+ * The agent sets the state of the pair that generated the check to
+ * Succeeded. The success of this check might also cause the state of
+ * other checks to change as well.
+ */
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_SUCCEEDED,
+ PJ_SUCCESS);
+
+ /* Perform 7.1.2.2.2. Updating Pair States.
+ * This may terminate ICE processing.
+ */
+ if (on_check_complete(ice, check)) {
+ /* ICE complete! */
+ pj_mutex_unlock(ice->mutex);
+ return;
+ }
+
+ pj_mutex_unlock(ice->mutex);
+}
+
+
+/* This callback is called by the STUN session associated with a candidate
+ * when it receives incoming request.
+ *
+ * Implements RFC 5245 sec 7.2 (receiving a connectivity check): detects
+ * and repairs role conflicts, sends the Binding response with the
+ * XOR-MAPPED-ADDRESS, then either performs a triggered check or — if the
+ * SDP answer (remote candidates) hasn't arrived yet — queues the check
+ * in ice->early_check for later processing.
+ */
+static pj_status_t on_stun_rx_request(pj_stun_session *sess,
+ const pj_uint8_t *pkt,
+ unsigned pkt_len,
+ const pj_stun_rx_data *rdata,
+ void *token,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len)
+{
+ stun_data *sd;
+ const pj_stun_msg *msg = rdata->msg;
+ pj_ice_msg_data *msg_data;
+ pj_ice_sess *ice;
+ pj_stun_priority_attr *prio_attr;
+ pj_stun_use_candidate_attr *uc_attr;
+ pj_stun_uint64_attr *role_attr;
+ pj_stun_tx_data *tdata;
+ pj_ice_rx_check *rcheck, tmp_rcheck;
+ pj_status_t status;
+
+ PJ_UNUSED_ARG(pkt);
+ PJ_UNUSED_ARG(pkt_len);
+
+ /* Reject any requests except Binding request */
+ if (msg->hdr.type != PJ_STUN_BINDING_REQUEST) {
+ pj_stun_session_respond(sess, rdata, PJ_STUN_SC_BAD_REQUEST,
+ NULL, token, PJ_TRUE,
+ src_addr, src_addr_len);
+ return PJ_SUCCESS;
+ }
+
+
+ sd = (stun_data*) pj_stun_session_get_user_data(sess);
+ ice = sd->ice;
+
+ pj_mutex_lock(ice->mutex);
+
+ /*
+ * Note:
+ * Be aware that when STUN request is received, we might not get
+ * SDP answer yet, so we might not have remote candidates and
+ * checklist yet. This case will be handled after we send
+ * a response.
+ */
+
+ /* Get PRIORITY attribute; it is mandatory for ICE checks, so a
+ * request without it is silently ignored.
+ */
+ prio_attr = (pj_stun_priority_attr*)
+ pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_PRIORITY, 0);
+ if (prio_attr == NULL) {
+ LOG5((ice->obj_name, "Received Binding request with no PRIORITY"));
+ pj_mutex_unlock(ice->mutex);
+ return PJ_SUCCESS;
+ }
+
+ /* Get USE-CANDIDATE attribute (optional; presence means the
+ * controlling agent is nominating this pair).
+ */
+ uc_attr = (pj_stun_use_candidate_attr*)
+ pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_USE_CANDIDATE, 0);
+
+
+ /* Get ICE-CONTROLLING or ICE-CONTROLLED (whichever is present) */
+ role_attr = (pj_stun_uint64_attr*)
+ pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_ICE_CONTROLLING, 0);
+ if (role_attr == NULL) {
+ role_attr = (pj_stun_uint64_attr*)
+ pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_ICE_CONTROLLED, 0);
+ }
+
+ /* Handle the case when request comes before answer is received.
+ * We need to put credential in the response, and since we haven't
+ * got the response, copy the username from the request.
+ */
+ if (ice->rcand_cnt == 0) {
+ pj_stun_string_attr *uname_attr;
+
+ uname_attr = (pj_stun_string_attr*)
+ pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_USERNAME, 0);
+ pj_assert(uname_attr != NULL);
+ pj_strdup(ice->pool, &ice->rx_uname, &uname_attr->value);
+ }
+
+ /* 7.2.1.1. Detecting and Repairing Role Conflicts
+ * Tie-breaker comparison decides which side switches roles; the
+ * loser of the comparison gets a 487 (Role Conflict) response.
+ */
+ if (ice->role == PJ_ICE_SESS_ROLE_CONTROLLING &&
+ role_attr && role_attr->hdr.type == PJ_STUN_ATTR_ICE_CONTROLLING)
+ {
+ if (pj_cmp_timestamp(&ice->tie_breaker, &role_attr->value) < 0) {
+ /* Switch role to controlled */
+ LOG4((ice->obj_name,
+ "Changing role because of ICE-CONTROLLING attribute"));
+ pj_ice_sess_change_role(ice, PJ_ICE_SESS_ROLE_CONTROLLED);
+ } else {
+ /* Generate 487 response */
+ pj_stun_session_respond(sess, rdata, PJ_STUN_SC_ROLE_CONFLICT,
+ NULL, token, PJ_TRUE,
+ src_addr, src_addr_len);
+ pj_mutex_unlock(ice->mutex);
+ return PJ_SUCCESS;
+ }
+
+ } else if (ice->role == PJ_ICE_SESS_ROLE_CONTROLLED &&
+ role_attr && role_attr->hdr.type == PJ_STUN_ATTR_ICE_CONTROLLED)
+ {
+ if (pj_cmp_timestamp(&ice->tie_breaker, &role_attr->value) < 0) {
+ /* Generate 487 response */
+ pj_stun_session_respond(sess, rdata, PJ_STUN_SC_ROLE_CONFLICT,
+ NULL, token, PJ_TRUE,
+ src_addr, src_addr_len);
+ pj_mutex_unlock(ice->mutex);
+ return PJ_SUCCESS;
+ } else {
+ /* Switch role to controlling */
+ LOG4((ice->obj_name,
+ "Changing role because of ICE-CONTROLLED attribute"));
+ pj_ice_sess_change_role(ice, PJ_ICE_SESS_ROLE_CONTROLLING);
+ }
+ }
+
+ /*
+ * First send response to this request
+ */
+ status = pj_stun_session_create_res(sess, rdata, 0, NULL, &tdata);
+ if (status != PJ_SUCCESS) {
+ pj_mutex_unlock(ice->mutex);
+ return status;
+ }
+
+ /* Add XOR-MAPPED-ADDRESS attribute.
+ * NOTE(review): this status and the send status below are not
+ * checked before proceeding to the triggered-check handling —
+ * presumably best-effort by design; confirm against callers.
+ */
+ status = pj_stun_msg_add_sockaddr_attr(tdata->pool, tdata->msg,
+ PJ_STUN_ATTR_XOR_MAPPED_ADDR,
+ PJ_TRUE, src_addr, src_addr_len);
+
+ /* Create a msg_data to be associated with this response */
+ msg_data = PJ_POOL_ZALLOC_T(tdata->pool, pj_ice_msg_data);
+ msg_data->transport_id = ((pj_ice_msg_data*)token)->transport_id;
+ msg_data->has_req_data = PJ_FALSE;
+
+ /* Send the response */
+ status = pj_stun_session_send_msg(sess, msg_data, PJ_TRUE, PJ_TRUE,
+ src_addr, src_addr_len, tdata);
+
+
+ /*
+ * Handling early check.
+ *
+ * It's possible that we receive this request before we receive SDP
+ * answer. In this case, we can't perform trigger check since we
+ * don't have checklist yet, so just save this check in a pending
+ * triggered check array to be acted upon later.
+ */
+ if (ice->rcand_cnt == 0) {
+ /* Pool-allocated: the rcheck must outlive this callback because
+ * it is pushed to the early_check list below.
+ */
+ rcheck = PJ_POOL_ZALLOC_T(ice->pool, pj_ice_rx_check);
+ } else {
+ /* Stack-allocated: consumed synchronously by handle_incoming_check() */
+ rcheck = &tmp_rcheck;
+ }
+
+ /* Init rcheck */
+ rcheck->comp_id = sd->comp_id;
+ rcheck->transport_id = ((pj_ice_msg_data*)token)->transport_id;
+ rcheck->src_addr_len = src_addr_len;
+ pj_memcpy(&rcheck->src_addr, src_addr, src_addr_len);
+ rcheck->use_candidate = (uc_attr != NULL);
+ rcheck->priority = prio_attr->value;
+ rcheck->role_attr = role_attr;
+
+ if (ice->rcand_cnt == 0) {
+ /* We don't have answer yet, so keep this request for later */
+ LOG4((ice->obj_name, "Received an early check for comp %d",
+ rcheck->comp_id));
+ pj_list_push_back(&ice->early_check, rcheck);
+ } else {
+ /* Handle this check */
+ handle_incoming_check(ice, rcheck);
+ }
+
+ pj_mutex_unlock(ice->mutex);
+ return PJ_SUCCESS;
+}
+
+
+/* Handle incoming Binding request and perform triggered check.
+ * This function may be called by on_stun_rx_request(), or when
+ * SDP answer is received and we have received early checks.
+ *
+ * Implements RFC 5245 sec 7.2.1.3 (learning peer reflexive remote
+ * candidates) and 7.2.1.4 (triggered checks). Called with ice->mutex
+ * held by the caller (both call sites lock it).
+ */
+static void handle_incoming_check(pj_ice_sess *ice,
+ const pj_ice_rx_check *rcheck)
+{
+ pj_ice_sess_comp *comp;
+ pj_ice_sess_cand *lcand = NULL;
+ pj_ice_sess_cand *rcand;
+ unsigned i;
+
+ comp = find_comp(ice, rcheck->comp_id);
+
+ /* Find remote candidate based on the source transport address of
+ * the request.
+ */
+ for (i=0; i<ice->rcand_cnt; ++i) {
+ if (sockaddr_cmp(&rcheck->src_addr, &ice->rcand[i].addr)==0)
+ break;
+ }
+
+ /* 7.2.1.3. Learning Peer Reflexive Candidates
+ * If the source transport address of the request does not match any
+ * existing remote candidates, it represents a new peer reflexive remote
+ * candidate.
+ */
+ if (i == ice->rcand_cnt) {
+ if (ice->rcand_cnt >= PJ_ICE_MAX_CAND) {
+ LOG4((ice->obj_name,
+ "Unable to add new peer reflexive candidate: too many "
+ "candidates already (%d)", PJ_ICE_MAX_CAND));
+ return;
+ }
+
+ rcand = &ice->rcand[ice->rcand_cnt++];
+ rcand->comp_id = (pj_uint8_t)rcheck->comp_id;
+ rcand->type = PJ_ICE_CAND_TYPE_PRFLX;
+ /* Priority comes straight from the PRIORITY attribute of the
+ * incoming request, per 7.2.1.3.
+ */
+ rcand->prio = rcheck->priority;
+ pj_memcpy(&rcand->addr, &rcheck->src_addr, rcheck->src_addr_len);
+
+ /* Foundation is random, unique from other foundation.
+ * (Uses the pointer value itself to make a unique string.)
+ */
+ rcand->foundation.ptr = (char*) pj_pool_alloc(ice->pool, 36);
+ rcand->foundation.slen = pj_ansi_snprintf(rcand->foundation.ptr, 36,
+ "f%p",
+ rcand->foundation.ptr);
+
+ LOG4((ice->obj_name,
+ "Added new remote candidate from the request: %s:%d",
+ pj_inet_ntoa(rcand->addr.ipv4.sin_addr),
+ (int)pj_ntohs(rcand->addr.ipv4.sin_port)));
+
+ } else {
+ /* Remote candidate found */
+ rcand = &ice->rcand[i];
+ }
+
+#if 0
+ /* Find again the local candidate by matching the base address
+ * with the local candidates in the checklist. Checks may have
+ * been pruned before, so it's possible that if we use the lcand
+ * as it is, we wouldn't be able to find the check in the checklist
+ * and we will end up creating a new check unnecessarily.
+ */
+ for (i=0; i<ice->clist.count; ++i) {
+ pj_ice_sess_check *c = &ice->clist.checks[i];
+ if (/*c->lcand == lcand ||*/
+ sockaddr_cmp(&c->lcand->base_addr, &lcand->base_addr)==0)
+ {
+ lcand = c->lcand;
+ break;
+ }
+ }
+#else
+ /* Just get candidate with the highest priority and same transport ID
+ * for the specified component ID in the checklist.
+ * (Checklist is sorted by priority, so the first match wins.)
+ */
+ for (i=0; i<ice->clist.count; ++i) {
+ pj_ice_sess_check *c = &ice->clist.checks[i];
+ if (c->lcand->comp_id == rcheck->comp_id &&
+ c->lcand->transport_id == rcheck->transport_id)
+ {
+ lcand = c->lcand;
+ break;
+ }
+ }
+ if (lcand == NULL) {
+ /* Should not happen, but just in case remote is sending a
+ * Binding request for a component which it doesn't have.
+ */
+ LOG4((ice->obj_name,
+ "Received Binding request but no local candidate is found!"));
+ return;
+ }
+#endif
+
+ /*
+ * Create candidate pair for this request.
+ */
+
+ /*
+ * 7.2.1.4. Triggered Checks
+ *
+ * Now that we have local and remote candidate, check if we already
+ * have this pair in our checklist.
+ */
+ for (i=0; i<ice->clist.count; ++i) {
+ pj_ice_sess_check *c = &ice->clist.checks[i];
+ if (c->lcand == lcand && c->rcand == rcand)
+ break;
+ }
+
+ /* If the pair is already on the check list:
+ * - If the state of that pair is Waiting or Frozen, its state is
+ * changed to In-Progress and a check for that pair is performed
+ * immediately. This is called a triggered check.
+ *
+ * - If the state of that pair is In-Progress, the agent SHOULD
+ * generate an immediate retransmit of the Binding Request for the
+ * check in progress. This is to facilitate rapid completion of
+ * ICE when both agents are behind NAT.
+ *
+ * - If the state of that pair is Failed or Succeeded, no triggered
+ * check is sent.
+ */
+ if (i != ice->clist.count) {
+ pj_ice_sess_check *c = &ice->clist.checks[i];
+
+ /* If USE-CANDIDATE is present, set nominated flag
+ * Note: DO NOT overwrite nominated flag if one is already set.
+ */
+ c->nominated = ((rcheck->use_candidate) || c->nominated);
+
+ if (c->state == PJ_ICE_SESS_CHECK_STATE_FROZEN ||
+ c->state == PJ_ICE_SESS_CHECK_STATE_WAITING)
+ {
+ /* See if we shall nominate this check */
+ pj_bool_t nominate = (c->nominated || ice->is_nominating);
+
+ LOG5((ice->obj_name, "Performing triggered check for check %d",i));
+ pj_log_push_indent();
+ perform_check(ice, &ice->clist, i, nominate);
+ pj_log_pop_indent();
+
+ } else if (c->state == PJ_ICE_SESS_CHECK_STATE_IN_PROGRESS) {
+ /* Should retransmit immediately
+ */
+ LOG5((ice->obj_name, "Triggered check for check %d not performed "
+ "because it's in progress. Retransmitting", i));
+ pj_log_push_indent();
+ pj_stun_session_retransmit_req(comp->stun_sess, c->tdata);
+ pj_log_pop_indent();
+
+ } else if (c->state == PJ_ICE_SESS_CHECK_STATE_SUCCEEDED) {
+ /* Check complete for this component.
+ * Note this may end ICE process.
+ */
+ pj_bool_t complete;
+ unsigned j;
+
+ /* If this check is nominated, scan the valid_list for the
+ * same check and update the nominated flag. A controlled
+ * agent might have finished the check earlier.
+ */
+ if (rcheck->use_candidate) {
+ for (j=0; j<ice->valid_list.count; ++j) {
+ pj_ice_sess_check *vc = &ice->valid_list.checks[j];
+ if (vc->lcand->transport_id == c->lcand->transport_id &&
+ vc->rcand == c->rcand)
+ {
+ /* Set nominated flag */
+ vc->nominated = PJ_TRUE;
+
+ /* Update valid check and nominated check for the component */
+ update_comp_check(ice, vc->lcand->comp_id, vc);
+
+ LOG5((ice->obj_name, "Valid check %s is nominated",
+ dump_check(ice->tmp.txt, sizeof(ice->tmp.txt),
+ &ice->valid_list, vc)));
+ }
+ }
+ }
+
+ LOG5((ice->obj_name, "Triggered check for check %d not performed "
+ "because it's completed", i));
+ pj_log_push_indent();
+ complete = on_check_complete(ice, c);
+ pj_log_pop_indent();
+ if (complete) {
+ return;
+ }
+ }
+
+ }
+ /* If the pair is not already on the check list:
+ * - The pair is inserted into the check list based on its priority.
+ * - Its state is set to In-Progress
+ * - A triggered check for that pair is performed immediately.
+ */
+ /* Note: only do this if we don't have too many checks in checklist */
+ else if (ice->clist.count < PJ_ICE_MAX_CHECKS) {
+
+ pj_ice_sess_check *c = &ice->clist.checks[ice->clist.count];
+ pj_bool_t nominate;
+
+ c->lcand = lcand;
+ c->rcand = rcand;
+ c->prio = CALC_CHECK_PRIO(ice, lcand, rcand);
+ c->state = PJ_ICE_SESS_CHECK_STATE_WAITING;
+ c->nominated = rcheck->use_candidate;
+ c->err_code = PJ_SUCCESS;
+
+ nominate = (c->nominated || ice->is_nominating);
+
+ LOG4((ice->obj_name, "New triggered check added: %d",
+ ice->clist.count));
+ pj_log_push_indent();
+ perform_check(ice, &ice->clist, ice->clist.count++, nominate);
+ pj_log_pop_indent();
+
+ } else {
+ LOG4((ice->obj_name, "Error: unable to perform triggered check: "
+ "TOO MANY CHECKS IN CHECKLIST!"));
+ }
+}
+
+
+/* Callback from the STUN session when an incoming STUN indication is
+ * received. Binding Indications are treated as keep-alives and merely
+ * logged; any other indication is logged as unexpected. No response is
+ * ever generated for indications.
+ */
+static pj_status_t on_stun_rx_indication(pj_stun_session *sess,
+ const pj_uint8_t *pkt,
+ unsigned pkt_len,
+ const pj_stun_msg *msg,
+ void *token,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len)
+{
+ struct stun_data *sd;
+
+ PJ_UNUSED_ARG(sess);
+ PJ_UNUSED_ARG(pkt);
+ PJ_UNUSED_ARG(pkt_len);
+ PJ_UNUSED_ARG(msg);
+ PJ_UNUSED_ARG(token);
+ PJ_UNUSED_ARG(src_addr);
+ PJ_UNUSED_ARG(src_addr_len);
+
+ /* Recover the per-component data to identify which component the
+ * indication arrived on.
+ */
+ sd = (struct stun_data*) pj_stun_session_get_user_data(sess);
+
+ pj_log_push_indent();
+
+ if (msg->hdr.type == PJ_STUN_BINDING_INDICATION) {
+ LOG5((sd->ice->obj_name, "Received Binding Indication keep-alive "
+ "for component %d", sd->comp_id));
+ } else {
+ LOG4((sd->ice->obj_name, "Received unexpected %s indication "
+ "for component %d", pj_stun_get_method_name(msg->hdr.type),
+ sd->comp_id));
+ }
+
+ pj_log_pop_indent();
+
+ return PJ_SUCCESS;
+}
+
+
+/* Public API: send application data through the given component's
+ * currently valid check (pair). Fails with PJNATH_EICEINCOMPID when the
+ * component does not exist and PJNATH_EICEINPROGRESS when no valid
+ * check has been established yet for the component.
+ */
+PJ_DEF(pj_status_t) pj_ice_sess_send_data(pj_ice_sess *ice,
+ unsigned comp_id,
+ const void *data,
+ pj_size_t data_len)
+{
+ pj_status_t status = PJ_SUCCESS;
+ pj_ice_sess_comp *comp;
+ pj_ice_sess_cand *cand;
+ pj_uint8_t transport_id;
+ pj_sockaddr addr;
+
+ PJ_ASSERT_RETURN(ice && comp_id, PJ_EINVAL);
+
+ /* It is possible that comp_cnt is less than comp_id, when remote
+ * doesn't support all the components that we have.
+ */
+ if (comp_id > ice->comp_cnt) {
+ return PJNATH_EICEINCOMPID;
+ }
+
+ pj_mutex_lock(ice->mutex);
+
+ comp = find_comp(ice, comp_id);
+ if (comp == NULL) {
+ status = PJNATH_EICEINCOMPID;
+ pj_mutex_unlock(ice->mutex);
+ goto on_return;
+ }
+
+ if (comp->valid_check == NULL) {
+ status = PJNATH_EICEINPROGRESS;
+ pj_mutex_unlock(ice->mutex);
+ goto on_return;
+ }
+
+ /* Copy out everything needed for transmission while still holding
+ * the mutex, so the callback below can run unlocked.
+ */
+ cand = comp->valid_check->lcand;
+ transport_id = cand->transport_id;
+ pj_sockaddr_cp(&addr, &comp->valid_check->rcand->addr);
+
+ /* Release the mutex now to avoid deadlock (see ticket #1451). */
+ pj_mutex_unlock(ice->mutex);
+
+ status = (*ice->cb.on_tx_pkt)(ice, comp_id, transport_id,
+ data, data_len,
+ &addr,
+ sizeof(pj_sockaddr_in));
+
+on_return:
+ return status;
+}
+
+
+/* Public API: demultiplex an incoming packet for a component. STUN
+ * packets are handed to the component's STUN session (with the ICE
+ * mutex held); everything else is passed to the application's
+ * on_rx_data() callback with the mutex released.
+ */
+PJ_DEF(pj_status_t) pj_ice_sess_on_rx_pkt(pj_ice_sess *ice,
+ unsigned comp_id,
+ unsigned transport_id,
+ void *pkt,
+ pj_size_t pkt_size,
+ const pj_sockaddr_t *src_addr,
+ int src_addr_len)
+{
+ pj_status_t status = PJ_SUCCESS;
+ pj_ice_sess_comp *comp;
+ pj_ice_msg_data *msg_data = NULL;
+ unsigned i;
+
+ PJ_ASSERT_RETURN(ice, PJ_EINVAL);
+
+ pj_mutex_lock(ice->mutex);
+
+ comp = find_comp(ice, comp_id);
+ if (comp == NULL) {
+ pj_mutex_unlock(ice->mutex);
+ return PJNATH_EICEINCOMPID;
+ }
+
+ /* Find transport: map the caller-supplied transport_id back to the
+ * session's per-transport msg_data entry.
+ */
+ for (i=0; i<PJ_ARRAY_SIZE(ice->tp_data); ++i) {
+ if (ice->tp_data[i].transport_id == transport_id) {
+ msg_data = &ice->tp_data[i];
+ break;
+ }
+ }
+ if (msg_data == NULL) {
+ pj_assert(!"Invalid transport ID");
+ pj_mutex_unlock(ice->mutex);
+ return PJ_EINVAL;
+ }
+
+ /* Don't check fingerprint. We only need to distinguish STUN and non-STUN
+ * packets. We don't need to verify the STUN packet too rigorously, that
+ * will be done by the user.
+ */
+ status = pj_stun_msg_check((const pj_uint8_t*)pkt, pkt_size,
+ PJ_STUN_IS_DATAGRAM |
+ PJ_STUN_NO_FINGERPRINT_CHECK);
+ if (status == PJ_SUCCESS) {
+ /* Looks like STUN: feed it to the STUN session. Errors here are
+ * logged but still returned to the caller.
+ */
+ status = pj_stun_session_on_rx_pkt(comp->stun_sess, pkt, pkt_size,
+ PJ_STUN_IS_DATAGRAM, msg_data,
+ NULL, src_addr, src_addr_len);
+ if (status != PJ_SUCCESS) {
+ pj_strerror(status, ice->tmp.errmsg, sizeof(ice->tmp.errmsg));
+ LOG4((ice->obj_name, "Error processing incoming message: %s",
+ ice->tmp.errmsg));
+ }
+ pj_mutex_unlock(ice->mutex);
+ } else {
+ /* Not a STUN packet. Call application's callback instead, but release
+ * the mutex now or otherwise we may get deadlock.
+ */
+ pj_mutex_unlock(ice->mutex);
+
+ (*ice->cb.on_rx_data)(ice, comp_id, transport_id, pkt, pkt_size,
+ src_addr, src_addr_len);
+ status = PJ_SUCCESS;
+ }
+
+ return status;
+}
+
+
diff --git a/pjnath/src/pjnath/ice_strans.c b/pjnath/src/pjnath/ice_strans.c
new file mode 100644
index 0000000..8ae2a90
--- /dev/null
+++ b/pjnath/src/pjnath/ice_strans.c
@@ -0,0 +1,1757 @@
+/* $Id: ice_strans.c 4133 2012-05-21 14:00:17Z bennylp $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <pjnath/ice_strans.h>
+#include <pjnath/errno.h>
+#include <pj/addr_resolv.h>
+#include <pj/array.h>
+#include <pj/assert.h>
+#include <pj/ip_helper.h>
+#include <pj/lock.h>
+#include <pj/log.h>
+#include <pj/os.h>
+#include <pj/pool.h>
+#include <pj/rand.h>
+#include <pj/string.h>
+#include <pj/compat/socket.h>
+
+
+#if 0
+# define TRACE_PKT(expr) PJ_LOG(5,expr)
+#else
+# define TRACE_PKT(expr)
+#endif
+
+
+/* Transport IDs, stored in each candidate's transport_id field so that
+ * incoming/outgoing packets can be routed to the right socket.
+ */
+enum tp_type
+{
+ TP_NONE, /**< No transport / unset. */
+ TP_STUN, /**< Plain UDP socket managed by pj_stun_sock. */
+ TP_TURN /**< Relayed transport managed by pj_turn_sock. */
+};
+
+/* Candidate's local preference values. This is mostly used to
+ * specify preference among candidates with the same type. Since
+ * we don't have the facility to specify that, we'll just set it
+ * all to the same value.
+ */
+#if PJNATH_ICE_PRIO_STD
+# define SRFLX_PREF 65535
+# define HOST_PREF 65535
+# define RELAY_PREF 65535
+#else
+# define SRFLX_PREF 0
+# define HOST_PREF 0
+# define RELAY_PREF 0
+#endif
+
+
+/* The candidate type preference when STUN candidate is used.
+ * Indexed by pj_ice_cand_type; uploaded to the ICE session via
+ * pj_ice_sess_set_prefs() when the default candidate is SRFLX, so that
+ * server-reflexive candidates are checked before host candidates.
+ */
+static pj_uint8_t srflx_pref_table[4] =
+{
+#if PJNATH_ICE_PRIO_STD
+ 100, /**< PJ_ICE_HOST_PREF */
+ 110, /**< PJ_ICE_SRFLX_PREF */
+ 126, /**< PJ_ICE_PRFLX_PREF */
+ 0 /**< PJ_ICE_RELAYED_PREF */
+#else
+ /* Keep it to 2 bits */
+ 1, /**< PJ_ICE_HOST_PREF */
+ 2, /**< PJ_ICE_SRFLX_PREF */
+ 3, /**< PJ_ICE_PRFLX_PREF */
+ 0 /**< PJ_ICE_RELAYED_PREF */
+#endif
+};
+
+
+/* ICE callbacks */
+static void on_ice_complete(pj_ice_sess *ice, pj_status_t status);
+static pj_status_t ice_tx_pkt(pj_ice_sess *ice,
+ unsigned comp_id,
+ unsigned transport_id,
+ const void *pkt, pj_size_t size,
+ const pj_sockaddr_t *dst_addr,
+ unsigned dst_addr_len);
+static void ice_rx_data(pj_ice_sess *ice,
+ unsigned comp_id,
+ unsigned transport_id,
+ void *pkt, pj_size_t size,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len);
+
+
+/* STUN socket callbacks */
+/* Notification when incoming packet has been received. */
+static pj_bool_t stun_on_rx_data(pj_stun_sock *stun_sock,
+ void *pkt,
+ unsigned pkt_len,
+ const pj_sockaddr_t *src_addr,
+ unsigned addr_len);
+/* Notification when asynchronous send operation has completed. */
+static pj_bool_t stun_on_data_sent(pj_stun_sock *stun_sock,
+ pj_ioqueue_op_key_t *send_key,
+ pj_ssize_t sent);
+/* Notification when the status of the STUN transport has changed. */
+static pj_bool_t stun_on_status(pj_stun_sock *stun_sock,
+ pj_stun_sock_op op,
+ pj_status_t status);
+
+
+/* TURN callbacks */
+static void turn_on_rx_data(pj_turn_sock *turn_sock,
+ void *pkt,
+ unsigned pkt_len,
+ const pj_sockaddr_t *peer_addr,
+ unsigned addr_len);
+static void turn_on_state(pj_turn_sock *turn_sock, pj_turn_state_t old_state,
+ pj_turn_state_t new_state);
+
+
+
+/* Forward decls */
+static void destroy_ice_st(pj_ice_strans *ice_st);
+#define ice_st_perror(ice_st,msg,rc) pjnath_perror(ice_st->obj_name,msg,rc)
+static void sess_init_update(pj_ice_strans *ice_st);
+
+static void sess_add_ref(pj_ice_strans *ice_st);
+static pj_bool_t sess_dec_ref(pj_ice_strans *ice_st);
+
+/**
+ * This structure describes an ICE stream transport component. A component
+ * in ICE stream transport typically corresponds to a single socket created
+ * for this component, and bound to a specific transport address. This
+ * component may have multiple alias addresses, for example one alias
+ * address for each interfaces in multi-homed host, another for server
+ * reflexive alias, and another for relayed alias. For each transport
+ * address alias, an ICE stream transport candidate (#pj_ice_sess_cand) will
+ * be created, and these candidates will eventually registered to the ICE
+ * session.
+ */
+typedef struct pj_ice_strans_comp
+{
+ pj_ice_strans *ice_st; /**< ICE stream transport. */
+ unsigned comp_id; /**< Component ID. */
+
+ pj_stun_sock *stun_sock; /**< STUN transport. */
+ pj_turn_sock *turn_sock; /**< TURN relay transport. */
+ pj_bool_t turn_log_off; /**< TURN logging off? */
+ unsigned turn_err_cnt; /**< TURN disconnected count. */
+
+ unsigned cand_cnt; /**< # of candidates/aliases. */
+ pj_ice_sess_cand cand_list[PJ_ICE_ST_MAX_CAND]; /**< Cand array */
+
+ unsigned default_cand; /**< Index into cand_list of the
+ default candidate. */
+
+} pj_ice_strans_comp;
+
+
+/**
+ * This structure represents the ICE stream transport.
+ */
+struct pj_ice_strans
+{
+ char *obj_name; /**< Log ID. */
+ pj_pool_t *pool; /**< Pool used by this object. */
+ void *user_data; /**< Application data. */
+ pj_ice_strans_cfg cfg; /**< Configuration. */
+ pj_ice_strans_cb cb; /**< Application callback. */
+ pj_lock_t *init_lock; /**< Initialization mutex; held while
+ components are being created so
+ callbacks can't fire early. */
+
+ pj_ice_strans_state state; /**< Session state. */
+ pj_ice_sess *ice; /**< ICE session. */
+ pj_time_val start_time;/**< Time when ICE was started */
+
+ unsigned comp_cnt; /**< Number of components. */
+ pj_ice_strans_comp **comp; /**< Components array. */
+
+ pj_timer_entry ka_timer; /**< STUN keep-alive timer. */
+
+ pj_atomic_t *busy_cnt; /**< Busy refcount; destroy is deferred
+ while this is non-zero. */
+ pj_bool_t destroy_req;/**< Destroy has been called? */
+ pj_bool_t cb_called; /**< Init error callback called?*/
+};
+
+
+/* Validate configuration */
+/* Validate configuration.
+ * Returns PJ_SUCCESS when the configuration is usable, or an error
+ * status otherwise.
+ */
+static pj_status_t pj_ice_strans_cfg_check_valid(const pj_ice_strans_cfg *cfg)
+{
+    /* BUG FIX: pj_stun_config_check_valid() returns a pj_bool_t
+     * (PJ_TRUE when valid), NOT a pj_status_t.  The previous code did
+     * "if (!status) return status;", i.e. on an INVALID config it
+     * returned PJ_FALSE == 0 == PJ_SUCCESS, so validation could never
+     * fail.  Map a failed check to a real error code instead.
+     */
+    if (!pj_stun_config_check_valid(&cfg->stun_cfg))
+        return PJ_EINVAL;
+
+    return PJ_SUCCESS;
+}
+
+
+/*
+ * Initialize ICE transport configuration with default values.
+ */
+/*
+ * Initialize ICE transport configuration with default values.
+ */
+PJ_DEF(void) pj_ice_strans_cfg_default(pj_ice_strans_cfg *cfg)
+{
+    /* Start from a clean slate. */
+    pj_bzero(cfg, sizeof(*cfg));
+
+    /* Let each sub-module fill in its own defaults. */
+    pj_stun_config_init(&cfg->stun_cfg, NULL, 0, NULL, NULL);
+    pj_stun_sock_cfg_default(&cfg->stun.cfg);
+    pj_turn_alloc_param_default(&cfg->turn.alloc_param);
+    pj_turn_sock_cfg_default(&cfg->turn.cfg);
+    pj_ice_sess_options_default(&cfg->opt);
+
+    /* Scalar defaults: IPv4, well-known STUN port, UDP towards TURN. */
+    cfg->af = pj_AF_INET();
+    cfg->stun.port = PJ_STUN_PORT;
+    cfg->stun.max_host_cands = 64;
+    cfg->stun.ignore_stun_error = PJ_FALSE;
+    cfg->turn.conn_type = PJ_TURN_TP_UDP;
+}
+
+
+/*
+ * Copy configuration.
+ */
+/*
+ * Duplicate a configuration into dst, deep-copying all string/credential
+ * members into the supplied pool so dst outlives src.
+ */
+PJ_DEF(void) pj_ice_strans_cfg_copy( pj_pool_t *pool,
+                                     pj_ice_strans_cfg *dst,
+                                     const pj_ice_strans_cfg *src)
+{
+    /* Shallow copy first... */
+    pj_memcpy(dst, src, sizeof(*src));
+
+    /* ...then re-duplicate everything that points into src's storage. */
+    if (src->stun.server.slen != 0)
+        pj_strdup(pool, &dst->stun.server, &src->stun.server);
+    if (src->turn.server.slen != 0)
+        pj_strdup(pool, &dst->turn.server, &src->turn.server);
+    pj_stun_auth_cred_dup(pool, &dst->turn.auth_cred, &src->turn.auth_cred);
+}
+
+
+/*
+ * Add or update TURN candidate.
+ */
+/*
+ * Add or update TURN candidate.
+ *
+ * Creates a fresh pj_turn_sock for the component and starts the TURN
+ * allocation.  If a relayed candidate already exists (re-allocation after
+ * a TURN failure), it is reset to PJ_EPENDING and its slot is reused;
+ * otherwise a new pending candidate is appended to cand_list.  The
+ * candidate's address is filled in later by turn_on_state().
+ */
+static pj_status_t add_update_turn(pj_ice_strans *ice_st,
+ pj_ice_strans_comp *comp)
+{
+ pj_turn_sock_cb turn_sock_cb;
+ pj_ice_sess_cand *cand = NULL;
+ unsigned i;
+ pj_status_t status;
+
+ /* Find relayed candidate in the component */
+ for (i=0; i<comp->cand_cnt; ++i) {
+ if (comp->cand_list[i].type == PJ_ICE_CAND_TYPE_RELAYED) {
+ cand = &comp->cand_list[i];
+ break;
+ }
+ }
+
+ /* If candidate is found, invalidate it first */
+ if (cand) {
+ cand->status = PJ_EPENDING;
+
+ /* Also if this component's default candidate is set to relay,
+ * move it temporarily to something else.
+ */
+ if ((int)comp->default_cand == cand - comp->cand_list) {
+ /* Init to something */
+ comp->default_cand = 0;
+ /* Use srflx candidate as the default, if any */
+ for (i=0; i<comp->cand_cnt; ++i) {
+ if (comp->cand_list[i].type == PJ_ICE_CAND_TYPE_SRFLX) {
+ comp->default_cand = i;
+ break;
+ }
+ }
+ }
+ }
+
+ /* Init TURN socket */
+ pj_bzero(&turn_sock_cb, sizeof(turn_sock_cb));
+ turn_sock_cb.on_rx_data = &turn_on_rx_data;
+ turn_sock_cb.on_state = &turn_on_state;
+
+ /* Override with component specific QoS settings, if any */
+ if (ice_st->cfg.comp[comp->comp_id-1].qos_type) {
+ ice_st->cfg.turn.cfg.qos_type =
+ ice_st->cfg.comp[comp->comp_id-1].qos_type;
+ }
+ if (ice_st->cfg.comp[comp->comp_id-1].qos_params.flags) {
+ pj_memcpy(&ice_st->cfg.turn.cfg.qos_params,
+ &ice_st->cfg.comp[comp->comp_id-1].qos_params,
+ sizeof(ice_st->cfg.turn.cfg.qos_params));
+ }
+
+ /* Create the TURN transport */
+ status = pj_turn_sock_create(&ice_st->cfg.stun_cfg, ice_st->cfg.af,
+ ice_st->cfg.turn.conn_type,
+ &turn_sock_cb, &ice_st->cfg.turn.cfg,
+ comp, &comp->turn_sock);
+ if (status != PJ_SUCCESS) {
+ return status;
+ }
+
+ /* Add pending job */
+ ///sess_add_ref(ice_st);
+
+ /* Start allocation */
+ status=pj_turn_sock_alloc(comp->turn_sock,
+ &ice_st->cfg.turn.server,
+ ice_st->cfg.turn.port,
+ ice_st->cfg.resolver,
+ &ice_st->cfg.turn.auth_cred,
+ &ice_st->cfg.turn.alloc_param);
+ if (status != PJ_SUCCESS) {
+ ///sess_dec_ref(ice_st);
+ return status;
+ }
+
+ /* Add relayed candidate with pending status if there's no existing one.
+ * NOTE(review): no bounds check on cand_cnt here; callers are expected
+ * to have reserved one slot for the relay (see create_comp) -- confirm
+ * for any new caller.
+ */
+ if (cand == NULL) {
+ cand = &comp->cand_list[comp->cand_cnt++];
+ cand->type = PJ_ICE_CAND_TYPE_RELAYED;
+ cand->status = PJ_EPENDING;
+ cand->local_pref = RELAY_PREF;
+ cand->transport_id = TP_TURN;
+ cand->comp_id = (pj_uint8_t) comp->comp_id;
+ }
+
+ PJ_LOG(4,(ice_st->obj_name,
+ "Comp %d: TURN relay candidate waiting for allocation",
+ comp->comp_id));
+
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Create the component.
+ */
+/*
+ * Create one component of the ICE stream transport: allocate the
+ * component structure, create the STUN transport (which provides the
+ * server-reflexive and host candidates) and the TURN transport (which
+ * provides the relayed candidate), as configured.
+ */
+static pj_status_t create_comp(pj_ice_strans *ice_st, unsigned comp_id)
+{
+    pj_ice_strans_comp *comp = NULL;
+    pj_status_t status;
+
+    /* Verify arguments */
+    PJ_ASSERT_RETURN(ice_st && comp_id, PJ_EINVAL);
+
+    /* Check that component ID present */
+    PJ_ASSERT_RETURN(comp_id <= ice_st->comp_cnt, PJNATH_EICEINCOMPID);
+
+    /* Create component */
+    comp = PJ_POOL_ZALLOC_T(ice_st->pool, pj_ice_strans_comp);
+    comp->ice_st = ice_st;
+    comp->comp_id = comp_id;
+
+    ice_st->comp[comp_id-1] = comp;
+
+    /* Initialize default candidate */
+    comp->default_cand = 0;
+
+    /* Create STUN transport if configured */
+    if (ice_st->cfg.stun.server.slen || ice_st->cfg.stun.max_host_cands) {
+        pj_stun_sock_cb stun_sock_cb;
+        pj_ice_sess_cand *cand;
+
+        pj_bzero(&stun_sock_cb, sizeof(stun_sock_cb));
+        stun_sock_cb.on_rx_data = &stun_on_rx_data;
+        stun_sock_cb.on_status = &stun_on_status;
+        stun_sock_cb.on_data_sent = &stun_on_data_sent;
+
+        /* Override component specific QoS settings, if any */
+        if (ice_st->cfg.comp[comp_id-1].qos_type) {
+            ice_st->cfg.stun.cfg.qos_type =
+                ice_st->cfg.comp[comp_id-1].qos_type;
+        }
+        if (ice_st->cfg.comp[comp_id-1].qos_params.flags) {
+            pj_memcpy(&ice_st->cfg.stun.cfg.qos_params,
+                      &ice_st->cfg.comp[comp_id-1].qos_params,
+                      sizeof(ice_st->cfg.stun.cfg.qos_params));
+        }
+
+        /* Create the STUN transport */
+        status = pj_stun_sock_create(&ice_st->cfg.stun_cfg, NULL,
+                                     ice_st->cfg.af, &stun_sock_cb,
+                                     &ice_st->cfg.stun.cfg,
+                                     comp, &comp->stun_sock);
+        if (status != PJ_SUCCESS)
+            return status;
+
+        /* Start STUN Binding resolution and add srflx candidate
+         * only if server is set
+         */
+        if (ice_st->cfg.stun.server.slen) {
+            pj_stun_sock_info stun_sock_info;
+
+            /* Add pending job */
+            ///sess_add_ref(ice_st);
+
+            PJ_LOG(4,(ice_st->obj_name,
+                      "Comp %d: srflx candidate starts Binding discovery",
+                      comp_id));
+
+            pj_log_push_indent();
+
+            /* Start Binding resolution */
+            status = pj_stun_sock_start(comp->stun_sock,
+                                        &ice_st->cfg.stun.server,
+                                        ice_st->cfg.stun.port,
+                                        ice_st->cfg.resolver);
+            if (status != PJ_SUCCESS) {
+                ///sess_dec_ref(ice_st);
+                pj_log_pop_indent();
+                return status;
+            }
+
+            /* Enumerate addresses */
+            status = pj_stun_sock_get_info(comp->stun_sock, &stun_sock_info);
+            if (status != PJ_SUCCESS) {
+                ///sess_dec_ref(ice_st);
+                pj_log_pop_indent();
+                return status;
+            }
+
+            /* Add srflx candidate with pending status; the mapped address
+             * is filled in by stun_on_status() when Binding completes.
+             */
+            cand = &comp->cand_list[comp->cand_cnt++];
+            cand->type = PJ_ICE_CAND_TYPE_SRFLX;
+            cand->status = PJ_EPENDING;
+            cand->local_pref = SRFLX_PREF;
+            cand->transport_id = TP_STUN;
+            cand->comp_id = (pj_uint8_t) comp_id;
+            pj_sockaddr_cp(&cand->base_addr, &stun_sock_info.aliases[0]);
+            pj_sockaddr_cp(&cand->rel_addr, &cand->base_addr);
+            pj_ice_calc_foundation(ice_st->pool, &cand->foundation,
+                                   cand->type, &cand->base_addr);
+
+            /* Set default candidate to srflx */
+            comp->default_cand = cand - comp->cand_list;
+
+            pj_log_pop_indent();
+        }
+
+        /* Add local addresses to host candidates, unless max_host_cands
+         * is set to zero.
+         */
+        if (ice_st->cfg.stun.max_host_cands) {
+            pj_stun_sock_info stun_sock_info;
+            unsigned i;
+
+            /* Enumerate addresses */
+            status = pj_stun_sock_get_info(comp->stun_sock, &stun_sock_info);
+            if (status != PJ_SUCCESS)
+                return status;
+
+            for (i=0; i<stun_sock_info.alias_cnt &&
+                      i<ice_st->cfg.stun.max_host_cands; ++i)
+            {
+                char addrinfo[PJ_INET6_ADDRSTRLEN+10];
+                const pj_sockaddr *addr = &stun_sock_info.aliases[i];
+
+                /* Leave one candidate for relay */
+                if (comp->cand_cnt >= PJ_ICE_ST_MAX_CAND-1) {
+                    PJ_LOG(4,(ice_st->obj_name, "Too many host candidates"));
+                    break;
+                }
+
+                /* Ignore loopback addresses unless cfg->stun.loop_addr
+                 * is set.
+                 * NOTE(review): this reads ipv4.sin_addr and therefore
+                 * assumes an AF_INET alias -- verify if IPv6 support is
+                 * added.
+                 */
+                if ((pj_ntohl(addr->ipv4.sin_addr.s_addr)>>24)==127) {
+                    if (ice_st->cfg.stun.loop_addr==PJ_FALSE)
+                        continue;
+                }
+
+                cand = &comp->cand_list[comp->cand_cnt++];
+
+                cand->type = PJ_ICE_CAND_TYPE_HOST;
+                cand->status = PJ_SUCCESS;
+                cand->local_pref = HOST_PREF;
+                cand->transport_id = TP_STUN;
+                cand->comp_id = (pj_uint8_t) comp_id;
+                pj_sockaddr_cp(&cand->addr, addr);
+                pj_sockaddr_cp(&cand->base_addr, addr);
+                pj_bzero(&cand->rel_addr, sizeof(cand->rel_addr));
+                pj_ice_calc_foundation(ice_st->pool, &cand->foundation,
+                                       cand->type, &cand->base_addr);
+
+                PJ_LOG(4,(ice_st->obj_name,
+                          "Comp %d: host candidate %s added",
+                          comp_id, pj_sockaddr_print(&cand->addr, addrinfo,
+                                                     sizeof(addrinfo), 3)));
+            }
+        }
+    }
+
+    /* Create TURN relay if configured. */
+    if (ice_st->cfg.turn.server.slen) {
+        /* BUG FIX: the return value of add_update_turn() used to be
+         * ignored, so a failure to create the TURN transport or start
+         * the allocation went unnoticed and the relayed candidate
+         * silently never appeared.  Propagate the error instead.
+         */
+        status = add_update_turn(ice_st, comp);
+        if (status != PJ_SUCCESS)
+            return status;
+    }
+
+    return PJ_SUCCESS;
+}
+
+
+/*
+ * Create ICE stream transport
+ */
+/*
+ * Create ICE stream transport: validate the configuration, allocate the
+ * object, and create all components (which starts candidate gathering).
+ * The init_lock is held during component creation so that socket
+ * callbacks cannot fire before initialization is complete.
+ */
+PJ_DEF(pj_status_t) pj_ice_strans_create( const char *name,
+                                          const pj_ice_strans_cfg *cfg,
+                                          unsigned comp_cnt,
+                                          void *user_data,
+                                          const pj_ice_strans_cb *cb,
+                                          pj_ice_strans **p_ice_st)
+{
+    pj_pool_t *pool;
+    pj_ice_strans *ice_st;
+    unsigned i;
+    pj_status_t status;
+
+    status = pj_ice_strans_cfg_check_valid(cfg);
+    if (status != PJ_SUCCESS)
+        return status;
+
+    PJ_ASSERT_RETURN(comp_cnt && cb && p_ice_st &&
+                     comp_cnt <= PJ_ICE_MAX_COMP , PJ_EINVAL);
+
+    if (name == NULL)
+        name = "ice%p";
+
+    pool = pj_pool_create(cfg->stun_cfg.pf, name, PJNATH_POOL_LEN_ICE_STRANS,
+                          PJNATH_POOL_INC_ICE_STRANS, NULL);
+    ice_st = PJ_POOL_ZALLOC_T(pool, pj_ice_strans);
+    ice_st->pool = pool;
+    ice_st->obj_name = pool->obj_name;
+    ice_st->user_data = user_data;
+
+    PJ_LOG(4,(ice_st->obj_name,
+              "Creating ICE stream transport with %d component(s)",
+              comp_cnt));
+    pj_log_push_indent();
+
+    pj_ice_strans_cfg_copy(pool, &ice_st->cfg, cfg);
+    pj_memcpy(&ice_st->cb, cb, sizeof(*cb));
+
+    status = pj_atomic_create(pool, 0, &ice_st->busy_cnt);
+    if (status != PJ_SUCCESS) {
+        destroy_ice_st(ice_st);
+        /* BUG FIX: this error path previously returned without popping
+         * the log indent pushed above, leaving logging permanently
+         * mis-indented.  Pop it like every other error path does.
+         */
+        pj_log_pop_indent();
+        return status;
+    }
+
+    status = pj_lock_create_recursive_mutex(pool, ice_st->obj_name,
+                                            &ice_st->init_lock);
+    if (status != PJ_SUCCESS) {
+        destroy_ice_st(ice_st);
+        pj_log_pop_indent();
+        return status;
+    }
+
+    ice_st->comp_cnt = comp_cnt;
+    ice_st->comp = (pj_ice_strans_comp**)
+                   pj_pool_calloc(pool, comp_cnt, sizeof(pj_ice_strans_comp*));
+
+    /* Move state to candidate gathering */
+    ice_st->state = PJ_ICE_STRANS_STATE_INIT;
+
+    /* Acquire initialization mutex to prevent callback to be
+     * called before we finish initialization.
+     */
+    pj_lock_acquire(ice_st->init_lock);
+
+    for (i=0; i<comp_cnt; ++i) {
+        status = create_comp(ice_st, i+1);
+        if (status != PJ_SUCCESS) {
+            pj_lock_release(ice_st->init_lock);
+            destroy_ice_st(ice_st);
+            pj_log_pop_indent();
+            return status;
+        }
+    }
+
+    /* Done with initialization */
+    pj_lock_release(ice_st->init_lock);
+
+    PJ_LOG(4,(ice_st->obj_name, "ICE stream transport created"));
+
+    *p_ice_st = ice_st;
+
+    /* Check if all candidates are ready (this may call callback) */
+    sess_init_update(ice_st);
+
+    pj_log_pop_indent();
+
+    return PJ_SUCCESS;
+}
+
+/* Destroy ICE */
+static void destroy_ice_st(pj_ice_strans *ice_st)
+{
+ unsigned i;
+
+ PJ_LOG(5,(ice_st->obj_name, "ICE stream transport destroying.."));
+ pj_log_push_indent();
+
+ /* Destroy ICE if we have ICE */
+ if (ice_st->ice) {
+ pj_ice_sess_destroy(ice_st->ice);
+ ice_st->ice = NULL;
+ }
+
+ /* Destroy all components */
+ for (i=0; i<ice_st->comp_cnt; ++i) {
+ if (ice_st->comp[i]) {
+ if (ice_st->comp[i]->stun_sock) {
+ pj_stun_sock_set_user_data(ice_st->comp[i]->stun_sock, NULL);
+ pj_stun_sock_destroy(ice_st->comp[i]->stun_sock);
+ ice_st->comp[i]->stun_sock = NULL;
+ }
+ if (ice_st->comp[i]->turn_sock) {
+ pj_turn_sock_set_user_data(ice_st->comp[i]->turn_sock, NULL);
+ pj_turn_sock_destroy(ice_st->comp[i]->turn_sock);
+ ice_st->comp[i]->turn_sock = NULL;
+ }
+ }
+ }
+ ice_st->comp_cnt = 0;
+
+ /* Destroy mutex */
+ if (ice_st->init_lock) {
+ pj_lock_acquire(ice_st->init_lock);
+ pj_lock_release(ice_st->init_lock);
+ pj_lock_destroy(ice_st->init_lock);
+ ice_st->init_lock = NULL;
+ }
+
+ /* Destroy reference counter */
+ if (ice_st->busy_cnt) {
+ pj_assert(pj_atomic_get(ice_st->busy_cnt)==0);
+ pj_atomic_destroy(ice_st->busy_cnt);
+ ice_st->busy_cnt = NULL;
+ }
+
+ PJ_LOG(4,(ice_st->obj_name, "ICE stream transport destroyed"));
+
+ /* Done */
+ pj_pool_release(ice_st->pool);
+ pj_log_pop_indent();
+}
+
+/* Get ICE session state. */
+/* Report the current state of the ICE stream transport. */
+PJ_DEF(pj_ice_strans_state) pj_ice_strans_get_state(pj_ice_strans *ice_st)
+{
+    pj_ice_strans_state cur = ice_st->state;
+    return cur;
+}
+
+/* State string */
+/* Return a human-readable name for an ICE stream transport state,
+ * or "???" for an out-of-range value.
+ */
+PJ_DEF(const char*) pj_ice_strans_state_name(pj_ice_strans_state state)
+{
+    /* static const: build the table once at load time instead of
+     * re-initializing an automatic array on every call.
+     */
+    static const char *const names[] = {
+        "Null",
+        "Candidate Gathering",
+        "Candidate Gathering Complete",
+        "Session Initialized",
+        "Negotiation In Progress",
+        "Negotiation Success",
+        "Negotiation Failed"
+    };
+
+    PJ_ASSERT_RETURN(state <= PJ_ICE_STRANS_STATE_FAILED, "???");
+    return names[state];
+}
+
+/* Notification about failure */
+static void sess_fail(pj_ice_strans *ice_st, pj_ice_strans_op op,
+ const char *title, pj_status_t status)
+{
+ char errmsg[PJ_ERR_MSG_SIZE];
+
+ pj_strerror(status, errmsg, sizeof(errmsg));
+ PJ_LOG(4,(ice_st->obj_name, "%s: %s", title, errmsg));
+ pj_log_push_indent();
+
+ if (op==PJ_ICE_STRANS_OP_INIT && ice_st->cb_called) {
+ pj_log_pop_indent();
+ return;
+ }
+
+ ice_st->cb_called = PJ_TRUE;
+
+ if (ice_st->cb.on_ice_complete)
+ (*ice_st->cb.on_ice_complete)(ice_st, op, status);
+
+ pj_log_pop_indent();
+}
+
+/* Update initialization status */
+static void sess_init_update(pj_ice_strans *ice_st)
+{
+ unsigned i;
+
+ /* Ignore if init callback has been called */
+ if (ice_st->cb_called)
+ return;
+
+ /* Notify application when all candidates have been gathered */
+ for (i=0; i<ice_st->comp_cnt; ++i) {
+ unsigned j;
+ pj_ice_strans_comp *comp = ice_st->comp[i];
+
+ for (j=0; j<comp->cand_cnt; ++j) {
+ pj_ice_sess_cand *cand = &comp->cand_list[j];
+
+ if (cand->status == PJ_EPENDING)
+ return;
+ }
+ }
+
+ /* All candidates have been gathered */
+ ice_st->cb_called = PJ_TRUE;
+ ice_st->state = PJ_ICE_STRANS_STATE_READY;
+ if (ice_st->cb.on_ice_complete)
+ (*ice_st->cb.on_ice_complete)(ice_st, PJ_ICE_STRANS_OP_INIT,
+ PJ_SUCCESS);
+}
+
+/*
+ * Destroy ICE stream transport.
+ */
+/*
+ * Destroy ICE stream transport.
+ *
+ * If the object is currently busy (busy_cnt > 0, i.e. a callback is in
+ * flight), destruction is deferred: destroy_req is flagged and the last
+ * sess_dec_ref() performs the actual teardown.  Returns PJ_EPENDING in
+ * that case, PJ_SUCCESS when destroyed immediately.
+ */
+PJ_DEF(pj_status_t) pj_ice_strans_destroy(pj_ice_strans *ice_st)
+{
+ PJ_ASSERT_RETURN(ice_st, PJ_EINVAL);
+
+ ice_st->destroy_req = PJ_TRUE;
+ if (pj_atomic_get(ice_st->busy_cnt) > 0) {
+ PJ_LOG(5,(ice_st->obj_name,
+ "ICE strans object is busy, will destroy later"));
+ return PJ_EPENDING;
+ }
+
+ destroy_ice_st(ice_st);
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Increment busy counter.
+ */
+/* Take a busy reference, delaying any pending destroy request. */
+static void sess_add_ref(pj_ice_strans *ice_st)
+{
+    pj_atomic_inc(ice_st->busy_cnt);
+}
+
+/*
+ * Decrement busy counter. If the counter has reached zero and destroy
+ * has been requested, destroy the object and return FALSE.
+ */
+/* Drop a busy reference.  When the last reference goes away and a
+ * destroy has been requested, tear the object down and return PJ_FALSE
+ * (caller must not touch ice_st afterwards); otherwise return PJ_TRUE.
+ */
+static pj_bool_t sess_dec_ref(pj_ice_strans *ice_st)
+{
+    int remaining = pj_atomic_dec_and_get(ice_st->busy_cnt);
+
+    pj_assert(remaining >= 0);
+
+    if (remaining != 0 || !ice_st->destroy_req)
+        return PJ_TRUE;
+
+    pj_ice_strans_destroy(ice_st);
+    return PJ_FALSE;
+}
+
+/*
+ * Get user data
+ */
+/* Return the application data pointer given at creation time. */
+PJ_DEF(void*) pj_ice_strans_get_user_data(pj_ice_strans *ice_st)
+{
+    PJ_ASSERT_RETURN(ice_st, NULL);
+
+    return ice_st->user_data;
+}
+
+
+/*
+ * Get the value of various options of the ICE stream transport.
+ */
+/* Copy the transport's current ICE session options into *opt. */
+PJ_DEF(pj_status_t) pj_ice_strans_get_options( pj_ice_strans *ice_st,
+                                               pj_ice_sess_options *opt)
+{
+    PJ_ASSERT_RETURN(ice_st && opt, PJ_EINVAL);
+
+    /* Plain struct copy (equivalent to the memcpy of sizeof(*opt)). */
+    *opt = ice_st->cfg.opt;
+    return PJ_SUCCESS;
+}
+
+/*
+ * Specify various options for this ICE stream transport.
+ */
+/* Store new ICE session options; if an ICE session already exists,
+ * push the options down to it immediately.
+ */
+PJ_DEF(pj_status_t) pj_ice_strans_set_options(pj_ice_strans *ice_st,
+                                              const pj_ice_sess_options *opt)
+{
+    PJ_ASSERT_RETURN(ice_st && opt, PJ_EINVAL);
+
+    ice_st->cfg.opt = *opt;   /* struct copy, same as memcpy */
+    if (ice_st->ice)
+        pj_ice_sess_set_options(ice_st->ice, &ice_st->cfg.opt);
+    return PJ_SUCCESS;
+}
+
+/*
+ * Create ICE!
+ */
+/*
+ * Create the ICE session for this stream transport and register every
+ * gathered (non-pending) candidate of every component with it.
+ * Must be called exactly once between create and start.
+ */
+PJ_DEF(pj_status_t) pj_ice_strans_init_ice(pj_ice_strans *ice_st,
+                                           pj_ice_sess_role role,
+                                           const pj_str_t *local_ufrag,
+                                           const pj_str_t *local_passwd)
+{
+    pj_status_t status;
+    unsigned i;
+    pj_ice_sess_cb ice_cb;
+
+    /* Check arguments */
+    PJ_ASSERT_RETURN(ice_st, PJ_EINVAL);
+    /* Must not have ICE */
+    PJ_ASSERT_RETURN(ice_st->ice == NULL, PJ_EINVALIDOP);
+    /* Components must have been created */
+    PJ_ASSERT_RETURN(ice_st->comp[0] != NULL, PJ_EINVALIDOP);
+
+    /* Init callback */
+    pj_bzero(&ice_cb, sizeof(ice_cb));
+    ice_cb.on_ice_complete = &on_ice_complete;
+    ice_cb.on_rx_data = &ice_rx_data;
+    ice_cb.on_tx_pkt = &ice_tx_pkt;
+
+    /* Create! */
+    status = pj_ice_sess_create(&ice_st->cfg.stun_cfg, ice_st->obj_name, role,
+                                ice_st->comp_cnt, &ice_cb,
+                                local_ufrag, local_passwd, &ice_st->ice);
+    if (status != PJ_SUCCESS)
+        return status;
+
+    /* Associate user data */
+    ice_st->ice->user_data = (void*)ice_st;
+
+    /* Set options */
+    pj_ice_sess_set_options(ice_st->ice, &ice_st->cfg.opt);
+
+    /* If default candidate for components are SRFLX one, upload a custom
+     * type priority to ICE session so that SRFLX candidates will get
+     * checked first.
+     * (FIX: removed the "default_cand >= 0" clause -- default_cand is
+     * unsigned, so the comparison was always true and only triggered
+     * -Wtype-limits warnings.)
+     */
+    if (ice_st->comp[0]->cand_list[ice_st->comp[0]->default_cand].type
+            == PJ_ICE_CAND_TYPE_SRFLX)
+    {
+        pj_ice_sess_set_prefs(ice_st->ice, srflx_pref_table);
+    }
+
+    /* Add components/candidates */
+    for (i=0; i<ice_st->comp_cnt; ++i) {
+        unsigned j;
+        pj_ice_strans_comp *comp = ice_st->comp[i];
+
+        /* Re-enable logging for Send/Data indications.
+         * (FIX: the log message previously said "Disabling" although
+         * the code enables all log classes and clears turn_log_off.)
+         */
+        if (comp->turn_sock) {
+            PJ_LOG(5,(ice_st->obj_name,
+                      "Re-enabling STUN Indication logging for "
+                      "component %d", i+1));
+            pj_turn_sock_set_log(comp->turn_sock, 0xFFFF);
+            comp->turn_log_off = PJ_FALSE;
+        }
+
+        for (j=0; j<comp->cand_cnt; ++j) {
+            pj_ice_sess_cand *cand = &comp->cand_list[j];
+            unsigned ice_cand_id;
+
+            /* Skip if candidate is not ready */
+            if (cand->status != PJ_SUCCESS) {
+                PJ_LOG(5,(ice_st->obj_name,
+                          "Candidate %d of comp %d is not added (pending)",
+                          j, i));
+                continue;
+            }
+
+            /* Must have address */
+            pj_assert(pj_sockaddr_has_addr(&cand->addr));
+
+            /* Add the candidate */
+            status = pj_ice_sess_add_cand(ice_st->ice, comp->comp_id,
+                                          cand->transport_id, cand->type,
+                                          cand->local_pref,
+                                          &cand->foundation, &cand->addr,
+                                          &cand->base_addr, &cand->rel_addr,
+                                          pj_sockaddr_get_len(&cand->addr),
+                                          &ice_cand_id);
+            if (status != PJ_SUCCESS)
+                goto on_error;
+        }
+    }
+
+    /* ICE session is ready for negotiation */
+    ice_st->state = PJ_ICE_STRANS_STATE_SESS_READY;
+
+    return PJ_SUCCESS;
+
+on_error:
+    pj_ice_strans_stop_ice(ice_st);
+    return status;
+}
+
+/*
+ * Check if the ICE stream transport has the ICE session created.
+ */
+/* Tell whether the ICE session has been created (init_ice called). */
+PJ_DEF(pj_bool_t) pj_ice_strans_has_sess(pj_ice_strans *ice_st)
+{
+    PJ_ASSERT_RETURN(ice_st, PJ_FALSE);
+
+    return (ice_st->ice != NULL) ? PJ_TRUE : PJ_FALSE;
+}
+
+/*
+ * Check if ICE negotiation is still running.
+ */
+/* Negotiation is "running" once remote candidates are known and the
+ * session has not yet completed.
+ */
+PJ_DEF(pj_bool_t) pj_ice_strans_sess_is_running(pj_ice_strans *ice_st)
+{
+    if (!ice_st || !ice_st->ice || !ice_st->ice->rcand_cnt)
+        return PJ_FALSE;
+
+    return !pj_ice_strans_sess_is_complete(ice_st);
+}
+
+
+/*
+ * Check if ICE negotiation has completed.
+ */
+/* Tell whether ICE negotiation has finished (success or failure). */
+PJ_DEF(pj_bool_t) pj_ice_strans_sess_is_complete(pj_ice_strans *ice_st)
+{
+    if (ice_st == NULL || ice_st->ice == NULL)
+        return PJ_FALSE;
+
+    return ice_st->ice->is_complete ? PJ_TRUE : PJ_FALSE;
+}
+
+
+/*
+ * Get the current/running component count.
+ */
+/*
+ * Get the current/running component count: the ICE session's count once
+ * remote candidates are known, the configured count otherwise.
+ *
+ * NOTE(review): on a NULL argument this returns PJ_EINVAL from an
+ * unsigned-count function, i.e. a large bogus count -- callers should
+ * never pass NULL; consider returning 0 here in a future API revision.
+ */
+PJ_DEF(unsigned) pj_ice_strans_get_running_comp_cnt(pj_ice_strans *ice_st)
+{
+ PJ_ASSERT_RETURN(ice_st, PJ_EINVAL);
+
+ if (ice_st->ice && ice_st->ice->rcand_cnt) {
+ return ice_st->ice->comp_cnt;
+ } else {
+ return ice_st->comp_cnt;
+ }
+}
+
+
+/*
+ * Get the ICE username fragment and password of the ICE session.
+ */
+/* Retrieve local and (optionally) remote ICE credentials.  Remote
+ * credentials require the check list to exist (rcand_cnt != 0); local
+ * credentials are available as soon as the session is created.
+ */
+PJ_DEF(pj_status_t) pj_ice_strans_get_ufrag_pwd( pj_ice_strans *ice_st,
+                                                 pj_str_t *loc_ufrag,
+                                                 pj_str_t *loc_pwd,
+                                                 pj_str_t *rem_ufrag,
+                                                 pj_str_t *rem_pwd)
+{
+    PJ_ASSERT_RETURN(ice_st && ice_st->ice, PJ_EINVALIDOP);
+
+    /* Local credentials first (filled in even if the remote part below
+     * turns out to be unavailable, matching historical behavior).
+     */
+    if (loc_ufrag)
+        *loc_ufrag = ice_st->ice->rx_ufrag;
+    if (loc_pwd)
+        *loc_pwd = ice_st->ice->rx_pass;
+
+    if (rem_ufrag || rem_pwd) {
+        PJ_ASSERT_RETURN(ice_st->ice->rcand_cnt != 0, PJ_EINVALIDOP);
+        if (rem_ufrag)
+            *rem_ufrag = ice_st->ice->tx_ufrag;
+        if (rem_pwd)
+            *rem_pwd = ice_st->ice->tx_pass;
+    }
+
+    return PJ_SUCCESS;
+}
+
+/*
+ * Get number of candidates
+ */
+/* Count the ICE session's local candidates belonging to comp_id. */
+PJ_DEF(unsigned) pj_ice_strans_get_cands_count(pj_ice_strans *ice_st,
+                                               unsigned comp_id)
+{
+    unsigned idx, total = 0;
+
+    PJ_ASSERT_RETURN(ice_st && ice_st->ice && comp_id &&
+                     comp_id <= ice_st->comp_cnt, 0);
+
+    for (idx=0; idx<ice_st->ice->lcand_cnt; ++idx) {
+        if (ice_st->ice->lcand[idx].comp_id == comp_id)
+            ++total;
+    }
+
+    return total;
+}
+
+/*
+ * Enum candidates
+ */
+/* Copy up to *count local candidates of comp_id into cand[], and set
+ * *count to the number actually copied.
+ */
+PJ_DEF(pj_status_t) pj_ice_strans_enum_cands(pj_ice_strans *ice_st,
+                                             unsigned comp_id,
+                                             unsigned *count,
+                                             pj_ice_sess_cand cand[])
+{
+    unsigned idx, copied = 0;
+
+    PJ_ASSERT_RETURN(ice_st && ice_st->ice && comp_id &&
+                     comp_id <= ice_st->comp_cnt && count && cand, PJ_EINVAL);
+
+    for (idx=0; idx<ice_st->ice->lcand_cnt; ++idx) {
+        const pj_ice_sess_cand *lcand = &ice_st->ice->lcand[idx];
+
+        if (lcand->comp_id != comp_id)
+            continue;
+        if (copied >= *count)
+            break;
+
+        cand[copied++] = *lcand;   /* struct copy */
+    }
+
+    *count = copied;
+    return PJ_SUCCESS;
+}
+
+/*
+ * Get default candidate.
+ */
+/*
+ * Get the default candidate for a component: the nominated valid pair's
+ * local candidate once negotiation has produced one, otherwise the
+ * component's configured default candidate.
+ */
+PJ_DEF(pj_status_t) pj_ice_strans_get_def_cand( pj_ice_strans *ice_st,
+                                                unsigned comp_id,
+                                                pj_ice_sess_cand *cand)
+{
+    const pj_ice_sess_check *valid_pair;
+
+    PJ_ASSERT_RETURN(ice_st && comp_id && comp_id <= ice_st->comp_cnt &&
+                     cand, PJ_EINVAL);
+
+    valid_pair = pj_ice_strans_get_valid_pair(ice_st, comp_id);
+    if (valid_pair) {
+        pj_memcpy(cand, valid_pair->lcand, sizeof(pj_ice_sess_cand));
+    } else {
+        pj_ice_strans_comp *comp = ice_st->comp[comp_id - 1];
+        /* FIX: default_cand is unsigned, so the former ">= 0" clause in
+         * this assertion was always true (and warned under
+         * -Wtype-limits); only the upper bound is meaningful.
+         */
+        pj_assert(comp->default_cand < comp->cand_cnt);
+        pj_memcpy(cand, &comp->cand_list[comp->default_cand],
+                  sizeof(pj_ice_sess_cand));
+    }
+    return PJ_SUCCESS;
+}
+
+/*
+ * Get the current ICE role.
+ */
+/* Return the ICE session's current role (controlling/controlled). */
+PJ_DEF(pj_ice_sess_role) pj_ice_strans_get_role(pj_ice_strans *ice_st)
+{
+    PJ_ASSERT_RETURN(ice_st && ice_st->ice, PJ_ICE_SESS_ROLE_UNKNOWN);
+
+    return ice_st->ice->role;
+}
+
+/*
+ * Change session role.
+ */
+/* Forward a role change request to the underlying ICE session. */
+PJ_DEF(pj_status_t) pj_ice_strans_change_role( pj_ice_strans *ice_st,
+                                               pj_ice_sess_role new_role)
+{
+    PJ_ASSERT_RETURN(ice_st && ice_st->ice, PJ_EINVALIDOP);
+
+    return pj_ice_sess_change_role(ice_st->ice, new_role);
+}
+
+/*
+ * Start ICE processing !
+ */
+/*
+ * Start ICE processing: build the check list from the remote candidates,
+ * pre-install TURN permissions for every remote address (required before
+ * relayed connectivity checks can succeed), then kick off the checks.
+ * On any failure the ICE session is torn down via stop_ice.
+ */
+PJ_DEF(pj_status_t) pj_ice_strans_start_ice( pj_ice_strans *ice_st,
+ const pj_str_t *rem_ufrag,
+ const pj_str_t *rem_passwd,
+ unsigned rem_cand_cnt,
+ const pj_ice_sess_cand rem_cand[])
+{
+ pj_status_t status;
+
+ PJ_ASSERT_RETURN(ice_st && rem_ufrag && rem_passwd &&
+ rem_cand_cnt && rem_cand, PJ_EINVAL);
+
+ /* Mark start time */
+ pj_gettimeofday(&ice_st->start_time);
+
+ /* Build check list */
+ status = pj_ice_sess_create_check_list(ice_st->ice, rem_ufrag, rem_passwd,
+ rem_cand_cnt, rem_cand);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ /* If we have TURN candidate, now is the time to create the permissions */
+ if (ice_st->comp[0]->turn_sock) {
+ unsigned i;
+
+ for (i=0; i<ice_st->comp_cnt; ++i) {
+ pj_ice_strans_comp *comp = ice_st->comp[i];
+ pj_sockaddr addrs[PJ_ICE_ST_MAX_CAND];
+ unsigned j, count=0;
+
+ /* Gather remote addresses for this component */
+ for (j=0; j<rem_cand_cnt && count<PJ_ARRAY_SIZE(addrs); ++j) {
+ if (rem_cand[j].comp_id==i+1) {
+ pj_memcpy(&addrs[count++], &rem_cand[j].addr,
+ pj_sockaddr_get_len(&rem_cand[j].addr));
+ }
+ }
+
+ if (count) {
+ status = pj_turn_sock_set_perm(comp->turn_sock, count,
+ addrs, 0);
+ if (status != PJ_SUCCESS) {
+ pj_ice_strans_stop_ice(ice_st);
+ return status;
+ }
+ }
+ }
+ }
+
+ /* Start ICE negotiation! */
+ status = pj_ice_sess_start_check(ice_st->ice);
+ if (status != PJ_SUCCESS) {
+ pj_ice_strans_stop_ice(ice_st);
+ return status;
+ }
+
+ ice_st->state = PJ_ICE_STRANS_STATE_NEGO;
+ return status;
+}
+
+/*
+ * Get valid pair.
+ */
+/* Return the nominated (valid) check pair for a component, or NULL if
+ * there is no ICE session or no valid pair yet.
+ */
+PJ_DEF(const pj_ice_sess_check*)
+pj_ice_strans_get_valid_pair(const pj_ice_strans *ice_st,
+                             unsigned comp_id)
+{
+    PJ_ASSERT_RETURN(ice_st && comp_id && comp_id <= ice_st->comp_cnt,
+                     NULL);
+
+    return (ice_st->ice == NULL) ? NULL
+                                 : ice_st->ice->comp[comp_id-1].valid_check;
+}
+
+/*
+ * Stop ICE!
+ */
+/* Destroy the ICE session (if any) and fall back to INIT state; the
+ * gathered candidates and sockets are kept for a later init_ice.
+ */
+PJ_DEF(pj_status_t) pj_ice_strans_stop_ice(pj_ice_strans *ice_st)
+{
+    if (ice_st->ice != NULL) {
+        pj_ice_sess_destroy(ice_st->ice);
+        ice_st->ice = NULL;
+    }
+
+    ice_st->state = PJ_ICE_STRANS_STATE_INIT;
+
+    return PJ_SUCCESS;
+}
+
+/*
+ * Application wants to send outgoing packet.
+ */
+/*
+ * Send an outgoing packet.  While ICE is running (and has not failed),
+ * data goes through the ICE session's selected path; otherwise it is
+ * sent directly via the component's default candidate (TURN Send
+ * indication for a relayed default, plain STUN socket otherwise).
+ */
+PJ_DEF(pj_status_t) pj_ice_strans_sendto( pj_ice_strans *ice_st,
+                                          unsigned comp_id,
+                                          const void *data,
+                                          pj_size_t data_len,
+                                          const pj_sockaddr_t *dst_addr,
+                                          int dst_addr_len)
+{
+    pj_ice_strans_comp *comp;
+    unsigned def_cand;
+    pj_status_t status;
+
+    PJ_ASSERT_RETURN(ice_st && comp_id && comp_id <= ice_st->comp_cnt &&
+                     dst_addr && dst_addr_len, PJ_EINVAL);
+
+    comp = ice_st->comp[comp_id-1];
+
+    /* Check that default candidate for the component exists */
+    def_cand = comp->default_cand;
+    if (def_cand >= comp->cand_cnt)
+        return PJ_EINVALIDOP;
+
+    /* If ICE is available, send data with ICE, otherwise send with the
+     * default candidate selected during initialization.
+     *
+     * https://trac.pjsip.org/repos/ticket/1416:
+     * Once ICE has failed, also send data with the default candidate.
+     */
+    if (ice_st->ice && ice_st->state < PJ_ICE_STRANS_STATE_FAILED) {
+        /* Hold the TURN socket's lock across the send so the relay
+         * cannot be torn down mid-operation.
+         */
+        if (comp->turn_sock) {
+            pj_turn_sock_lock(comp->turn_sock);
+        }
+        status = pj_ice_sess_send_data(ice_st->ice, comp_id, data, data_len);
+        if (comp->turn_sock) {
+            pj_turn_sock_unlock(comp->turn_sock);
+        }
+        return status;
+
+    } else if (comp->cand_list[def_cand].status == PJ_SUCCESS) {
+
+        if (comp->cand_list[def_cand].type == PJ_ICE_CAND_TYPE_RELAYED) {
+
+            enum {
+                msg_disable_ind = 0xFFFF &
+                                  ~(PJ_STUN_SESS_LOG_TX_IND|
+                                    PJ_STUN_SESS_LOG_RX_IND)
+            };
+
+            /* https://trac.pjsip.org/repos/ticket/1316 */
+            if (comp->turn_sock == NULL) {
+                /* TURN socket error */
+                return PJ_EINVALIDOP;
+            }
+
+            if (!comp->turn_log_off) {
+                /* Disable logging for Send/Data indications */
+                PJ_LOG(5,(ice_st->obj_name,
+                          "Disabling STUN Indication logging for "
+                          "component %d", comp->comp_id));
+                pj_turn_sock_set_log(comp->turn_sock, msg_disable_ind);
+                comp->turn_log_off = PJ_TRUE;
+            }
+
+            status = pj_turn_sock_sendto(comp->turn_sock,
+                                         (const pj_uint8_t*)data, data_len,
+                                         dst_addr, dst_addr_len);
+            return (status==PJ_SUCCESS||status==PJ_EPENDING) ?
+                    PJ_SUCCESS : status;
+        } else {
+            /* (FIX: removed the dead "pkt_size = data_len" store; the
+             * variable was never read.)
+             */
+            status = pj_stun_sock_sendto(comp->stun_sock, NULL, data,
+                                         data_len, 0, dst_addr, dst_addr_len);
+            return (status==PJ_SUCCESS||status==PJ_EPENDING) ?
+                    PJ_SUCCESS : status;
+        }
+
+    } else
+        return PJ_EINVALIDOP;
+}
+
/*
 * Callback called by ICE session when ICE processing is complete, either
 * successfully or with failure.
 *
 * On success this logs the selected candidate pairs, activates TURN
 * channel binding for any TURN-based local candidate, updates the
 * transport state, and finally reports the result to the application
 * through cb.on_ice_complete.
 */
static void on_ice_complete(pj_ice_sess *ice, pj_status_t status)
{
    pj_ice_strans *ice_st = (pj_ice_strans*)ice->user_data;
    pj_time_val t;
    unsigned msec;

    /* Keep the transport alive across the application callback */
    sess_add_ref(ice_st);

    /* Compute the negotiation duration for logging */
    pj_gettimeofday(&t);
    PJ_TIME_VAL_SUB(t, ice_st->start_time);
    msec = PJ_TIME_VAL_MSEC(t);

    if (ice_st->cb.on_ice_complete) {
        if (status != PJ_SUCCESS) {
            char errmsg[PJ_ERR_MSG_SIZE];
            pj_strerror(status, errmsg, sizeof(errmsg));
            PJ_LOG(4,(ice_st->obj_name,
                      "ICE negotiation failed after %ds:%03d: %s",
                      msec/1000, msec%1000, errmsg));
        } else {
            unsigned i;
            /* Log-class mask with Send/Data indication bits cleared */
            enum {
                msg_disable_ind = 0xFFFF &
                                  ~(PJ_STUN_SESS_LOG_TX_IND|
                                    PJ_STUN_SESS_LOG_RX_IND)
            };

            PJ_LOG(4,(ice_st->obj_name,
                      "ICE negotiation success after %ds:%03d",
                      msec/1000, msec%1000));

            /* Log the nominated pair of each component */
            for (i=0; i<ice_st->comp_cnt; ++i) {
                const pj_ice_sess_check *check;

                check = pj_ice_strans_get_valid_pair(ice_st, i+1);
                if (check) {
                    char lip[PJ_INET6_ADDRSTRLEN+10];
                    char rip[PJ_INET6_ADDRSTRLEN+10];

                    pj_sockaddr_print(&check->lcand->addr, lip,
                                      sizeof(lip), 3);
                    pj_sockaddr_print(&check->rcand->addr, rip,
                                      sizeof(rip), 3);

                    if (check->lcand->transport_id == TP_TURN) {
                        /* Activate channel binding for the remote address
                         * for more efficient data transfer using TURN.
                         *
                         * NOTE(review): 'status' is overwritten here; if
                         * channel binding fails, the state update and the
                         * callback below report failure even though ICE
                         * negotiation itself succeeded — confirm this is
                         * intended.
                         */
                        status = pj_turn_sock_bind_channel(
                                        ice_st->comp[i]->turn_sock,
                                        &check->rcand->addr,
                                        sizeof(check->rcand->addr));

                        /* Disable logging for Send/Data indications */
                        PJ_LOG(5,(ice_st->obj_name,
                                  "Disabling STUN Indication logging for "
                                  "component %d", i+1));
                        pj_turn_sock_set_log(ice_st->comp[i]->turn_sock,
                                             msg_disable_ind);
                        ice_st->comp[i]->turn_log_off = PJ_TRUE;
                    }

                    PJ_LOG(4,(ice_st->obj_name, " Comp %d: "
                              "sending from %s candidate %s to "
                              "%s candidate %s",
                              i+1,
                              pj_ice_get_cand_type_name(check->lcand->type),
                              lip,
                              pj_ice_get_cand_type_name(check->rcand->type),
                              rip));

                } else {
                    PJ_LOG(4,(ice_st->obj_name,
                              "Comp %d: disabled", i+1));
                }
            }
        }

        ice_st->state = (status==PJ_SUCCESS) ? PJ_ICE_STRANS_STATE_RUNNING :
                                               PJ_ICE_STRANS_STATE_FAILED;

        pj_log_push_indent();
        (*ice_st->cb.on_ice_complete)(ice_st, PJ_ICE_STRANS_OP_NEGOTIATION,
                                      status);
        pj_log_pop_indent();

    }

    sess_dec_ref(ice_st);
}
+
+/*
+ * Callback called by ICE session when it wants to send outgoing packet.
+ */
+static pj_status_t ice_tx_pkt(pj_ice_sess *ice,
+ unsigned comp_id,
+ unsigned transport_id,
+ const void *pkt, pj_size_t size,
+ const pj_sockaddr_t *dst_addr,
+ unsigned dst_addr_len)
+{
+ pj_ice_strans *ice_st = (pj_ice_strans*)ice->user_data;
+ pj_ice_strans_comp *comp;
+ pj_status_t status;
+
+ PJ_ASSERT_RETURN(comp_id && comp_id <= ice_st->comp_cnt, PJ_EINVAL);
+
+ comp = ice_st->comp[comp_id-1];
+
+ TRACE_PKT((comp->ice_st->obj_name,
+ "Component %d TX packet to %s:%d with transport %d",
+ comp_id,
+ pj_inet_ntoa(((pj_sockaddr_in*)dst_addr)->sin_addr),
+ (int)pj_ntohs(((pj_sockaddr_in*)dst_addr)->sin_port),
+ transport_id));
+
+ if (transport_id == TP_TURN) {
+ if (comp->turn_sock) {
+ status = pj_turn_sock_sendto(comp->turn_sock,
+ (const pj_uint8_t*)pkt, size,
+ dst_addr, dst_addr_len);
+ } else {
+ status = PJ_EINVALIDOP;
+ }
+ } else if (transport_id == TP_STUN) {
+ status = pj_stun_sock_sendto(comp->stun_sock, NULL,
+ pkt, size, 0,
+ dst_addr, dst_addr_len);
+ } else {
+ pj_assert(!"Invalid transport ID");
+ status = PJ_EINVALIDOP;
+ }
+
+ return (status==PJ_SUCCESS||status==PJ_EPENDING) ? PJ_SUCCESS : status;
+}
+
+/*
+ * Callback called by ICE session when it receives application data.
+ */
+static void ice_rx_data(pj_ice_sess *ice,
+ unsigned comp_id,
+ unsigned transport_id,
+ void *pkt, pj_size_t size,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len)
+{
+ pj_ice_strans *ice_st = (pj_ice_strans*)ice->user_data;
+
+ PJ_UNUSED_ARG(transport_id);
+
+ if (ice_st->cb.on_rx_data) {
+ (*ice_st->cb.on_rx_data)(ice_st, comp_id, pkt, size,
+ src_addr, src_addr_len);
+ }
+}
+
+/* Notification when incoming packet has been received from
+ * the STUN socket.
+ */
+static pj_bool_t stun_on_rx_data(pj_stun_sock *stun_sock,
+ void *pkt,
+ unsigned pkt_len,
+ const pj_sockaddr_t *src_addr,
+ unsigned addr_len)
+{
+ pj_ice_strans_comp *comp;
+ pj_ice_strans *ice_st;
+ pj_status_t status;
+
+ comp = (pj_ice_strans_comp*) pj_stun_sock_get_user_data(stun_sock);
+ if (comp == NULL) {
+ /* We have disassociated ourselves from the STUN socket */
+ return PJ_FALSE;
+ }
+
+ ice_st = comp->ice_st;
+
+ sess_add_ref(ice_st);
+
+ if (ice_st->ice == NULL) {
+ /* The ICE session is gone, but we're still receiving packets.
+ * This could also happen if remote doesn't do ICE. So just
+ * report this to application.
+ */
+ if (ice_st->cb.on_rx_data) {
+ (*ice_st->cb.on_rx_data)(ice_st, comp->comp_id, pkt, pkt_len,
+ src_addr, addr_len);
+ }
+
+ } else {
+
+ /* Hand over the packet to ICE session */
+ status = pj_ice_sess_on_rx_pkt(comp->ice_st->ice, comp->comp_id,
+ TP_STUN, pkt, pkt_len,
+ src_addr, addr_len);
+
+ if (status != PJ_SUCCESS) {
+ ice_st_perror(comp->ice_st, "Error processing packet",
+ status);
+ }
+ }
+
+ return sess_dec_ref(ice_st);
+}
+
+/* Notifification when asynchronous send operation to the STUN socket
+ * has completed.
+ */
+static pj_bool_t stun_on_data_sent(pj_stun_sock *stun_sock,
+ pj_ioqueue_op_key_t *send_key,
+ pj_ssize_t sent)
+{
+ PJ_UNUSED_ARG(stun_sock);
+ PJ_UNUSED_ARG(send_key);
+ PJ_UNUSED_ARG(sent);
+ return PJ_TRUE;
+}
+
/* Notification when the status of the STUN transport has changed.
 *
 * Handles completion/failure of DNS resolution, Binding discovery (and
 * later mapped-address changes), and keep-alives for the component's
 * server-reflexive candidate. Returns the value of sess_dec_ref(), which
 * is PJ_FALSE when this call released the last reference.
 */
static pj_bool_t stun_on_status(pj_stun_sock *stun_sock,
                                pj_stun_sock_op op,
                                pj_status_t status)
{
    pj_ice_strans_comp *comp;
    pj_ice_strans *ice_st;
    pj_ice_sess_cand *cand = NULL;
    unsigned i;

    /* The STUN socket must report a final status, never pending */
    pj_assert(status != PJ_EPENDING);

    comp = (pj_ice_strans_comp*) pj_stun_sock_get_user_data(stun_sock);
    ice_st = comp->ice_st;

    /* Keep the transport alive across callbacks invoked below */
    sess_add_ref(ice_st);

    /* Wait until initialization completes */
    pj_lock_acquire(ice_st->init_lock);

    /* Find the srflx candidate of this component */
    for (i=0; i<comp->cand_cnt; ++i) {
        if (comp->cand_list[i].type == PJ_ICE_CAND_TYPE_SRFLX) {
            cand = &comp->cand_list[i];
            break;
        }
    }

    pj_lock_release(ice_st->init_lock);

    /* It is possible that we don't have srflx candidate even though this
     * callback is called. This could happen when we cancel adding srflx
     * candidate due to initialization error.
     */
    if (cand == NULL) {
        return sess_dec_ref(ice_st);
    }

    switch (op) {
    case PJ_STUN_SOCK_DNS_OP:
        if (status != PJ_SUCCESS) {
            /* May not have cand, e.g. when error during init */
            if (cand)
                cand->status = status;
            if (!ice_st->cfg.stun.ignore_stun_error) {
                sess_fail(ice_st, PJ_ICE_STRANS_OP_INIT,
                          "DNS resolution failed", status);
            } else {
                PJ_LOG(4,(ice_st->obj_name,
                          "STUN error is ignored for comp %d",
                          comp->comp_id));
            }
        }
        break;
    case PJ_STUN_SOCK_BINDING_OP:
    case PJ_STUN_SOCK_MAPPED_ADDR_CHANGE:
        if (status == PJ_SUCCESS) {
            pj_stun_sock_info info;

            status = pj_stun_sock_get_info(stun_sock, &info);
            if (status == PJ_SUCCESS) {
                char ipaddr[PJ_INET6_ADDRSTRLEN+10];
                const char *op_name = (op==PJ_STUN_SOCK_BINDING_OP) ?
                                    "Binding discovery complete" :
                                    "srflx address changed";
                pj_bool_t dup = PJ_FALSE;

                /* Eliminate the srflx candidate if the address is
                 * equal to other (host) candidates.
                 */
                for (i=0; i<comp->cand_cnt; ++i) {
                    if (comp->cand_list[i].type == PJ_ICE_CAND_TYPE_HOST &&
                        pj_sockaddr_cmp(&comp->cand_list[i].addr,
                                        &info.mapped_addr) == 0)
                    {
                        dup = PJ_TRUE;
                        break;
                    }
                }

                if (dup) {
                    /* Duplicate found, remove the srflx candidate */
                    unsigned idx = cand - comp->cand_list;

                    /* Update default candidate index: shift it down when
                     * it is past the removed slot, or pick the other
                     * slot (0 or 1) when it pointed at the removed one.
                     */
                    if (comp->default_cand > idx) {
                        --comp->default_cand;
                    } else if (comp->default_cand == idx) {
                        comp->default_cand = !idx;
                    }

                    /* Remove srflx candidate */
                    pj_array_erase(comp->cand_list, sizeof(comp->cand_list[0]),
                                   comp->cand_cnt, idx);
                    --comp->cand_cnt;
                } else {
                    /* Otherwise update the address */
                    pj_sockaddr_cp(&cand->addr, &info.mapped_addr);
                    cand->status = PJ_SUCCESS;
                }

                PJ_LOG(4,(comp->ice_st->obj_name,
                          "Comp %d: %s, "
                          "srflx address is %s",
                          comp->comp_id, op_name,
                          pj_sockaddr_print(&info.mapped_addr, ipaddr,
                                            sizeof(ipaddr), 3)));

                /* Check whether overall init can now complete */
                sess_init_update(ice_st);
            }
        }

        if (status != PJ_SUCCESS) {
            /* May not have cand, e.g. when error during init */
            if (cand)
                cand->status = status;
            if (!ice_st->cfg.stun.ignore_stun_error) {
                sess_fail(ice_st, PJ_ICE_STRANS_OP_INIT,
                          "STUN binding request failed", status);
            } else {
                PJ_LOG(4,(ice_st->obj_name,
                          "STUN error is ignored for comp %d",
                          comp->comp_id));

                if (cand) {
                    unsigned idx = cand - comp->cand_list;

                    /* Update default candidate index */
                    if (comp->default_cand == idx) {
                        comp->default_cand = !idx;
                    }
                }

                sess_init_update(ice_st);
            }
        }
        break;
    case PJ_STUN_SOCK_KEEP_ALIVE_OP:
        if (status != PJ_SUCCESS) {
            pj_assert(cand != NULL);
            cand->status = status;
            if (!ice_st->cfg.stun.ignore_stun_error) {
                sess_fail(ice_st, PJ_ICE_STRANS_OP_INIT,
                          "STUN keep-alive failed", status);
            } else {
                PJ_LOG(4,(ice_st->obj_name, "STUN error is ignored"));
            }
        }
        break;
    }

    return sess_dec_ref(ice_st);
}
+
+/* Callback when TURN socket has received a packet */
+static void turn_on_rx_data(pj_turn_sock *turn_sock,
+ void *pkt,
+ unsigned pkt_len,
+ const pj_sockaddr_t *peer_addr,
+ unsigned addr_len)
+{
+ pj_ice_strans_comp *comp;
+ pj_status_t status;
+
+ comp = (pj_ice_strans_comp*) pj_turn_sock_get_user_data(turn_sock);
+ if (comp == NULL) {
+ /* We have disassociated ourselves from the TURN socket */
+ return;
+ }
+
+ sess_add_ref(comp->ice_st);
+
+ if (comp->ice_st->ice == NULL) {
+ /* The ICE session is gone, but we're still receiving packets.
+ * This could also happen if remote doesn't do ICE and application
+ * specifies TURN as the default address in SDP.
+ * So in this case just give the packet to application.
+ */
+ if (comp->ice_st->cb.on_rx_data) {
+ (*comp->ice_st->cb.on_rx_data)(comp->ice_st, comp->comp_id, pkt,
+ pkt_len, peer_addr, addr_len);
+ }
+
+ } else {
+
+ /* Hand over the packet to ICE */
+ status = pj_ice_sess_on_rx_pkt(comp->ice_st->ice, comp->comp_id,
+ TP_TURN, pkt, pkt_len,
+ peer_addr, addr_len);
+
+ if (status != PJ_SUCCESS) {
+ ice_st_perror(comp->ice_st,
+ "Error processing packet from TURN relay",
+ status);
+ }
+ }
+
+ sess_dec_ref(comp->ice_st);
+}
+
+
/* Callback when the TURN client state has changed.
 *
 * On PJ_TURN_STATE_READY the relayed candidate is filled in from the
 * allocation info and becomes the component's default candidate. On
 * deallocation/failure the component detaches from the TURN socket and
 * either fails the session (during init, or after repeated errors) or
 * retries the allocation.
 */
static void turn_on_state(pj_turn_sock *turn_sock, pj_turn_state_t old_state,
                          pj_turn_state_t new_state)
{
    pj_ice_strans_comp *comp;

    comp = (pj_ice_strans_comp*) pj_turn_sock_get_user_data(turn_sock);
    if (comp == NULL) {
        /* Not interested in further state notification once the relay is
         * disconnecting.
         */
        return;
    }

    PJ_LOG(5,(comp->ice_st->obj_name, "TURN client state changed %s --> %s",
              pj_turn_state_name(old_state), pj_turn_state_name(new_state)));
    pj_log_push_indent();

    /* Keep the transport alive across callbacks invoked below */
    sess_add_ref(comp->ice_st);

    if (new_state == PJ_TURN_STATE_READY) {
        pj_turn_session_info rel_info;
        char ipaddr[PJ_INET6_ADDRSTRLEN+8];
        pj_ice_sess_cand *cand = NULL;
        unsigned i;

        /* Allocation succeeded; reset the consecutive error counter */
        comp->turn_err_cnt = 0;

        /* Get allocation info */
        pj_turn_sock_get_info(turn_sock, &rel_info);

        /* Wait until initialization completes */
        pj_lock_acquire(comp->ice_st->init_lock);

        /* Find relayed candidate in the component */
        for (i=0; i<comp->cand_cnt; ++i) {
            if (comp->cand_list[i].type == PJ_ICE_CAND_TYPE_RELAYED) {
                cand = &comp->cand_list[i];
                break;
            }
        }
        pj_assert(cand != NULL);

        pj_lock_release(comp->ice_st->init_lock);

        /* Update candidate with the allocated relay address; the base of
         * a relayed candidate is the relay address itself.
         */
        pj_sockaddr_cp(&cand->addr, &rel_info.relay_addr);
        pj_sockaddr_cp(&cand->base_addr, &rel_info.relay_addr);
        pj_sockaddr_cp(&cand->rel_addr, &rel_info.mapped_addr);
        pj_ice_calc_foundation(comp->ice_st->pool, &cand->foundation,
                               PJ_ICE_CAND_TYPE_RELAYED,
                               &rel_info.relay_addr);
        cand->status = PJ_SUCCESS;

        /* Set default candidate to relay */
        comp->default_cand = cand - comp->cand_list;

        PJ_LOG(4,(comp->ice_st->obj_name,
                  "Comp %d: TURN allocation complete, relay address is %s",
                  comp->comp_id,
                  pj_sockaddr_print(&rel_info.relay_addr, ipaddr,
                                    sizeof(ipaddr), 3)));

        /* Check whether overall init can now complete */
        sess_init_update(comp->ice_st);

    } else if (new_state >= PJ_TURN_STATE_DEALLOCATING) {
        pj_turn_session_info info;

        ++comp->turn_err_cnt;

        pj_turn_sock_get_info(turn_sock, &info);

        /* Unregister ourself from the TURN relay */
        pj_turn_sock_set_user_data(turn_sock, NULL);
        comp->turn_sock = NULL;

        /* Set session to fail if we're still initializing */
        if (comp->ice_st->state < PJ_ICE_STRANS_STATE_READY) {
            sess_fail(comp->ice_st, PJ_ICE_STRANS_OP_INIT,
                      "TURN allocation failed", info.last_status);
        } else if (comp->turn_err_cnt > 1) {
            /* Repeated failure after init: give up on keep-alive */
            sess_fail(comp->ice_st, PJ_ICE_STRANS_OP_KEEP_ALIVE,
                      "TURN refresh failed", info.last_status);
        } else {
            /* First failure after init: retry the allocation */
            PJ_PERROR(4,(comp->ice_st->obj_name, info.last_status,
                      "Comp %d: TURN allocation failed, retrying",
                      comp->comp_id));
            add_update_turn(comp->ice_st, comp);
        }
    }

    sess_dec_ref(comp->ice_st);

    pj_log_pop_indent();
}
+
diff --git a/pjnath/src/pjnath/nat_detect.c b/pjnath/src/pjnath/nat_detect.c
new file mode 100644
index 0000000..86ac694
--- /dev/null
+++ b/pjnath/src/pjnath/nat_detect.c
@@ -0,0 +1,911 @@
+/* $Id: nat_detect.c 3553 2011-05-05 06:14:19Z nanang $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <pjnath/nat_detect.h>
+#include <pjnath/errno.h>
+#include <pj/assert.h>
+#include <pj/ioqueue.h>
+#include <pj/log.h>
+#include <pj/os.h>
+#include <pj/pool.h>
+#include <pj/rand.h>
+#include <pj/string.h>
+#include <pj/timer.h>
+#include <pj/compat/socket.h>
+
+
/* NAT type names; the order must match the pj_stun_nat_type enumeration
 * exactly, since entries are looked up by enum value (see
 * pj_stun_get_nat_name() and end_session()).
 */
static const char *nat_type_names[] =
{
    "Unknown",
    "ErrUnknown",
    "Open",
    "Blocked",
    "Symmetric UDP",
    "Full Cone",
    "Symmetric",
    "Restricted",
    "Port Restricted"
};
+
+
/* Bit flags for the STUN CHANGE-REQUEST attribute (RFC 3489). */
#define CHANGE_IP_FLAG      4
#define CHANGE_PORT_FLAG    2
#define CHANGE_IP_PORT_FLAG (CHANGE_IP_FLAG | CHANGE_PORT_FLAG)
/* Delay (msec) between starting consecutive tests. */
#define TEST_INTERVAL       50

/* The individual tests of the RFC 3489 NAT type discovery procedure.
 * The test id is embedded in the STUN transaction ID so responses can
 * be matched back to their test (see send_test()).
 */
enum test_type
{
    ST_TEST_1,
    ST_TEST_2,
    ST_TEST_3,
    ST_TEST_1B,
    ST_MAX      /* number of tests; also sizes the result array */
};

/* Human readable test names, indexed by enum test_type. */
static const char *test_names[] =
{
    "Test I: Binding request",
    "Test II: Binding request with change address and port request",
    "Test III: Binding request with change port request",
    "Test IB: Binding request to alternate address"
};

/* IDs carried by the session's single timer entry. */
enum timer_type
{
    TIMER_TEST	    = 1,    /* start the next test */
    TIMER_DESTROY   = 2     /* self-destroy the session */
};
+
/* State of one NAT detection session. A session runs the complete
 * RFC 3489 classification against a single STUN server, reports the
 * result through the callback, then destroys itself.
 */
typedef struct nat_detect_session
{
    pj_pool_t		*pool;          /* memory pool owning this session */
    pj_mutex_t		*mutex;         /* protects session state */

    pj_timer_heap_t	*timer_heap;    /* timer heap from stun_cfg */
    pj_timer_entry	 timer;         /* test/destroy timer (enum timer_type) */
    unsigned		 timer_executed; /* how many TIMER_TEST ticks ran */

    void		*user_data;     /* opaque data for the callback */
    pj_stun_nat_detect_cb *cb;          /* result callback */
    pj_sock_t		 sock;          /* UDP socket used for all tests */
    pj_sockaddr_in	 local_addr;    /* bound local address */
    pj_ioqueue_key_t	*key;           /* ioqueue registration of sock */
    pj_sockaddr_in	 server;        /* primary STUN server address */
    pj_sockaddr_in	*cur_server;    /* server targeted by current test */
    pj_stun_session	*stun_sess;     /* STUN session for the requests */

    pj_ioqueue_op_key_t  read_op, write_op;
    pj_uint8_t		 rx_pkt[PJ_STUN_MAX_PKT_LEN]; /* receive buffer */
    pj_ssize_t		 rx_pkt_len;
    pj_sockaddr_in	 src_addr;      /* source of last received packet */
    int			 src_addr_len;

    /* Per-test state and result, indexed by enum test_type. */
    struct result
    {
	pj_bool_t	executed;       /* test has been sent */
	pj_bool_t	complete;       /* response or timeout received */
	pj_status_t	status;         /* test result */
	pj_sockaddr_in	ma;             /* MAPPED-ADDRESS from response */
	pj_sockaddr_in	ca;             /* CHANGED-ADDRESS from response */
	pj_stun_tx_data	*tdata;         /* the transmitted request */
    } result[ST_MAX];

} nat_detect_session;
+
+
/* Forward declarations of the session's internal callbacks/helpers. */
static void on_read_complete(pj_ioqueue_key_t *key,
                             pj_ioqueue_op_key_t *op_key,
                             pj_ssize_t bytes_read);
static void on_request_complete(pj_stun_session *sess,
                                pj_status_t status,
                                void *token,
                                pj_stun_tx_data *tdata,
                                const pj_stun_msg *response,
                                const pj_sockaddr_t *src_addr,
                                unsigned src_addr_len);
static pj_status_t on_send_msg(pj_stun_session *sess,
                               void *token,
                               const void *pkt,
                               pj_size_t pkt_size,
                               const pj_sockaddr_t *dst_addr,
                               unsigned addr_len);

static pj_status_t send_test(nat_detect_session *sess,
                             enum test_type test_id,
                             const pj_sockaddr_in *alt_addr,
                             pj_uint32_t change_flag);
static void on_sess_timer(pj_timer_heap_t *th,
                          pj_timer_entry *te);
static void sess_destroy(nat_detect_session *sess);
+
+
+/*
+ * Get the NAT name from the specified NAT type.
+ */
+PJ_DEF(const char*) pj_stun_get_nat_name(pj_stun_nat_type type)
+{
+ PJ_ASSERT_RETURN(type >= 0 && type <= PJ_STUN_NAT_TYPE_PORT_RESTRICTED,
+ "*Invalid*");
+
+ return nat_type_names[type];
+}
+
+static int test_executed(nat_detect_session *sess)
+{
+ unsigned i, count;
+ for (i=0, count=0; i<PJ_ARRAY_SIZE(sess->result); ++i) {
+ if (sess->result[i].executed)
+ ++count;
+ }
+ return count;
+}
+
+static int test_completed(nat_detect_session *sess)
+{
+ unsigned i, count;
+ for (i=0, count=0; i<PJ_ARRAY_SIZE(sess->result); ++i) {
+ if (sess->result[i].complete)
+ ++count;
+ }
+ return count;
+}
+
+static pj_status_t get_local_interface(const pj_sockaddr_in *server,
+ pj_in_addr *local_addr)
+{
+ pj_sock_t sock;
+ pj_sockaddr_in tmp;
+ int addr_len;
+ pj_status_t status;
+
+ status = pj_sock_socket(pj_AF_INET(), pj_SOCK_DGRAM(), 0, &sock);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ status = pj_sock_bind_in(sock, 0, 0);
+ if (status != PJ_SUCCESS) {
+ pj_sock_close(sock);
+ return status;
+ }
+
+ status = pj_sock_connect(sock, server, sizeof(pj_sockaddr_in));
+ if (status != PJ_SUCCESS) {
+ pj_sock_close(sock);
+ return status;
+ }
+
+ addr_len = sizeof(pj_sockaddr_in);
+ status = pj_sock_getsockname(sock, &tmp, &addr_len);
+ if (status != PJ_SUCCESS) {
+ pj_sock_close(sock);
+ return status;
+ }
+
+ local_addr->s_addr = tmp.sin_addr.s_addr;
+
+ pj_sock_close(sock);
+ return PJ_SUCCESS;
+}
+
+
/*
 * Start NAT classification against the given STUN server using the
 * RFC 3489 discovery procedure.
 *
 * The operation is asynchronous: the result is reported through 'cb'
 * (with 'user_data' passed back), after which the session destroys
 * itself. 'stun_cfg' must have its pool factory, ioqueue, and timer
 * heap set. Returns PJ_SUCCESS when detection has been started, or an
 * error code when initialization fails (all partial resources are
 * released on the error path).
 */
PJ_DEF(pj_status_t) pj_stun_detect_nat_type(const pj_sockaddr_in *server,
                                            pj_stun_config *stun_cfg,
                                            void *user_data,
                                            pj_stun_nat_detect_cb *cb)
{
    pj_pool_t *pool;
    nat_detect_session *sess;
    pj_stun_session_cb sess_cb;
    pj_ioqueue_callback ioqueue_cb;
    int addr_len;
    pj_status_t status;

    PJ_ASSERT_RETURN(server && stun_cfg, PJ_EINVAL);
    PJ_ASSERT_RETURN(stun_cfg->pf && stun_cfg->ioqueue && stun_cfg->timer_heap,
                     PJ_EINVAL);

    /*
     * Init NAT detection session.
     */
    pool = pj_pool_create(stun_cfg->pf, "natck%p", PJNATH_POOL_LEN_NATCK,
                          PJNATH_POOL_INC_NATCK, NULL);
    if (!pool)
        return PJ_ENOMEM;

    sess = PJ_POOL_ZALLOC_T(pool, nat_detect_session);
    sess->pool = pool;
    sess->user_data = user_data;
    sess->cb = cb;

    status = pj_mutex_create_recursive(pool, pool->obj_name, &sess->mutex);
    if (status != PJ_SUCCESS)
        goto on_error;

    pj_memcpy(&sess->server, server, sizeof(pj_sockaddr_in));

    /*
     * Init timer to self-destroy.
     */
    sess->timer_heap = stun_cfg->timer_heap;
    sess->timer.cb = &on_sess_timer;
    sess->timer.user_data = sess;


    /*
     * Initialize socket.
     */
    status = pj_sock_socket(pj_AF_INET(), pj_SOCK_DGRAM(), 0, &sess->sock);
    if (status != PJ_SUCCESS)
        goto on_error;

    /*
     * Bind to any (ephemeral port, wildcard address).
     */
    pj_bzero(&sess->local_addr, sizeof(pj_sockaddr_in));
    sess->local_addr.sin_family = pj_AF_INET();
    status = pj_sock_bind(sess->sock, &sess->local_addr,
                          sizeof(pj_sockaddr_in));
    if (status != PJ_SUCCESS)
        goto on_error;

    /*
     * Get local/bound address (to learn the chosen port).
     */
    addr_len = sizeof(sess->local_addr);
    status = pj_sock_getsockname(sess->sock, &sess->local_addr, &addr_len);
    if (status != PJ_SUCCESS)
        goto on_error;

    /*
     * Find out which interface is used to send to the server, since the
     * wildcard bind above leaves the IP part as 0.0.0.0.
     */
    status = get_local_interface(server, &sess->local_addr.sin_addr);
    if (status != PJ_SUCCESS)
        goto on_error;

    PJ_LOG(5,(sess->pool->obj_name, "Local address is %s:%d",
              pj_inet_ntoa(sess->local_addr.sin_addr),
              pj_ntohs(sess->local_addr.sin_port)));

    PJ_LOG(5,(sess->pool->obj_name, "Server set to %s:%d",
              pj_inet_ntoa(server->sin_addr),
              pj_ntohs(server->sin_port)));

    /*
     * Register socket to ioqueue to receive asynchronous input
     * notification.
     */
    pj_bzero(&ioqueue_cb, sizeof(ioqueue_cb));
    ioqueue_cb.on_read_complete = &on_read_complete;

    status = pj_ioqueue_register_sock(sess->pool, stun_cfg->ioqueue,
                                      sess->sock, sess, &ioqueue_cb,
                                      &sess->key);
    if (status != PJ_SUCCESS)
        goto on_error;

    /*
     * Create STUN session.
     */
    pj_bzero(&sess_cb, sizeof(sess_cb));
    sess_cb.on_request_complete = &on_request_complete;
    sess_cb.on_send_msg = &on_send_msg;
    status = pj_stun_session_create(stun_cfg, pool->obj_name, &sess_cb,
                                    PJ_FALSE, &sess->stun_sess);
    if (status != PJ_SUCCESS)
        goto on_error;

    pj_stun_session_set_user_data(sess->stun_sess, sess);

    /*
     * Kick-off ioqueue reading (bytes_read==0 makes the callback just
     * post the first asynchronous recvfrom).
     */
    pj_ioqueue_op_key_init(&sess->read_op, sizeof(sess->read_op));
    pj_ioqueue_op_key_init(&sess->write_op, sizeof(sess->write_op));
    on_read_complete(sess->key, &sess->read_op, 0);

    /*
     * Start TEST_1 by firing the test timer immediately.
     */
    sess->timer.id = TIMER_TEST;
    on_sess_timer(stun_cfg->timer_heap, &sess->timer);

    return PJ_SUCCESS;

on_error:
    sess_destroy(sess);
    return status;
}
+
+
+static void sess_destroy(nat_detect_session *sess)
+{
+ if (sess->stun_sess) {
+ pj_stun_session_destroy(sess->stun_sess);
+ }
+
+ if (sess->key) {
+ pj_ioqueue_unregister(sess->key);
+ } else if (sess->sock && sess->sock != PJ_INVALID_SOCKET) {
+ pj_sock_close(sess->sock);
+ }
+
+ if (sess->mutex) {
+ pj_mutex_destroy(sess->mutex);
+ }
+
+ if (sess->pool) {
+ pj_pool_release(sess->pool);
+ }
+}
+
+
+static void end_session(nat_detect_session *sess,
+ pj_status_t status,
+ pj_stun_nat_type nat_type)
+{
+ pj_stun_nat_detect_result result;
+ char errmsg[PJ_ERR_MSG_SIZE];
+ pj_time_val delay;
+
+ if (sess->timer.id != 0) {
+ pj_timer_heap_cancel(sess->timer_heap, &sess->timer);
+ sess->timer.id = 0;
+ }
+
+ pj_bzero(&result, sizeof(result));
+ errmsg[0] = '\0';
+ result.status_text = errmsg;
+
+ result.status = status;
+ pj_strerror(status, errmsg, sizeof(errmsg));
+ result.nat_type = nat_type;
+ result.nat_type_name = nat_type_names[result.nat_type];
+
+ if (sess->cb)
+ (*sess->cb)(sess->user_data, &result);
+
+ delay.sec = 0;
+ delay.msec = 0;
+
+ sess->timer.id = TIMER_DESTROY;
+ pj_timer_heap_schedule(sess->timer_heap, &sess->timer, &delay);
+}
+
+
/*
 * Callback upon receiving a packet from the network.
 *
 * Feeds received datagrams to the STUN session, ends the session on a
 * permanent socket error, and always re-posts the asynchronous read so
 * the next packet can be received. Also called once with bytes_read==0
 * from pj_stun_detect_nat_type() to start the first read.
 */
static void on_read_complete(pj_ioqueue_key_t *key,
                             pj_ioqueue_op_key_t *op_key,
                             pj_ssize_t bytes_read)
{
    nat_detect_session *sess;
    pj_status_t status;

    sess = (nat_detect_session *) pj_ioqueue_get_user_data(key);
    pj_assert(sess != NULL);

    pj_mutex_lock(sess->mutex);

    if (bytes_read < 0) {
        /* Negative bytes_read carries the negated error code. Transient
         * errors (would-block, in-progress, connection-reset) are
         * ignored; anything else ends the session — note that in that
         * case the read is NOT re-armed (goto skips the recvfrom below).
         */
        if (-bytes_read != PJ_STATUS_FROM_OS(OSERR_EWOULDBLOCK) &&
            -bytes_read != PJ_STATUS_FROM_OS(OSERR_EINPROGRESS) &&
            -bytes_read != PJ_STATUS_FROM_OS(OSERR_ECONNRESET))
        {
            /* Permanent error */
            end_session(sess, -bytes_read, PJ_STUN_NAT_TYPE_ERR_UNKNOWN);
            goto on_return;
        }

    } else if (bytes_read > 0) {
        /* Hand the datagram to the STUN session for parsing/matching */
        pj_stun_session_on_rx_pkt(sess->stun_sess, sess->rx_pkt, bytes_read,
                                  PJ_STUN_IS_DATAGRAM|PJ_STUN_CHECK_PACKET,
                                  NULL, NULL,
                                  &sess->src_addr, sess->src_addr_len);
    }


    /* Re-arm the asynchronous read for the next packet */
    sess->rx_pkt_len = sizeof(sess->rx_pkt);
    sess->src_addr_len = sizeof(sess->src_addr);
    status = pj_ioqueue_recvfrom(key, op_key, sess->rx_pkt, &sess->rx_pkt_len,
                                 PJ_IOQUEUE_ALWAYS_ASYNC,
                                 &sess->src_addr, &sess->src_addr_len);

    if (status != PJ_EPENDING) {
        /* ALWAYS_ASYNC means anything other than pending is a failure */
        pj_assert(status != PJ_SUCCESS);
        end_session(sess, status, PJ_STUN_NAT_TYPE_ERR_UNKNOWN);
    }

on_return:
    pj_mutex_unlock(sess->mutex);
}
+
+
+/*
+ * Callback to send outgoing packet from STUN session.
+ */
+static pj_status_t on_send_msg(pj_stun_session *stun_sess,
+ void *token,
+ const void *pkt,
+ pj_size_t pkt_size,
+ const pj_sockaddr_t *dst_addr,
+ unsigned addr_len)
+{
+ nat_detect_session *sess;
+ pj_ssize_t pkt_len;
+ pj_status_t status;
+
+ PJ_UNUSED_ARG(token);
+
+ sess = (nat_detect_session*) pj_stun_session_get_user_data(stun_sess);
+
+ pkt_len = pkt_size;
+ status = pj_ioqueue_sendto(sess->key, &sess->write_op, pkt, &pkt_len, 0,
+ dst_addr, addr_len);
+
+ return status;
+
+}
+
/*
 * Callback upon request completion (response received, error response,
 * or transaction timeout).
 *
 * Extracts the MAPPED-ADDRESS and CHANGED-ADDRESS attributes, records
 * the outcome of the test identified by the transaction ID, launches
 * Test 1B when its preconditions are met, and — once enough tests have
 * completed — runs the RFC 3489 decision tree to classify the NAT and
 * end the session.
 */
static void on_request_complete(pj_stun_session *stun_sess,
                                pj_status_t status,
                                void *token,
                                pj_stun_tx_data *tdata,
                                const pj_stun_msg *response,
                                const pj_sockaddr_t *src_addr,
                                unsigned src_addr_len)
{
    nat_detect_session *sess;
    pj_stun_sockaddr_attr *mattr = NULL;
    pj_stun_changed_addr_attr *ca = NULL;
    pj_uint32_t *tsx_id;
    int cmp;
    unsigned test_id;

    PJ_UNUSED_ARG(token);
    PJ_UNUSED_ARG(tdata);
    PJ_UNUSED_ARG(src_addr);
    PJ_UNUSED_ARG(src_addr_len);

    sess = (nat_detect_session*) pj_stun_session_get_user_data(stun_sess);

    pj_mutex_lock(sess->mutex);

    /* Find errors in the response */
    if (status == PJ_SUCCESS) {

        /* Check error message */
        if (PJ_STUN_IS_ERROR_RESPONSE(response->hdr.type)) {
            pj_stun_errcode_attr *eattr;
            int err_code;

            eattr = (pj_stun_errcode_attr*)
                    pj_stun_msg_find_attr(response, PJ_STUN_ATTR_ERROR_CODE, 0);

            if (eattr != NULL)
                err_code = eattr->err_code;
            else
                err_code = PJ_STUN_SC_SERVER_ERROR;

            status = PJ_STATUS_FROM_STUN_CODE(err_code);


        } else {

            /* Get MAPPED-ADDRESS or XOR-MAPPED-ADDRESS (XOR variant is
             * preferred when both are present).
             */
            mattr = (pj_stun_sockaddr_attr*)
                    pj_stun_msg_find_attr(response, PJ_STUN_ATTR_XOR_MAPPED_ADDR, 0);
            if (mattr == NULL) {
                mattr = (pj_stun_sockaddr_attr*)
                        pj_stun_msg_find_attr(response, PJ_STUN_ATTR_MAPPED_ADDR, 0);
            }

            if (mattr == NULL) {
                status = PJNATH_ESTUNNOMAPPEDADDR;
            }

            /* Get CHANGED-ADDRESS attribute (needed as target of
             * Test 1B; its absence is treated as a server error).
             */
            ca = (pj_stun_changed_addr_attr*)
                 pj_stun_msg_find_attr(response, PJ_STUN_ATTR_CHANGED_ADDR, 0);

            if (ca == NULL) {
                status = PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_SERVER_ERROR);
            }

        }
    }

    /* Save the result. The test id was encoded into the third word of
     * the transaction ID by send_test().
     */
    tsx_id = (pj_uint32_t*) tdata->msg->hdr.tsx_id;
    test_id = tsx_id[2];

    if (test_id >= ST_MAX) {
        PJ_LOG(4,(sess->pool->obj_name, "Invalid transaction ID %u in response",
                  test_id));
        end_session(sess, PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_SERVER_ERROR),
                    PJ_STUN_NAT_TYPE_ERR_UNKNOWN);
        goto on_return;
    }

    PJ_LOG(5,(sess->pool->obj_name, "Completed %s, status=%d",
              test_names[test_id], status));

    sess->result[test_id].complete = PJ_TRUE;
    sess->result[test_id].status = status;
    if (status == PJ_SUCCESS) {
        /* mattr and ca are guaranteed non-NULL here, since either being
         * NULL above would have changed 'status'.
         */
        pj_memcpy(&sess->result[test_id].ma, &mattr->sockaddr.ipv4,
                  sizeof(pj_sockaddr_in));
        pj_memcpy(&sess->result[test_id].ca, &ca->sockaddr.ipv4,
                  sizeof(pj_sockaddr_in));
    }

    /* Send Test 1B only when Test 2 completes. Must not send Test 1B
     * before Test 2 completes to avoid creating mapping on the NAT.
     */
    if (!sess->result[ST_TEST_1B].executed &&
        sess->result[ST_TEST_2].complete &&
        sess->result[ST_TEST_2].status != PJ_SUCCESS &&
        sess->result[ST_TEST_1].complete &&
        sess->result[ST_TEST_1].status == PJ_SUCCESS)
    {
        /* Only needed when we appear to be behind a NAT (mapped address
         * differs from the local address).
         */
        cmp = pj_memcmp(&sess->local_addr, &sess->result[ST_TEST_1].ma,
                        sizeof(pj_sockaddr_in));
        if (cmp != 0)
            send_test(sess, ST_TEST_1B, &sess->result[ST_TEST_1].ca, 0);
    }

    /* Wait until at least three tests are done and no started test is
     * still outstanding before classifying.
     */
    if (test_completed(sess)<3 || test_completed(sess)!=test_executed(sess))
        goto on_return;

    /* Handle the test result according to RFC 3489 page 22:


                        +--------+
                        |  Test  |
                        |   1    |
                        +--------+
                             |
                             |
                             V
                            /\              /\
                         N /  \ Y          /  \ Y             +--------+
          UDP     <-------/Resp\--------->/ IP \------------->|  Test  |
          Blocked         \ ?  /          \Same/              |   2    |
                           \  /            \? /               +--------+
                            \/              \/                    |
                                             | N                  |
                                             |                    V
                                             V                    /\
                                         +--------+  Sym.      N /  \
                                         |  Test  |  UDP    <---/Resp\
                                         |   2    |  Firewall   \ ?  /
                                         +--------+              \  /
                                             |                    \/
                                             V                     |Y
                  /\                         /\                    |
   Symmetric  N  /  \       +--------+   N  /  \                   V
      NAT  <--- / IP \<-----|  Test  |<--- /Resp\               Open
                \Same/      |   1B   |     \ ?  /               Internet
                 \? /       +--------+      \  /
                  \/                         \/
                  |Y                              |Y
                  |                               |
                  |                               V
                  |                           Full
                  |                           Cone
                  V              /\
              +--------+        /  \ Y
              |  Test  |------>/Resp\---->Restricted
              |   3    |       \ ?  /
              +--------+        \  /
                                 \/
                                  |N
                                  |       Port
                                  +------>Restricted

                 Figure 2: Flow for type discovery process
    */

    switch (sess->result[ST_TEST_1].status) {
    case PJNATH_ESTUNTIMEDOUT:
        /*
         * Test 1 has timed-out. Conclude with NAT_TYPE_BLOCKED.
         */
        end_session(sess, PJ_SUCCESS, PJ_STUN_NAT_TYPE_BLOCKED);
        break;
    case PJ_SUCCESS:
        /*
         * Test 1 is successful. Further tests are needed to detect
         * NAT type. Compare the MAPPED-ADDRESS with the local address.
         */
        cmp = pj_memcmp(&sess->local_addr, &sess->result[ST_TEST_1].ma,
                        sizeof(pj_sockaddr_in));
        if (cmp==0) {
            /*
             * MAPPED-ADDRESS and local address is equal. Need one more
             * test to determine NAT type.
             */
            switch (sess->result[ST_TEST_2].status) {
            case PJ_SUCCESS:
                /*
                 * Test 2 is also successful. We're in the open.
                 */
                end_session(sess, PJ_SUCCESS, PJ_STUN_NAT_TYPE_OPEN);
                break;
            case PJNATH_ESTUNTIMEDOUT:
                /*
                 * Test 2 has timed out. We're behind somekind of UDP
                 * firewall.
                 */
                end_session(sess, PJ_SUCCESS, PJ_STUN_NAT_TYPE_SYMMETRIC_UDP);
                break;
            default:
                /*
                 * We've got other error with Test 2.
                 */
                end_session(sess, sess->result[ST_TEST_2].status,
                            PJ_STUN_NAT_TYPE_ERR_UNKNOWN);
                break;
            }
        } else {
            /*
             * MAPPED-ADDRESS is different than local address.
             * We're behind NAT.
             */
            switch (sess->result[ST_TEST_2].status) {
            case PJ_SUCCESS:
                /*
                 * Test 2 is successful. We're behind a full-cone NAT.
                 */
                end_session(sess, PJ_SUCCESS, PJ_STUN_NAT_TYPE_FULL_CONE);
                break;
            case PJNATH_ESTUNTIMEDOUT:
                /*
                 * Test 2 has timed-out Check result of test 1B..
                 */
                switch (sess->result[ST_TEST_1B].status) {
                case PJ_SUCCESS:
                    /*
                     * Compare the MAPPED-ADDRESS of test 1B with the
                     * MAPPED-ADDRESS returned in test 1..
                     */
                    cmp = pj_memcmp(&sess->result[ST_TEST_1].ma,
                                    &sess->result[ST_TEST_1B].ma,
                                    sizeof(pj_sockaddr_in));
                    if (cmp != 0) {
                        /*
                         * MAPPED-ADDRESS is different, we're behind a
                         * symmetric NAT.
                         */
                        end_session(sess, PJ_SUCCESS,
                                    PJ_STUN_NAT_TYPE_SYMMETRIC);
                    } else {
                        /*
                         * MAPPED-ADDRESS is equal. We're behind a restricted
                         * or port-restricted NAT, depending on the result of
                         * test 3.
                         */
                        switch (sess->result[ST_TEST_3].status) {
                        case PJ_SUCCESS:
                            /*
                             * Test 3 is successful, we're behind a restricted
                             * NAT.
                             */
                            end_session(sess, PJ_SUCCESS,
                                        PJ_STUN_NAT_TYPE_RESTRICTED);
                            break;
                        case PJNATH_ESTUNTIMEDOUT:
                            /*
                             * Test 3 failed, we're behind a port restricted
                             * NAT.
                             */
                            end_session(sess, PJ_SUCCESS,
                                        PJ_STUN_NAT_TYPE_PORT_RESTRICTED);
                            break;
                        default:
                            /*
                             * Got other error with test 3.
                             */
                            end_session(sess, sess->result[ST_TEST_3].status,
                                        PJ_STUN_NAT_TYPE_ERR_UNKNOWN);
                            break;
                        }
                    }
                    break;
                case PJNATH_ESTUNTIMEDOUT:
                    /*
                     * Strangely test 1B has failed. Maybe connectivity was
                     * lost? Or perhaps port 3489 (the usual port number in
                     * CHANGED-ADDRESS) is blocked?
                     */
                    switch (sess->result[ST_TEST_3].status) {
                    case PJ_SUCCESS:
                        /* Although test 1B failed, test 3 was successful.
                         * It could be that port 3489 is blocked, while the
                         * NAT itself looks to be a Restricted one.
                         */
                        end_session(sess, PJ_SUCCESS,
                                    PJ_STUN_NAT_TYPE_RESTRICTED);
                        break;
                    default:
                        /* Can't distinguish between Symmetric and Port
                         * Restricted, so set the type to Unknown
                         */
                        end_session(sess, PJ_SUCCESS,
                                    PJ_STUN_NAT_TYPE_ERR_UNKNOWN);
                        break;
                    }
                    break;
                default:
                    /*
                     * Got other error with test 1B.
                     */
                    end_session(sess, sess->result[ST_TEST_1B].status,
                                PJ_STUN_NAT_TYPE_ERR_UNKNOWN);
                    break;
                }
                break;
            default:
                /*
                 * We've got other error with Test 2.
                 */
                end_session(sess, sess->result[ST_TEST_2].status,
                            PJ_STUN_NAT_TYPE_ERR_UNKNOWN);
                break;
            }
        }
        break;
    default:
        /*
         * We've got other error with Test 1.
         */
        end_session(sess, sess->result[ST_TEST_1].status,
                    PJ_STUN_NAT_TYPE_ERR_UNKNOWN);
        break;
    }

on_return:
    pj_mutex_unlock(sess->mutex);
}
+
+
/* Perform one RFC 3489 test: build and send a Binding request.
 *
 * test_id:     which test to run; also encoded into the transaction ID
 *              (word 2) so on_request_complete() can match the response
 *              back to the test.
 * alt_addr:    when non-NULL, send to this alternate server address
 *              (used by Test 1B); otherwise send to the primary server.
 * change_flag: value for the CHANGE-REQUEST attribute (CHANGE_IP_FLAG /
 *              CHANGE_PORT_FLAG bits, or 0).
 *
 * On failure the test is marked complete with the error status so the
 * classification logic still sees a terminal result.
 */
static pj_status_t send_test(nat_detect_session *sess,
                             enum test_type test_id,
                             const pj_sockaddr_in *alt_addr,
                             pj_uint32_t change_flag)
{
    pj_uint32_t magic, tsx_id[3];
    pj_status_t status;

    sess->result[test_id].executed = PJ_TRUE;

    /* Randomize tsx id; avoid the magic cookie value so the message is
     * treated as RFC 3489 style.
     */
    do {
        magic = pj_rand();
    } while (magic == PJ_STUN_MAGIC);

    tsx_id[0] = pj_rand();
    tsx_id[1] = pj_rand();
    tsx_id[2] = test_id;

    /* Create BIND request */
    status = pj_stun_session_create_req(sess->stun_sess,
                                        PJ_STUN_BINDING_REQUEST, magic,
                                        (pj_uint8_t*)tsx_id,
                                        &sess->result[test_id].tdata);
    if (status != PJ_SUCCESS)
        goto on_error;

    /* Add CHANGE-REQUEST attribute */
    status = pj_stun_msg_add_uint_attr(sess->pool,
                                       sess->result[test_id].tdata->msg,
                                       PJ_STUN_ATTR_CHANGE_REQUEST,
                                       change_flag);
    if (status != PJ_SUCCESS)
        goto on_error;

    /* Configure alternate address */
    if (alt_addr)
        sess->cur_server = (pj_sockaddr_in*) alt_addr;
    else
        sess->cur_server = &sess->server;

    PJ_LOG(5,(sess->pool->obj_name,
              "Performing %s to %s:%d",
              test_names[test_id],
              pj_inet_ntoa(sess->cur_server->sin_addr),
              pj_ntohs(sess->cur_server->sin_port)));

    /* Send the request (with retransmission and response caching) */
    status = pj_stun_session_send_msg(sess->stun_sess, NULL, PJ_TRUE,
                                      PJ_TRUE, sess->cur_server,
                                      sizeof(pj_sockaddr_in),
                                      sess->result[test_id].tdata);
    if (status != PJ_SUCCESS)
        goto on_error;

    return PJ_SUCCESS;

on_error:
    /* Record the failure as a completed test so classification can run */
    sess->result[test_id].complete = PJ_TRUE;
    sess->result[test_id].status = status;

    return status;
}
+
+
/* Timer callback.
 *
 * TIMER_TEST fires up to three times, starting Tests 1, 2 and 3 spaced
 * TEST_INTERVAL msec apart. TIMER_DESTROY (scheduled by end_session())
 * unregisters the socket and destroys the session.
 */
static void on_sess_timer(pj_timer_heap_t *th,
                          pj_timer_entry *te)
{
    nat_detect_session *sess;

    sess = (nat_detect_session*) te->user_data;

    if (te->id == TIMER_DESTROY) {
        pj_mutex_lock(sess->mutex);
        /* Unregistering the key also closes the socket; clear both so
         * sess_destroy() doesn't touch them again.
         */
        pj_ioqueue_unregister(sess->key);
        sess->key = NULL;
        sess->sock = PJ_INVALID_SOCKET;
        te->id = 0;
        pj_mutex_unlock(sess->mutex);

        sess_destroy(sess);

    } else if (te->id == TIMER_TEST) {

        pj_bool_t next_timer;

        pj_mutex_lock(sess->mutex);

        next_timer = PJ_FALSE;

        /* timer_executed selects which test starts on this tick */
        if (sess->timer_executed == 0) {
            send_test(sess, ST_TEST_1, NULL, 0);
            next_timer = PJ_TRUE;
        } else if (sess->timer_executed == 1) {
            send_test(sess, ST_TEST_2, NULL, CHANGE_IP_PORT_FLAG);
            next_timer = PJ_TRUE;
        } else if (sess->timer_executed == 2) {
            send_test(sess, ST_TEST_3, NULL, CHANGE_PORT_FLAG);
        } else {
            pj_assert(!"Shouldn't have timer at this state");
        }

        ++sess->timer_executed;

        if (next_timer) {
            pj_time_val delay = {0, TEST_INTERVAL};
            pj_timer_heap_schedule(th, te, &delay);
        } else {
            /* No more tests to start; mark the timer idle */
            te->id = 0;
        }

        pj_mutex_unlock(sess->mutex);

    } else {
        pj_assert(!"Invalid timer ID");
    }
}
+
diff --git a/pjnath/src/pjnath/stun_auth.c b/pjnath/src/pjnath/stun_auth.c
new file mode 100644
index 0000000..9041186
--- /dev/null
+++ b/pjnath/src/pjnath/stun_auth.c
@@ -0,0 +1,631 @@
+/* $Id: stun_auth.c 3553 2011-05-05 06:14:19Z nanang $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <pjnath/stun_auth.h>
+#include <pjnath/errno.h>
+#include <pjlib-util/hmac_sha1.h>
+#include <pjlib-util/md5.h>
+#include <pjlib-util/sha1.h>
+#include <pj/assert.h>
+#include <pj/log.h>
+#include <pj/pool.h>
+#include <pj/string.h>
+
+#define THIS_FILE "stun_auth.c"
+
+/* Duplicate credential.
+ * Deep-copies a static credential's strings into 'pool'. A dynamic
+ * credential is copied by value, i.e. the callback and user_data
+ * pointers are shared with the source, not cloned.
+ */
+PJ_DEF(void) pj_stun_auth_cred_dup( pj_pool_t *pool,
+ pj_stun_auth_cred *dst,
+ const pj_stun_auth_cred *src)
+{
+ dst->type = src->type;
+
+ switch (src->type) {
+ case PJ_STUN_AUTH_CRED_STATIC:
+ pj_strdup(pool, &dst->data.static_cred.realm,
+ &src->data.static_cred.realm);
+ pj_strdup(pool, &dst->data.static_cred.username,
+ &src->data.static_cred.username);
+ dst->data.static_cred.data_type = src->data.static_cred.data_type;
+ pj_strdup(pool, &dst->data.static_cred.data,
+ &src->data.static_cred.data);
+ pj_strdup(pool, &dst->data.static_cred.nonce,
+ &src->data.static_cred.nonce);
+ break;
+ case PJ_STUN_AUTH_CRED_DYNAMIC:
+ pj_memcpy(&dst->data.dyn_cred, &src->data.dyn_cred,
+ sizeof(src->data.dyn_cred));
+ break;
+ }
+}
+
+
+/*
+ * Duplicate request credential.
+ * Deep-copies all string members (realm, username, nonce, auth_key)
+ * of a pj_stun_req_cred_info into 'pool'.
+ */
+PJ_DEF(void) pj_stun_req_cred_info_dup( pj_pool_t *pool,
+ pj_stun_req_cred_info *dst,
+ const pj_stun_req_cred_info *src)
+{
+ pj_strdup(pool, &dst->realm, &src->realm);
+ pj_strdup(pool, &dst->username, &src->username);
+ pj_strdup(pool, &dst->nonce, &src->nonce);
+ pj_strdup(pool, &dst->auth_key, &src->auth_key);
+}
+
+
+/* Calculate HMAC-SHA1 key for long term credential, by getting
+ * MD5 digest of username, realm, and password.
+ * 'digest' must point to a buffer of at least 16 bytes.
+ */
+static void calc_md5_key(pj_uint8_t digest[16],
+ const pj_str_t *realm,
+ const pj_str_t *username,
+ const pj_str_t *passwd)
+{
+ /* The 16-byte key for MESSAGE-INTEGRITY HMAC is formed by taking
+ * the MD5 hash of the result of concatenating the following five
+ * fields: (1) The username, with any quotes and trailing nulls
+ * removed, (2) A single colon, (3) The realm, with any quotes and
+ * trailing nulls removed, (4) A single colon, and (5) The
+ * password, with any trailing nulls removed.
+ */
+ pj_md5_context ctx;
+ pj_str_t s;
+
+ pj_md5_init(&ctx);
+
+/* Strip one leading and one trailing double-quote, if present.
+ * Operates on the local copy 's' only; the caller's string is untouched.
+ */
+#define REMOVE_QUOTE(s) if (s.slen && *s.ptr=='"') \
+ s.ptr++, s.slen--; \
+ if (s.slen && s.ptr[s.slen-1]=='"') \
+ s.slen--;
+
+ /* Add username */
+ s = *username;
+ REMOVE_QUOTE(s);
+ pj_md5_update(&ctx, (pj_uint8_t*)s.ptr, s.slen);
+
+ /* Add single colon */
+ pj_md5_update(&ctx, (pj_uint8_t*)":", 1);
+
+ /* Add realm */
+ s = *realm;
+ REMOVE_QUOTE(s);
+ pj_md5_update(&ctx, (pj_uint8_t*)s.ptr, s.slen);
+
+#undef REMOVE_QUOTE
+
+ /* Another colon */
+ pj_md5_update(&ctx, (pj_uint8_t*)":", 1);
+
+ /* Add password */
+ pj_md5_update(&ctx, (pj_uint8_t*)passwd->ptr, passwd->slen);
+
+ /* Done */
+ pj_md5_final(&ctx, digest);
+}
+
+
+/*
+ * Create authentication key to be used for encoding the message with
+ * MESSAGE-INTEGRITY.
+ * With a non-empty realm (long-term credential) and a plaintext
+ * password, the key is MD5(username ":" realm ":" password); otherwise
+ * 'data' is copied and used as the key directly.
+ */
+PJ_DEF(void) pj_stun_create_key(pj_pool_t *pool,
+ pj_str_t *key,
+ const pj_str_t *realm,
+ const pj_str_t *username,
+ pj_stun_passwd_type data_type,
+ const pj_str_t *data)
+{
+ PJ_ASSERT_ON_FAIL(pool && key && username && data, return);
+
+ if (realm && realm->slen) {
+ if (data_type == PJ_STUN_PASSWD_PLAIN) {
+ key->ptr = (char*) pj_pool_alloc(pool, 16);
+ calc_md5_key((pj_uint8_t*)key->ptr, realm, username, data);
+ key->slen = 16;
+ } else {
+ /* Data is already the hashed key; just copy it. */
+ pj_strdup(pool, key, data);
+ }
+ } else {
+ /* Short term credential: password must be plaintext. */
+ pj_assert(data_type == PJ_STUN_PASSWD_PLAIN);
+ pj_strdup(pool, key, data);
+ }
+}
+
+
+/* Read a 16-bit value in network byte order from 'pdu' at offset 'pos'. */
+PJ_INLINE(pj_uint16_t) GET_VAL16(const pj_uint8_t *pdu, unsigned pos)
+{
+ return (pj_uint16_t) ((pdu[pos] << 8) + pdu[pos+1]);
+}
+
+
+/* Write 16-bit host value 'hval' into 'buf' at offset 'pos' in network
+ * byte order.
+ */
+PJ_INLINE(void) PUT_VAL16(pj_uint8_t *buf, unsigned pos, pj_uint16_t hval)
+{
+ buf[pos+0] = (pj_uint8_t) ((hval & 0xFF00) >> 8);
+ buf[pos+1] = (pj_uint8_t) ((hval & 0x00FF) >> 0);
+}
+
+
+/* Create a challenge (error) response for a failed authentication.
+ * REALM and NONCE attributes are added for long-term credentials,
+ * except on 400 (Bad Request) responses, per rfc3489bis.
+ */
+static pj_status_t create_challenge(pj_pool_t *pool,
+ const pj_stun_msg *msg,
+ int err_code,
+ const char *errstr,
+ const pj_str_t *realm,
+ const pj_str_t *nonce,
+ pj_stun_msg **p_response)
+{
+ pj_stun_msg *response;
+ pj_str_t tmp_nonce;
+ pj_str_t err_msg;
+ pj_status_t rc;
+
+ rc = pj_stun_msg_create_response(pool, msg, err_code,
+ (errstr?pj_cstr(&err_msg, errstr):NULL),
+ &response);
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ /* SHOULD NOT add REALM, NONCE, USERNAME, and M-I on 400 response */
+ if (err_code!=400 && realm && realm->slen) {
+ rc = pj_stun_msg_add_string_attr(pool, response,
+ PJ_STUN_ATTR_REALM,
+ realm);
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ /* long term must include nonce; synthesize one when the caller
+ * did not supply it.
+ */
+ if (!nonce || nonce->slen == 0) {
+ tmp_nonce = pj_str("pjstun");
+ nonce = &tmp_nonce;
+ }
+ }
+
+ if (err_code!=400 && nonce && nonce->slen) {
+ rc = pj_stun_msg_add_string_attr(pool, response,
+ PJ_STUN_ATTR_NONCE,
+ nonce);
+ if (rc != PJ_SUCCESS)
+ return rc;
+ }
+
+ *p_response = response;
+
+ return PJ_SUCCESS;
+}
+
+
+/* Verify credential in the request.
+ *
+ * pkt/pkt_len : raw packet bytes, needed to recompute the HMAC-SHA1.
+ * msg         : parsed form of the same packet.
+ * cred        : local credential (static values or dynamic callbacks).
+ * pool        : allocator; mandatory when p_response is requested.
+ * p_info      : optional output, receives the credential info used.
+ * p_response  : optional output; on failure, and only when msg is a
+ *               request, receives a 400/401/438 challenge response.
+ *
+ * Returns PJ_SUCCESS when authentication passes, or
+ * PJ_STATUS_FROM_STUN_CODE(code) on failure.
+ */
+PJ_DEF(pj_status_t) pj_stun_authenticate_request(const pj_uint8_t *pkt,
+ unsigned pkt_len,
+ const pj_stun_msg *msg,
+ pj_stun_auth_cred *cred,
+ pj_pool_t *pool,
+ pj_stun_req_cred_info *p_info,
+ pj_stun_msg **p_response)
+{
+ pj_stun_req_cred_info tmp_info;
+ const pj_stun_msgint_attr *amsgi;
+ unsigned i, amsgi_pos;
+ pj_bool_t has_attr_beyond_mi;
+ const pj_stun_username_attr *auser;
+ const pj_stun_realm_attr *arealm;
+ /* NOTE(review): anonce is declared as pj_stun_realm_attr; realm and
+ * nonce attributes share the same string-attribute layout so this is
+ * harmless, but pj_stun_nonce_attr would be the clearer type.
+ */
+ const pj_stun_realm_attr *anonce;
+ pj_hmac_sha1_context ctx;
+ pj_uint8_t digest[PJ_SHA1_DIGEST_SIZE];
+ pj_stun_status err_code;
+ const char *err_text = NULL;
+ pj_status_t status;
+
+ /* msg and credential MUST be specified */
+ PJ_ASSERT_RETURN(pkt && pkt_len && msg && cred, PJ_EINVAL);
+
+ /* If p_response is specified, pool MUST be specified. */
+ PJ_ASSERT_RETURN(!p_response || pool, PJ_EINVAL);
+
+ if (p_response)
+ *p_response = NULL;
+
+ /* Only requests get a challenge response created for them. */
+ if (!PJ_STUN_IS_REQUEST(msg->hdr.type))
+ p_response = NULL;
+
+ if (p_info == NULL)
+ p_info = &tmp_info;
+
+ pj_bzero(p_info, sizeof(pj_stun_req_cred_info));
+
+ /* Get realm and nonce from credential */
+ p_info->realm.slen = p_info->nonce.slen = 0;
+ if (cred->type == PJ_STUN_AUTH_CRED_STATIC) {
+ p_info->realm = cred->data.static_cred.realm;
+ p_info->nonce = cred->data.static_cred.nonce;
+ } else if (cred->type == PJ_STUN_AUTH_CRED_DYNAMIC) {
+ status = cred->data.dyn_cred.get_auth(cred->data.dyn_cred.user_data,
+ pool, &p_info->realm,
+ &p_info->nonce);
+ if (status != PJ_SUCCESS)
+ return status;
+ } else {
+ pj_assert(!"Invalid credential type");
+ return PJ_EBUG;
+ }
+
+ /* Look for MESSAGE-INTEGRITY while counting the position.
+ * amsgi_pos accumulates the encoded size of every attribute before
+ * M-I (4-byte attribute header plus value padded to 4 bytes), i.e.
+ * the offset of M-I from the end of the 20-byte STUN header.
+ */
+ amsgi_pos = 0;
+ has_attr_beyond_mi = PJ_FALSE;
+ amsgi = NULL;
+ for (i=0; i<msg->attr_count; ++i) {
+ if (msg->attr[i]->type == PJ_STUN_ATTR_MESSAGE_INTEGRITY) {
+ amsgi = (const pj_stun_msgint_attr*) msg->attr[i];
+ } else if (amsgi) {
+ has_attr_beyond_mi = PJ_TRUE;
+ break;
+ } else {
+ amsgi_pos += ((msg->attr[i]->length+3) & ~0x03) + 4;
+ }
+ }
+
+ if (amsgi == NULL) {
+ /* According to rfc3489bis-10 Sec 10.1.2/10.2.2, we should return 400
+ for short term, and 401 for long term.
+ The rule has been changed from rfc3489bis-06
+ */
+ err_code = p_info->realm.slen ? PJ_STUN_SC_UNAUTHORIZED :
+ PJ_STUN_SC_BAD_REQUEST;
+ goto on_auth_failed;
+ }
+
+ /* Next check that USERNAME is present */
+ auser = (const pj_stun_username_attr*)
+ pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_USERNAME, 0);
+ if (auser == NULL) {
+ /* According to rfc3489bis-10 Sec 10.1.2/10.2.2, we should return 400
+ for both short and long term, since M-I is present.
+ The rule has been changed from rfc3489bis-06
+ */
+ err_code = PJ_STUN_SC_BAD_REQUEST;
+ err_text = "Missing USERNAME";
+ goto on_auth_failed;
+ }
+
+ /* Get REALM, if any */
+ arealm = (const pj_stun_realm_attr*)
+ pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_REALM, 0);
+
+ /* Reject with 400 if we have long term credential and the request
+ * is missing REALM attribute.
+ */
+ if (p_info->realm.slen && arealm==NULL) {
+ err_code = PJ_STUN_SC_BAD_REQUEST;
+ err_text = "Missing REALM";
+ goto on_auth_failed;
+ }
+
+ /* Check if username match */
+ if (cred->type == PJ_STUN_AUTH_CRED_STATIC) {
+ pj_bool_t username_ok;
+ username_ok = !pj_strcmp(&auser->value,
+ &cred->data.static_cred.username);
+ if (username_ok) {
+ pj_strdup(pool, &p_info->username,
+ &cred->data.static_cred.username);
+ pj_stun_create_key(pool, &p_info->auth_key, &p_info->realm,
+ &auser->value, cred->data.static_cred.data_type,
+ &cred->data.static_cred.data);
+ } else {
+ /* Username mismatch */
+ /* According to rfc3489bis-10 Sec 10.1.2/10.2.2, we should
+ * return 401
+ */
+ err_code = PJ_STUN_SC_UNAUTHORIZED;
+ goto on_auth_failed;
+ }
+ } else if (cred->type == PJ_STUN_AUTH_CRED_DYNAMIC) {
+ pj_stun_passwd_type data_type = PJ_STUN_PASSWD_PLAIN;
+ pj_str_t password;
+ pj_status_t rc;
+
+ /* Ask the application for the password of this username. */
+ rc = cred->data.dyn_cred.get_password(msg,
+ cred->data.dyn_cred.user_data,
+ (arealm?&arealm->value:NULL),
+ &auser->value, pool,
+ &data_type, &password);
+ if (rc == PJ_SUCCESS) {
+ pj_strdup(pool, &p_info->username, &auser->value);
+ pj_stun_create_key(pool, &p_info->auth_key,
+ (arealm?&arealm->value:NULL), &auser->value,
+ data_type, &password);
+ } else {
+ err_code = PJ_STUN_SC_UNAUTHORIZED;
+ goto on_auth_failed;
+ }
+ } else {
+ pj_assert(!"Invalid credential type");
+ return PJ_EBUG;
+ }
+
+
+
+ /* Get NONCE attribute */
+ anonce = (pj_stun_nonce_attr*)
+ pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_NONCE, 0);
+
+ /* Check for long term/short term requirements. */
+ if (p_info->realm.slen != 0 && arealm == NULL) {
+ /* Long term credential is required and REALM is not present */
+ err_code = PJ_STUN_SC_BAD_REQUEST;
+ err_text = "Missing REALM";
+ goto on_auth_failed;
+
+ } else if (p_info->realm.slen != 0 && arealm != NULL) {
+ /* We want long term, and REALM is present */
+
+ /* NONCE must be present. */
+ if (anonce == NULL && p_info->nonce.slen) {
+ err_code = PJ_STUN_SC_BAD_REQUEST;
+ err_text = "Missing NONCE";
+ goto on_auth_failed;
+ }
+
+ /* Verify REALM matches */
+ if (pj_stricmp(&arealm->value, &p_info->realm)) {
+ /* REALM doesn't match */
+ err_code = PJ_STUN_SC_UNAUTHORIZED;
+ err_text = "Invalid REALM";
+ goto on_auth_failed;
+ }
+
+ /* Valid case, will validate the message integrity later */
+
+ } else if (p_info->realm.slen == 0 && arealm != NULL) {
+ /* We want to use short term credential, but client uses long
+ * term credential. The draft doesn't mention anything about
+ * switching between long term and short term.
+ */
+
+ /* For now just accept the credential, anyway it will probably
+ * cause wrong message integrity value later.
+ */
+ } else if (p_info->realm.slen==0 && arealm == NULL) {
+ /* Short term authentication is wanted, and one is supplied */
+
+ /* Application MAY request NONCE to be supplied */
+ if (p_info->nonce.slen != 0) {
+ err_code = PJ_STUN_SC_UNAUTHORIZED;
+ err_text = "NONCE required";
+ goto on_auth_failed;
+ }
+ }
+
+ /* If NONCE is present, validate it */
+ if (anonce) {
+ pj_bool_t ok;
+
+ if (cred->type == PJ_STUN_AUTH_CRED_DYNAMIC &&
+ cred->data.dyn_cred.verify_nonce != NULL)
+ {
+ ok=cred->data.dyn_cred.verify_nonce(msg,
+ cred->data.dyn_cred.user_data,
+ (arealm?&arealm->value:NULL),
+ &auser->value,
+ &anonce->value);
+ } else if (cred->type == PJ_STUN_AUTH_CRED_DYNAMIC) {
+ /* Dynamic credential without a verify_nonce callback:
+ * accept any nonce.
+ */
+ ok = PJ_TRUE;
+ } else {
+ if (p_info->nonce.slen) {
+ ok = !pj_strcmp(&anonce->value, &p_info->nonce);
+ } else {
+ ok = PJ_TRUE;
+ }
+ }
+
+ if (!ok) {
+ err_code = PJ_STUN_SC_STALE_NONCE;
+ goto on_auth_failed;
+ }
+ }
+
+ /* Now calculate HMAC of the message. */
+ pj_hmac_sha1_init(&ctx, (pj_uint8_t*)p_info->auth_key.ptr,
+ p_info->auth_key.slen);
+
+#if PJ_STUN_OLD_STYLE_MI_FINGERPRINT
+ /* Pre rfc3489bis-06 style of calculation */
+ pj_hmac_sha1_update(&ctx, pkt, 20);
+#else
+ /* First calculate HMAC for the header.
+ * The calculation is different depending on whether FINGERPRINT attribute
+ * is present in the message.
+ */
+ if (has_attr_beyond_mi) {
+ pj_uint8_t hdr_copy[20];
+ pj_memcpy(hdr_copy, pkt, 20);
+ /* Adjust header length to cover up to and including M-I:
+ * amsgi_pos + 4 (attr header) + 20 (HMAC value).
+ */
+ PUT_VAL16(hdr_copy, 2, (pj_uint16_t)(amsgi_pos + 24));
+ pj_hmac_sha1_update(&ctx, hdr_copy, 20);
+ } else {
+ pj_hmac_sha1_update(&ctx, pkt, 20);
+ }
+#endif /* PJ_STUN_OLD_STYLE_MI_FINGERPRINT */
+
+ /* Now update with the message body */
+ pj_hmac_sha1_update(&ctx, pkt+20, amsgi_pos);
+#if PJ_STUN_OLD_STYLE_MI_FINGERPRINT
+ // This is no longer necessary as per rfc3489bis-08
+ if ((amsgi_pos+20) & 0x3F) {
+ pj_uint8_t zeroes[64];
+ pj_bzero(zeroes, sizeof(zeroes));
+ pj_hmac_sha1_update(&ctx, zeroes, 64-((amsgi_pos+20) & 0x3F));
+ }
+#endif
+ pj_hmac_sha1_final(&ctx, digest);
+
+
+ /* Compare HMACs */
+ if (pj_memcmp(amsgi->hmac, digest, 20)) {
+ /* HMAC value mismatch */
+ /* According to rfc3489bis-10 Sec 10.1.2 we should return 401 */
+ err_code = PJ_STUN_SC_UNAUTHORIZED;
+ err_text = "MESSAGE-INTEGRITY mismatch";
+ goto on_auth_failed;
+ }
+
+ /* Everything looks okay! */
+ return PJ_SUCCESS;
+
+on_auth_failed:
+ if (p_response) {
+ create_challenge(pool, msg, err_code, err_text,
+ &p_info->realm, &p_info->nonce, p_response);
+ }
+ return PJ_STATUS_FROM_STUN_CODE(err_code);
+}
+
+
+/* Determine if STUN message can be authenticated.
+ * Requests and success responses always can; indications never can.
+ * For error responses it depends on the error code: challenge-style
+ * responses (400/401/438) and 420 are sent without a usable key and
+ * therefore cannot carry a verifiable MESSAGE-INTEGRITY.
+ */
+PJ_DEF(pj_bool_t) pj_stun_auth_valid_for_msg(const pj_stun_msg *msg)
+{
+ unsigned msg_type = msg->hdr.type;
+ const pj_stun_errcode_attr *err_attr;
+
+ /* STUN requests and success response can be authenticated */
+ if (!PJ_STUN_IS_ERROR_RESPONSE(msg_type) &&
+ !PJ_STUN_IS_INDICATION(msg_type))
+ {
+ return PJ_TRUE;
+ }
+
+ /* STUN Indication cannot be authenticated */
+ if (PJ_STUN_IS_INDICATION(msg_type))
+ return PJ_FALSE;
+
+ /* Authentication for STUN error responses depend on the error
+ * code.
+ */
+ err_attr = (const pj_stun_errcode_attr*)
+ pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_ERROR_CODE, 0);
+ if (err_attr == NULL) {
+ PJ_LOG(4,(THIS_FILE, "STUN error code attribute not present in "
+ "error response"));
+ return PJ_TRUE;
+ }
+
+ switch (err_attr->err_code) {
+ case PJ_STUN_SC_BAD_REQUEST: /* 400 (Bad Request) */
+ case PJ_STUN_SC_UNAUTHORIZED: /* 401 (Unauthorized) */
+ case PJ_STUN_SC_STALE_NONCE: /* 438 (Stale Nonce) */
+
+ /* Due to the way this response is generated here, we can't really
+ * authenticate 420 (Unknown Attribute) response */
+ case PJ_STUN_SC_UNKNOWN_ATTRIBUTE:
+ return PJ_FALSE;
+ default:
+ return PJ_TRUE;
+ }
+}
+
+
+/* Authenticate MESSAGE-INTEGRITY in the response.
+ * Recomputes the HMAC-SHA1 over the raw packet using 'key' and compares
+ * it with the M-I attribute in the parsed message. Returns PJ_SUCCESS
+ * when they match, a STUN-mapped error otherwise.
+ */
+PJ_DEF(pj_status_t) pj_stun_authenticate_response(const pj_uint8_t *pkt,
+ unsigned pkt_len,
+ const pj_stun_msg *msg,
+ const pj_str_t *key)
+{
+ const pj_stun_msgint_attr *amsgi;
+ unsigned i, amsgi_pos;
+ pj_bool_t has_attr_beyond_mi;
+ pj_hmac_sha1_context ctx;
+ pj_uint8_t digest[PJ_SHA1_DIGEST_SIZE];
+
+ PJ_ASSERT_RETURN(pkt && pkt_len && msg && key, PJ_EINVAL);
+
+ /* First check that MESSAGE-INTEGRITY is present.
+ * (This result is recomputed by the position-counting loop below;
+ * this first lookup only serves as an early 401 rejection.)
+ */
+ amsgi = (const pj_stun_msgint_attr*)
+ pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_MESSAGE_INTEGRITY, 0);
+ if (amsgi == NULL) {
+ return PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_UNAUTHORIZED);
+ }
+
+
+ /* Check that message length is valid.
+ * 24 = 4-byte attribute header + 20-byte HMAC, the minimum body
+ * that can contain a MESSAGE-INTEGRITY attribute.
+ */
+ if (msg->hdr.length < 24) {
+ return PJNATH_EINSTUNMSGLEN;
+ }
+
+ /* Look for MESSAGE-INTEGRITY while counting the position (offset of
+ * M-I from the end of the 20-byte header; each preceding attribute
+ * contributes its 4-byte header plus its value padded to 4 bytes).
+ */
+ amsgi_pos = 0;
+ has_attr_beyond_mi = PJ_FALSE;
+ amsgi = NULL;
+ for (i=0; i<msg->attr_count; ++i) {
+ if (msg->attr[i]->type == PJ_STUN_ATTR_MESSAGE_INTEGRITY) {
+ amsgi = (const pj_stun_msgint_attr*) msg->attr[i];
+ } else if (amsgi) {
+ has_attr_beyond_mi = PJ_TRUE;
+ break;
+ } else {
+ amsgi_pos += ((msg->attr[i]->length+3) & ~0x03) + 4;
+ }
+ }
+
+ if (amsgi == NULL) {
+ return PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_BAD_REQUEST);
+ }
+
+ /* Now calculate HMAC of the message. */
+ pj_hmac_sha1_init(&ctx, (pj_uint8_t*)key->ptr, key->slen);
+
+#if PJ_STUN_OLD_STYLE_MI_FINGERPRINT
+ /* Pre rfc3489bis-06 style of calculation */
+ pj_hmac_sha1_update(&ctx, pkt, 20);
+#else
+ /* First calculate HMAC for the header.
+ * The calculation is different depending on whether FINGERPRINT attribute
+ * is present in the message.
+ */
+ if (has_attr_beyond_mi) {
+ pj_uint8_t hdr_copy[20];
+ pj_memcpy(hdr_copy, pkt, 20);
+ PUT_VAL16(hdr_copy, 2, (pj_uint16_t)(amsgi_pos+24));
+ pj_hmac_sha1_update(&ctx, hdr_copy, 20);
+ } else {
+ pj_hmac_sha1_update(&ctx, pkt, 20);
+ }
+#endif /* PJ_STUN_OLD_STYLE_MI_FINGERPRINT */
+
+ /* Now update with the message body */
+ pj_hmac_sha1_update(&ctx, pkt+20, amsgi_pos);
+#if PJ_STUN_OLD_STYLE_MI_FINGERPRINT
+ // This is no longer necessary as per rfc3489bis-08
+ if ((amsgi_pos+20) & 0x3F) {
+ pj_uint8_t zeroes[64];
+ pj_bzero(zeroes, sizeof(zeroes));
+ pj_hmac_sha1_update(&ctx, zeroes, 64-((amsgi_pos+20) & 0x3F));
+ }
+#endif
+ pj_hmac_sha1_final(&ctx, digest);
+
+ /* Compare HMACs */
+ if (pj_memcmp(amsgi->hmac, digest, 20)) {
+ /* HMAC value mismatch */
+ return PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_UNAUTHORIZED);
+ }
+
+ /* Everything looks okay! */
+ return PJ_SUCCESS;
+}
+
diff --git a/pjnath/src/pjnath/stun_msg.c b/pjnath/src/pjnath/stun_msg.c
new file mode 100644
index 0000000..b295705
--- /dev/null
+++ b/pjnath/src/pjnath/stun_msg.c
@@ -0,0 +1,2827 @@
+/* $Id: stun_msg.c 3553 2011-05-05 06:14:19Z nanang $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <pjnath/stun_msg.h>
+#include <pjnath/errno.h>
+#include <pjlib-util/crc32.h>
+#include <pjlib-util/hmac_sha1.h>
+#include <pj/assert.h>
+#include <pj/log.h>
+#include <pj/os.h>
+#include <pj/pool.h>
+#include <pj/rand.h>
+#include <pj/string.h>
+
+#define THIS_FILE "stun_msg.c"
+/* ASCII "STUN": XOR'ed with the CRC-32 to form the FINGERPRINT value. */
+#define STUN_XOR_FINGERPRINT 0x5354554eL
+
+/* Character used to pad STUN attributes when encoding; settable via
+ * pj_stun_set_padding_char(). Zero-initialized by default.
+ */
+static int padding_char;
+
+/* Method number -> method name, indexed by PJ_STUN_GET_METHOD(); used
+ * by pj_stun_get_method_name(). Entries beyond those listed here are
+ * implicitly NULL.
+ */
+static const char *stun_method_names[PJ_STUN_METHOD_MAX] =
+{
+ "Unknown", /* 0 */
+ "Binding", /* 1 */
+ "SharedSecret", /* 2 */
+ "Allocate", /* 3 */
+ "Refresh", /* 4 */
+ "???", /* 5 */
+ "Send", /* 6 */
+ "Data", /* 7 */
+ "CreatePermission", /* 8 */
+ "ChannelBind", /* 9 */
+};
+
+/* Error code -> default reason phrase.
+ * MUST be kept sorted ascending by err_code: pj_stun_get_err_reason()
+ * performs a binary search on this table.
+ */
+static struct
+{
+ int err_code;
+ const char *err_msg;
+} stun_err_msg_map[] =
+{
+ { PJ_STUN_SC_TRY_ALTERNATE, "Try Alternate"},
+ { PJ_STUN_SC_BAD_REQUEST, "Bad Request"},
+ { PJ_STUN_SC_UNAUTHORIZED, "Unauthorized"},
+ { PJ_STUN_SC_FORBIDDEN, "Forbidden"},
+ { PJ_STUN_SC_UNKNOWN_ATTRIBUTE, "Unknown Attribute"},
+ //{ PJ_STUN_SC_STALE_CREDENTIALS, "Stale Credentials"},
+ //{ PJ_STUN_SC_INTEGRITY_CHECK_FAILURE, "Integrity Check Failure"},
+ //{ PJ_STUN_SC_MISSING_USERNAME, "Missing Username"},
+ //{ PJ_STUN_SC_USE_TLS, "Use TLS"},
+ //{ PJ_STUN_SC_MISSING_REALM, "Missing Realm"},
+ //{ PJ_STUN_SC_MISSING_NONCE, "Missing Nonce"},
+ //{ PJ_STUN_SC_UNKNOWN_USERNAME, "Unknown Username"},
+ { PJ_STUN_SC_ALLOCATION_MISMATCH, "Allocation Mismatch"},
+ { PJ_STUN_SC_STALE_NONCE, "Stale Nonce"},
+ { PJ_STUN_SC_TRANSITIONING, "Active Destination Already Set"},
+ { PJ_STUN_SC_WRONG_CREDENTIALS, "Wrong Credentials"},
+ { PJ_STUN_SC_UNSUPP_TRANSPORT_PROTO, "Unsupported Transport Protocol"},
+ { PJ_STUN_SC_OPER_TCP_ONLY, "Operation for TCP Only"},
+ { PJ_STUN_SC_CONNECTION_FAILURE, "Connection Failure"},
+ { PJ_STUN_SC_CONNECTION_TIMEOUT, "Connection Timeout"},
+ { PJ_STUN_SC_ALLOCATION_QUOTA_REACHED, "Allocation Quota Reached"},
+ { PJ_STUN_SC_ROLE_CONFLICT, "Role Conflict"},
+ { PJ_STUN_SC_SERVER_ERROR, "Server Error"},
+ { PJ_STUN_SC_INSUFFICIENT_CAPACITY, "Insufficient Capacity"},
+ { PJ_STUN_SC_GLOBAL_FAILURE, "Global Failure"}
+};
+
+
+
+/* Descriptor for one STUN attribute type: its printable name plus the
+ * decode/encode/clone callbacks used when parsing, serializing, and
+ * copying messages. NULL callbacks mark an unassigned attribute ID.
+ */
+struct attr_desc
+{
+ const char *name;
+ pj_status_t (*decode_attr)(pj_pool_t *pool, const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr, void **p_attr);
+ pj_status_t (*encode_attr)(const void *a, pj_uint8_t *buf,
+ unsigned len, const pj_stun_msg_hdr *msghdr,
+ unsigned *printed);
+ void* (*clone_attr)(pj_pool_t *pool, const void *src);
+};
+
+static pj_status_t decode_sockaddr_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr);
+static pj_status_t decode_xored_sockaddr_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr);
+static pj_status_t encode_sockaddr_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed);
+static void* clone_sockaddr_attr(pj_pool_t *pool, const void *src);
+static pj_status_t decode_string_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr);
+static pj_status_t encode_string_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed);
+static void* clone_string_attr(pj_pool_t *pool, const void *src);
+static pj_status_t decode_msgint_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr);
+static pj_status_t encode_msgint_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed);
+static void* clone_msgint_attr(pj_pool_t *pool, const void *src);
+static pj_status_t decode_errcode_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr);
+static pj_status_t encode_errcode_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed);
+static void* clone_errcode_attr(pj_pool_t *pool, const void *src);
+static pj_status_t decode_unknown_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr);
+static pj_status_t encode_unknown_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed);
+static void* clone_unknown_attr(pj_pool_t *pool, const void *src);
+static pj_status_t decode_uint_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr);
+static pj_status_t encode_uint_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed);
+static void* clone_uint_attr(pj_pool_t *pool, const void *src);
+static pj_status_t decode_uint64_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr);
+static pj_status_t encode_uint64_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed);
+static void* clone_uint64_attr(pj_pool_t *pool, const void *src);
+static pj_status_t decode_binary_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr);
+static pj_status_t encode_binary_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed);
+static void* clone_binary_attr(pj_pool_t *pool, const void *src);
+static pj_status_t decode_empty_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr);
+static pj_status_t encode_empty_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed);
+static void* clone_empty_attr(pj_pool_t *pool, const void *src);
+
+/* Descriptor table for mandatory (comprehension-required) attributes,
+ * indexed directly by attribute type (0x0000..PJ_STUN_ATTR_END_MANDATORY_ATTR).
+ * Entries with NULL callbacks mark unassigned attribute IDs. The table
+ * layout is validated by the assertions in find_attr_desc().
+ */
+static struct attr_desc mandatory_attr_desc[] =
+{
+ {
+ /* type zero */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* PJ_STUN_ATTR_MAPPED_ADDR, */
+ "MAPPED-ADDRESS",
+ &decode_sockaddr_attr,
+ &encode_sockaddr_attr,
+ &clone_sockaddr_attr
+ },
+ {
+ /* PJ_STUN_ATTR_RESPONSE_ADDR, */
+ "RESPONSE-ADDRESS",
+ &decode_sockaddr_attr,
+ &encode_sockaddr_attr,
+ &clone_sockaddr_attr
+ },
+ {
+ /* PJ_STUN_ATTR_CHANGE_REQUEST, */
+ "CHANGE-REQUEST",
+ &decode_uint_attr,
+ &encode_uint_attr,
+ &clone_uint_attr
+ },
+ {
+ /* PJ_STUN_ATTR_SOURCE_ADDR, */
+ "SOURCE-ADDRESS",
+ &decode_sockaddr_attr,
+ &encode_sockaddr_attr,
+ &clone_sockaddr_attr
+ },
+ {
+ /* PJ_STUN_ATTR_CHANGED_ADDR, */
+ "CHANGED-ADDRESS",
+ &decode_sockaddr_attr,
+ &encode_sockaddr_attr,
+ &clone_sockaddr_attr
+ },
+ {
+ /* PJ_STUN_ATTR_USERNAME, */
+ "USERNAME",
+ &decode_string_attr,
+ &encode_string_attr,
+ &clone_string_attr
+ },
+ {
+ /* PJ_STUN_ATTR_PASSWORD, */
+ "PASSWORD",
+ &decode_string_attr,
+ &encode_string_attr,
+ &clone_string_attr
+ },
+ {
+ /* PJ_STUN_ATTR_MESSAGE_INTEGRITY, */
+ "MESSAGE-INTEGRITY",
+ &decode_msgint_attr,
+ &encode_msgint_attr,
+ &clone_msgint_attr
+ },
+ {
+ /* PJ_STUN_ATTR_ERROR_CODE, */
+ "ERROR-CODE",
+ &decode_errcode_attr,
+ &encode_errcode_attr,
+ &clone_errcode_attr
+ },
+ {
+ /* PJ_STUN_ATTR_UNKNOWN_ATTRIBUTES, */
+ "UNKNOWN-ATTRIBUTES",
+ &decode_unknown_attr,
+ &encode_unknown_attr,
+ &clone_unknown_attr
+ },
+ {
+ /* PJ_STUN_ATTR_REFLECTED_FROM, */
+ "REFLECTED-FROM",
+ &decode_sockaddr_attr,
+ &encode_sockaddr_attr,
+ &clone_sockaddr_attr
+ },
+ {
+ /* PJ_STUN_ATTR_CHANNEL_NUMBER (0x000C) */
+ "CHANNEL-NUMBER",
+ &decode_uint_attr,
+ &encode_uint_attr,
+ &clone_uint_attr
+ },
+ {
+ /* PJ_STUN_ATTR_LIFETIME, */
+ "LIFETIME",
+ &decode_uint_attr,
+ &encode_uint_attr,
+ &clone_uint_attr
+ },
+ {
+ /* ID 0x000E is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* PJ_STUN_ATTR_MAGIC_COOKIE */
+ "MAGIC-COOKIE",
+ &decode_uint_attr,
+ &encode_uint_attr,
+ &clone_uint_attr
+ },
+ {
+ /* PJ_STUN_ATTR_BANDWIDTH, */
+ "BANDWIDTH",
+ &decode_uint_attr,
+ &encode_uint_attr,
+ &clone_uint_attr
+ },
+ {
+ /* ID 0x0011 is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* PJ_STUN_ATTR_XOR_PEER_ADDRESS, */
+ "XOR-PEER-ADDRESS",
+ &decode_xored_sockaddr_attr,
+ &encode_sockaddr_attr,
+ &clone_sockaddr_attr
+ },
+ {
+ /* PJ_STUN_ATTR_DATA, */
+ "DATA",
+ &decode_binary_attr,
+ &encode_binary_attr,
+ &clone_binary_attr
+ },
+ {
+ /* PJ_STUN_ATTR_REALM, */
+ "REALM",
+ &decode_string_attr,
+ &encode_string_attr,
+ &clone_string_attr
+ },
+ {
+ /* PJ_STUN_ATTR_NONCE, */
+ "NONCE",
+ &decode_string_attr,
+ &encode_string_attr,
+ &clone_string_attr
+ },
+ {
+ /* PJ_STUN_ATTR_XOR_RELAYED_ADDR, */
+ "XOR-RELAYED-ADDRESS",
+ &decode_xored_sockaddr_attr,
+ &encode_sockaddr_attr,
+ &clone_sockaddr_attr
+ },
+ {
+ /* PJ_STUN_ATTR_REQUESTED_ADDR_TYPE, */
+ "REQUESTED-ADDRESS-TYPE",
+ &decode_uint_attr,
+ &encode_uint_attr,
+ &clone_uint_attr
+ },
+ {
+ /* PJ_STUN_ATTR_EVEN_PORT, */
+ "EVEN-PORT",
+ &decode_uint_attr,
+ &encode_uint_attr,
+ &clone_uint_attr
+ },
+ {
+ /* PJ_STUN_ATTR_REQUESTED_TRANSPORT, */
+ "REQUESTED-TRANSPORT",
+ &decode_uint_attr,
+ &encode_uint_attr,
+ &clone_uint_attr
+ },
+ {
+ /* PJ_STUN_ATTR_DONT_FRAGMENT */
+ "DONT-FRAGMENT",
+ &decode_empty_attr,
+ &encode_empty_attr,
+ &clone_empty_attr
+ },
+ {
+ /* ID 0x001B is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* ID 0x001C is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* ID 0x001D is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* ID 0x001E is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* ID 0x001F is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* PJ_STUN_ATTR_XOR_MAPPED_ADDRESS, */
+ "XOR-MAPPED-ADDRESS",
+ &decode_xored_sockaddr_attr,
+ &encode_sockaddr_attr,
+ &clone_sockaddr_attr
+ },
+ {
+ /* PJ_STUN_ATTR_TIMER_VAL, */
+ "TIMER-VAL",
+ &decode_uint_attr,
+ &encode_uint_attr,
+ &clone_uint_attr
+ },
+ {
+ /* PJ_STUN_ATTR_RESERVATION_TOKEN, */
+ "RESERVATION-TOKEN",
+ &decode_uint64_attr,
+ &encode_uint64_attr,
+ &clone_uint64_attr
+ },
+ {
+ /* PJ_STUN_ATTR_XOR_REFLECTED_FROM, */
+ "XOR-REFLECTED-FROM",
+ &decode_xored_sockaddr_attr,
+ &encode_sockaddr_attr,
+ &clone_sockaddr_attr
+ },
+ {
+ /* PJ_STUN_ATTR_PRIORITY, */
+ "PRIORITY",
+ &decode_uint_attr,
+ &encode_uint_attr,
+ &clone_uint_attr
+ },
+ {
+ /* PJ_STUN_ATTR_USE_CANDIDATE, */
+ "USE-CANDIDATE",
+ &decode_empty_attr,
+ &encode_empty_attr,
+ &clone_empty_attr
+ },
+ {
+ /* ID 0x0026 is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* ID 0x0027 is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* ID 0x0028 is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* ID 0x0029 is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* ID 0x002a is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* ID 0x002b is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* ID 0x002c is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* ID 0x002d is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* ID 0x002e is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* ID 0x002f is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* PJ_STUN_ATTR_ICMP, */
+ "ICMP",
+ &decode_uint_attr,
+ &encode_uint_attr,
+ &clone_uint_attr
+ },
+
+ /* Sentinel */
+ {
+ /* PJ_STUN_ATTR_END_MANDATORY_ATTR */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ }
+};
+
+/* Descriptor table for extended (comprehension-optional) attributes,
+ * indexed by (type - PJ_STUN_ATTR_START_EXTENDED_ATTR); see
+ * find_attr_desc(). NULL callbacks mark unassigned IDs.
+ */
+static struct attr_desc extended_attr_desc[] =
+{
+ {
+ /* ID 0x8021 is not assigned */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* PJ_STUN_ATTR_SOFTWARE, */
+ "SOFTWARE",
+ &decode_string_attr,
+ &encode_string_attr,
+ &clone_string_attr
+ },
+ {
+ /* PJ_STUN_ATTR_ALTERNATE_SERVER, */
+ "ALTERNATE-SERVER",
+ &decode_sockaddr_attr,
+ &encode_sockaddr_attr,
+ &clone_sockaddr_attr
+ },
+ {
+ /* PJ_STUN_ATTR_REFRESH_INTERVAL, */
+ "REFRESH-INTERVAL",
+ &decode_uint_attr,
+ &encode_uint_attr,
+ &clone_uint_attr
+ },
+ {
+ /* ID 0x8025 is not assigned*/
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* PADDING, 0x8026 */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* CACHE-TIMEOUT, 0x8027 */
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ },
+ {
+ /* PJ_STUN_ATTR_FINGERPRINT, */
+ "FINGERPRINT",
+ &decode_uint_attr,
+ &encode_uint_attr,
+ &clone_uint_attr
+ },
+ {
+ /* PJ_STUN_ATTR_ICE_CONTROLLED, */
+ "ICE-CONTROLLED",
+ &decode_uint64_attr,
+ &encode_uint64_attr,
+ &clone_uint64_attr
+ },
+ {
+ /* PJ_STUN_ATTR_ICE_CONTROLLING, */
+ "ICE-CONTROLLING",
+ &decode_uint64_attr,
+ &encode_uint64_attr,
+ &clone_uint64_attr
+ }
+};
+
+
+
+/*
+ * Get STUN message type name.
+ * Extracts the method number from 'msg_type' and maps it to a string.
+ * NOTE(review): stun_method_names is sized PJ_STUN_METHOD_MAX but only
+ * its first 10 entries are initialized; if PJ_STUN_METHOD_MAX is larger,
+ * an in-range but unassigned method would return NULL here -- confirm
+ * against stun_msg.h.
+ */
+PJ_DEF(const char*) pj_stun_get_method_name(unsigned msg_type)
+{
+ unsigned method = PJ_STUN_GET_METHOD(msg_type);
+
+ if (method >= PJ_ARRAY_SIZE(stun_method_names))
+ return "???";
+
+ return stun_method_names[method];
+}
+
+
+/*
+ * Get STUN message class name (request / success response / error
+ * response / indication) from the message type field.
+ */
+PJ_DEF(const char*) pj_stun_get_class_name(unsigned msg_type)
+{
+ if (PJ_STUN_IS_REQUEST(msg_type))
+ return "request";
+ else if (PJ_STUN_IS_SUCCESS_RESPONSE(msg_type))
+ return "success response";
+ else if (PJ_STUN_IS_ERROR_RESPONSE(msg_type))
+ return "error response";
+ else if (PJ_STUN_IS_INDICATION(msg_type))
+ return "indication";
+ else
+ return "???";
+}
+
+
+/* Look up the attr_desc entry for the given attribute type, or NULL if
+ * the type is unknown/unassigned. Mandatory attributes index directly
+ * into mandatory_attr_desc[]; extended attributes index into
+ * extended_attr_desc[] after rebasing by PJ_STUN_ATTR_START_EXTENDED_ATTR.
+ */
+static const struct attr_desc *find_attr_desc(unsigned attr_type)
+{
+ struct attr_desc *desc;
+
+ /* Check that attr_desc array is valid */
+ pj_assert(PJ_ARRAY_SIZE(mandatory_attr_desc)==
+ PJ_STUN_ATTR_END_MANDATORY_ATTR+1);
+ pj_assert(mandatory_attr_desc[PJ_STUN_ATTR_END_MANDATORY_ATTR].decode_attr
+ == NULL);
+ pj_assert(mandatory_attr_desc[PJ_STUN_ATTR_USE_CANDIDATE].decode_attr
+ == &decode_empty_attr);
+ pj_assert(PJ_ARRAY_SIZE(extended_attr_desc) ==
+ PJ_STUN_ATTR_END_EXTENDED_ATTR-PJ_STUN_ATTR_START_EXTENDED_ATTR);
+
+ if (attr_type < PJ_STUN_ATTR_END_MANDATORY_ATTR)
+ desc = &mandatory_attr_desc[attr_type];
+ else if (attr_type >= PJ_STUN_ATTR_START_EXTENDED_ATTR &&
+ attr_type < PJ_STUN_ATTR_END_EXTENDED_ATTR)
+ desc = &extended_attr_desc[attr_type-PJ_STUN_ATTR_START_EXTENDED_ATTR];
+ else
+ return NULL;
+
+ /* Unassigned IDs have NULL callbacks; report them as unknown. */
+ return desc->decode_attr == NULL ? NULL : desc;
+}
+
+
+/*
+ * Get STUN attribute name.
+ */
+PJ_DEF(const char*) pj_stun_get_attr_name(unsigned attr_type)
+{
+ const struct attr_desc *attr_desc;
+
+ attr_desc = find_attr_desc(attr_type);
+ if (!attr_desc || attr_desc->name==NULL)
+ return "???";
+
+ return attr_desc->name;
+}
+
+
+/**
+ * Get STUN standard reason phrase for the specified error code.
+ */
+PJ_DEF(pj_str_t) pj_stun_get_err_reason(int err_code)
+{
+#if 0
+ /* Find error using linear search */
+ unsigned i;
+
+ for (i=0; i<PJ_ARRAY_SIZE(stun_err_msg_map); ++i) {
+ if (stun_err_msg_map[i].err_code == err_code)
+ return pj_str((char*)stun_err_msg_map[i].err_msg);
+ }
+ return pj_str(NULL);
+#else
+ /* Find error message using binary search */
+ int first = 0;
+ int n = PJ_ARRAY_SIZE(stun_err_msg_map);
+
+ while (n > 0) {
+ int half = n/2;
+ int mid = first + half;
+
+ if (stun_err_msg_map[mid].err_code < err_code) {
+ first = mid+1;
+ n -= (half+1);
+ } else if (stun_err_msg_map[mid].err_code > err_code) {
+ n = half;
+ } else {
+ first = mid;
+ break;
+ }
+ }
+
+
+ if (stun_err_msg_map[first].err_code == err_code) {
+ return pj_str((char*)stun_err_msg_map[first].err_msg);
+ } else {
+ return pj_str(NULL);
+ }
+#endif
+}
+
+
+/*
+ * Set padding character.
+ */
+PJ_DEF(int) pj_stun_set_padding_char(int chr)
+{
+ int old_pad = padding_char;
+ padding_char = chr;
+ return old_pad;
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+
+
/* Initialize an attribute header in place: set the 16-bit type and the
 * 16-bit value length (length excludes the 4-byte attribute header).
 */
#define INIT_ATTR(a,t,l)    (a)->hdr.type=(pj_uint16_t)(t), \
			    (a)->hdr.length=(pj_uint16_t)(l)
/* Size of the on-wire attribute header: 2-byte type + 2-byte length. */
#define ATTR_HDR_LEN	    4
+
+static pj_uint16_t GETVAL16H(const pj_uint8_t *buf, unsigned pos)
+{
+ return (pj_uint16_t) ((buf[pos + 0] << 8) | \
+ (buf[pos + 1] << 0));
+}
+
+PJ_INLINE(pj_uint16_t) GETVAL16N(const pj_uint8_t *buf, unsigned pos)
+{
+ return pj_htons(GETVAL16H(buf,pos));
+}
+
+static void PUTVAL16H(pj_uint8_t *buf, unsigned pos, pj_uint16_t hval)
+{
+ buf[pos+0] = (pj_uint8_t) ((hval & 0xFF00) >> 8);
+ buf[pos+1] = (pj_uint8_t) ((hval & 0x00FF) >> 0);
+}
+
+PJ_INLINE(pj_uint32_t) GETVAL32H(const pj_uint8_t *buf, unsigned pos)
+{
+ return (pj_uint32_t) ((buf[pos + 0] << 24UL) | \
+ (buf[pos + 1] << 16UL) | \
+ (buf[pos + 2] << 8UL) | \
+ (buf[pos + 3] << 0UL));
+}
+
+PJ_INLINE(pj_uint32_t) GETVAL32N(const pj_uint8_t *buf, unsigned pos)
+{
+ return pj_htonl(GETVAL32H(buf,pos));
+}
+
+static void PUTVAL32H(pj_uint8_t *buf, unsigned pos, pj_uint32_t hval)
+{
+ buf[pos+0] = (pj_uint8_t) ((hval & 0xFF000000UL) >> 24);
+ buf[pos+1] = (pj_uint8_t) ((hval & 0x00FF0000UL) >> 16);
+ buf[pos+2] = (pj_uint8_t) ((hval & 0x0000FF00UL) >> 8);
+ buf[pos+3] = (pj_uint8_t) ((hval & 0x000000FFUL) >> 0);
+}
+
+static void GETVAL64H(const pj_uint8_t *buf, unsigned pos, pj_timestamp *ts)
+{
+ ts->u32.hi = GETVAL32H(buf, pos);
+ ts->u32.lo = GETVAL32H(buf, pos+4);
+}
+
+static void PUTVAL64H(pj_uint8_t *buf, unsigned pos, const pj_timestamp *ts)
+{
+ PUTVAL32H(buf, pos, ts->u32.hi);
+ PUTVAL32H(buf, pos+4, ts->u32.lo);
+}
+
+
+static void GETATTRHDR(const pj_uint8_t *buf, pj_stun_attr_hdr *hdr)
+{
+ hdr->type = GETVAL16H(buf, 0);
+ hdr->length = GETVAL16H(buf, 2);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+/*
+ * STUN generic IP address container
+ */
/* On-wire value length of a generic address attribute:
 * 1 reserved byte + 1 family byte + 2-byte port + 4 or 16 address bytes.
 */
#define STUN_GENERIC_IPV4_ADDR_LEN  8
#define STUN_GENERIC_IPV6_ADDR_LEN  20
+
+/*
+ * Init sockaddr attr
+ */
+PJ_DEF(pj_status_t) pj_stun_sockaddr_attr_init( pj_stun_sockaddr_attr *attr,
+ int attr_type,
+ pj_bool_t xor_ed,
+ const pj_sockaddr_t *addr,
+ unsigned addr_len)
+{
+ unsigned attr_len;
+
+ PJ_ASSERT_RETURN(attr && addr_len && addr, PJ_EINVAL);
+ PJ_ASSERT_RETURN(addr_len == sizeof(pj_sockaddr_in) ||
+ addr_len == sizeof(pj_sockaddr_in6), PJ_EINVAL);
+
+ attr_len = pj_sockaddr_get_addr_len(addr) + 4;
+ INIT_ATTR(attr, attr_type, attr_len);
+
+ pj_memcpy(&attr->sockaddr, addr, addr_len);
+ attr->xor_ed = xor_ed;
+
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Create a generic STUN IP address attribute for IPv4 address.
+ */
+PJ_DEF(pj_status_t) pj_stun_sockaddr_attr_create(pj_pool_t *pool,
+ int attr_type,
+ pj_bool_t xor_ed,
+ const pj_sockaddr_t *addr,
+ unsigned addr_len,
+ pj_stun_sockaddr_attr **p_attr)
+{
+ pj_stun_sockaddr_attr *attr;
+
+ PJ_ASSERT_RETURN(pool && p_attr, PJ_EINVAL);
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_sockaddr_attr);
+ *p_attr = attr;
+ return pj_stun_sockaddr_attr_init(attr, attr_type, xor_ed,
+ addr, addr_len);
+}
+
+
+/*
+ * Create and add generic STUN IP address attribute to a STUN message.
+ */
+PJ_DEF(pj_status_t) pj_stun_msg_add_sockaddr_attr(pj_pool_t *pool,
+ pj_stun_msg *msg,
+ int attr_type,
+ pj_bool_t xor_ed,
+ const pj_sockaddr_t *addr,
+ unsigned addr_len)
+{
+ pj_stun_sockaddr_attr *attr;
+ pj_status_t status;
+
+ status = pj_stun_sockaddr_attr_create(pool, attr_type, xor_ed,
+ addr, addr_len, &attr);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ return pj_stun_msg_add_attr(msg, &attr->hdr);
+}
+
/* Parse a (non-XOR'ed) address attribute value. Wire layout after the
 * 4-byte attribute header: 1 reserved byte, 1 family byte (1=IPv4,
 * 2=IPv6), 2-byte port (network order), then 4 or 16 address bytes.
 */
static pj_status_t decode_sockaddr_attr(pj_pool_t *pool,
					const pj_uint8_t *buf,
					const pj_stun_msg_hdr *msghdr,
					void **p_attr)
{
    pj_stun_sockaddr_attr *attr;
    int af;
    unsigned addr_len;
    pj_uint32_t val;

    PJ_CHECK_STACK();

    PJ_UNUSED_ARG(msghdr);

    /* Create the attribute */
    attr = PJ_POOL_ZALLOC_T(pool, pj_stun_sockaddr_attr);
    GETATTRHDR(buf, &attr->hdr);

    /* Check that the attribute length is valid.
     * NOTE(review): this pre-check is subsumed by the per-family length
     * checks below; kept here to reject obviously bad lengths early.
     */
    if (attr->hdr.length != STUN_GENERIC_IPV4_ADDR_LEN &&
	attr->hdr.length != STUN_GENERIC_IPV6_ADDR_LEN)
    {
	return PJNATH_ESTUNINATTRLEN;
    }

    /* Address family is the second byte of the value. */
    val = *(pj_uint8_t*)(buf + ATTR_HDR_LEN + 1);

    /* Check address family is valid, and that the declared length
     * matches the family.
     */
    if (val == 1) {
	if (attr->hdr.length != STUN_GENERIC_IPV4_ADDR_LEN)
	    return PJNATH_ESTUNINATTRLEN;
	af = pj_AF_INET();
	addr_len = 4;
    } else if (val == 2) {
	if (attr->hdr.length != STUN_GENERIC_IPV6_ADDR_LEN)
	    return PJNATH_ESTUNINATTRLEN;
	af = pj_AF_INET6();
	addr_len = 16;
    } else {
	/* Invalid address family */
	return PJNATH_EINVAF;
    }

    /* Get port and address */
    pj_sockaddr_init(af, &attr->sockaddr, NULL, 0);
    pj_sockaddr_set_port(&attr->sockaddr,
			 GETVAL16H(buf, ATTR_HDR_LEN+2));
    pj_memcpy(pj_sockaddr_get_addr(&attr->sockaddr),
	      buf+ATTR_HDR_LEN+4,
	      addr_len);

    /* Done */
    *p_attr = (void*)attr;

    return PJ_SUCCESS;
}
+
+
/* Parse an XOR'ed address attribute (e.g. XOR-MAPPED-ADDRESS): decode
 * the value like a plain address attribute, then un-XOR the port with
 * the top 16 bits of the magic cookie and the address with the magic
 * cookie (IPv4), or with magic cookie + transaction ID (IPv6).
 */
static pj_status_t decode_xored_sockaddr_attr(pj_pool_t *pool,
					      const pj_uint8_t *buf,
					      const pj_stun_msg_hdr *msghdr,
					      void **p_attr)
{
    pj_stun_sockaddr_attr *attr;
    pj_status_t status;

    /* Reuse the plain decoder for layout and validation. */
    status = decode_sockaddr_attr(pool, buf, msghdr, p_attr);
    if (status != PJ_SUCCESS)
	return status;

    attr = *(pj_stun_sockaddr_attr**)p_attr;

    attr->xor_ed = PJ_TRUE;

    if (attr->sockaddr.addr.sa_family == pj_AF_INET()) {
	/* XOR is applied on the network-order representation, so the
	 * masks are converted with htons/htonl first.
	 */
	attr->sockaddr.ipv4.sin_port ^= pj_htons(PJ_STUN_MAGIC >> 16);
	attr->sockaddr.ipv4.sin_addr.s_addr ^= pj_htonl(PJ_STUN_MAGIC);
    } else if (attr->sockaddr.addr.sa_family == pj_AF_INET6()) {
	unsigned i;
	pj_uint8_t *dst = (pj_uint8_t*) &attr->sockaddr.ipv6.sin6_addr;
	pj_uint32_t magic = pj_htonl(PJ_STUN_MAGIC);

	attr->sockaddr.ipv6.sin6_port ^= pj_htons(PJ_STUN_MAGIC >> 16);

	/* If the IP address family is IPv6, X-Address is computed by
	 * taking the mapped IP address in host byte order, XOR'ing it
	 * with the concatenation of the magic cookie and the 96-bit
	 * transaction ID, and converting the result to network byte
	 * order.
	 */
	for (i=0; i<4; ++i) {
	    dst[i] ^= ((const pj_uint8_t*)&magic)[i];
	}
	pj_assert(sizeof(msghdr->tsx_id[0]) == 1);
	for (i=0; i<12; ++i) {
	    dst[i+4] ^= msghdr->tsx_id[i];
	}

    } else {
	return PJNATH_EINVAF;
    }

    /* Done */
    *p_attr = attr;

    return PJ_SUCCESS;
}
+
+
/* Serialize a socket-address attribute into "buf" (header + value).
 * When the attribute is marked xor_ed, the port and address are XOR'ed
 * with the STUN magic cookie (and, for IPv6, the transaction ID) before
 * being written. "*printed" receives the total bytes written.
 */
static pj_status_t encode_sockaddr_attr(const void *a, pj_uint8_t *buf,
				        unsigned len,
				        const pj_stun_msg_hdr *msghdr,
				        unsigned *printed)
{
    pj_uint8_t *start_buf = buf;
    const pj_stun_sockaddr_attr *ca =
	(const pj_stun_sockaddr_attr *)a;

    PJ_CHECK_STACK();

    /* Common: attribute type */
    PUTVAL16H(buf, 0, ca->hdr.type);

    if (ca->sockaddr.addr.sa_family == pj_AF_INET()) {
	enum {
	    ATTR_LEN = ATTR_HDR_LEN + STUN_GENERIC_IPV4_ADDR_LEN
	};

	if (len < ATTR_LEN)
	    return PJ_ETOOSMALL;

	/* attribute len */
	PUTVAL16H(buf, 2, STUN_GENERIC_IPV4_ADDR_LEN);
	buf += ATTR_HDR_LEN;

	/* Ignored (reserved byte before the family octet) */
	*buf++ = '\0';

	/* Address family, 1 for IPv4 */
	*buf++ = 1;

	/* IPv4 address */
	if (ca->xor_ed) {
	    pj_uint32_t addr;
	    pj_uint16_t port;

	    /* Work on copies so the attribute itself is not modified. */
	    addr = ca->sockaddr.ipv4.sin_addr.s_addr;
	    port = ca->sockaddr.ipv4.sin_port;

	    port ^= pj_htons(PJ_STUN_MAGIC >> 16);
	    addr ^= pj_htonl(PJ_STUN_MAGIC);

	    /* Port */
	    pj_memcpy(buf, &port, 2);
	    buf += 2;

	    /* Address */
	    pj_memcpy(buf, &addr, 4);
	    buf += 4;

	} else {
	    /* Port */
	    pj_memcpy(buf, &ca->sockaddr.ipv4.sin_port, 2);
	    buf += 2;

	    /* Address */
	    pj_memcpy(buf, &ca->sockaddr.ipv4.sin_addr, 4);
	    buf += 4;
	}

	pj_assert(buf - start_buf == ATTR_LEN);

    } else if (ca->sockaddr.addr.sa_family == pj_AF_INET6()) {
	/* IPv6 address */
	enum {
	    ATTR_LEN = ATTR_HDR_LEN + STUN_GENERIC_IPV6_ADDR_LEN
	};

	if (len < ATTR_LEN)
	    return PJ_ETOOSMALL;

	/* attribute len */
	PUTVAL16H(buf, 2, STUN_GENERIC_IPV6_ADDR_LEN);
	buf += ATTR_HDR_LEN;

	/* Ignored (reserved byte before the family octet) */
	*buf++ = '\0';

	/* Address family, 2 for IPv6 */
	*buf++ = 2;

	/* IPv6 address */
	if (ca->xor_ed) {
	    unsigned i;
	    pj_uint8_t *dst;
	    const pj_uint8_t *src;
	    pj_uint32_t magic = pj_htonl(PJ_STUN_MAGIC);
	    pj_uint16_t port = ca->sockaddr.ipv6.sin6_port;

	    /* Port */
	    port ^= pj_htons(PJ_STUN_MAGIC >> 16);
	    pj_memcpy(buf, &port, 2);
	    buf += 2;

	    /* Address: first 4 bytes XOR'ed with the magic cookie, the
	     * remaining 12 with the transaction ID (RFC 5389 rule).
	     */
	    dst = buf;
	    src = (const pj_uint8_t*) &ca->sockaddr.ipv6.sin6_addr;
	    for (i=0; i<4; ++i) {
		dst[i] = (pj_uint8_t)(src[i] ^ ((const pj_uint8_t*)&magic)[i]);
	    }
	    pj_assert(sizeof(msghdr->tsx_id[0]) == 1);
	    for (i=0; i<12; ++i) {
		dst[i+4] = (pj_uint8_t)(src[i+4] ^ msghdr->tsx_id[i]);
	    }

	    buf += 16;

	} else {
	    /* Port */
	    pj_memcpy(buf, &ca->sockaddr.ipv6.sin6_port, 2);
	    buf += 2;

	    /* Address */
	    pj_memcpy(buf, &ca->sockaddr.ipv6.sin6_addr, 16);
	    buf += 16;
	}

	pj_assert(buf - start_buf == ATTR_LEN);

    } else {
	return PJNATH_EINVAF;
    }

    /* Done */
    *printed = buf - start_buf;

    return PJ_SUCCESS;
}
+
+
+static void* clone_sockaddr_attr(pj_pool_t *pool, const void *src)
+{
+ pj_stun_sockaddr_attr *dst = PJ_POOL_ALLOC_T(pool, pj_stun_sockaddr_attr);
+ pj_memcpy(dst, src, sizeof(pj_stun_sockaddr_attr));
+ return (void*)dst;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+/*
+ * STUN generic string attribute
+ */
+
+/*
+ * Initialize a STUN generic string attribute.
+ */
+PJ_DEF(pj_status_t) pj_stun_string_attr_init( pj_stun_string_attr *attr,
+ pj_pool_t *pool,
+ int attr_type,
+ const pj_str_t *value)
+{
+ INIT_ATTR(attr, attr_type, value->slen);
+ if (value && value->slen)
+ pj_strdup(pool, &attr->value, value);
+ else
+ attr->value.slen = 0;
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Create a STUN generic string attribute.
+ */
+PJ_DEF(pj_status_t) pj_stun_string_attr_create(pj_pool_t *pool,
+ int attr_type,
+ const pj_str_t *value,
+ pj_stun_string_attr **p_attr)
+{
+ pj_stun_string_attr *attr;
+
+ PJ_ASSERT_RETURN(pool && value && p_attr, PJ_EINVAL);
+
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_string_attr);
+ *p_attr = attr;
+
+ return pj_stun_string_attr_init(attr, pool, attr_type, value);
+}
+
+
+/*
+ * Create and add STUN generic string attribute to the message.
+ */
+PJ_DEF(pj_status_t) pj_stun_msg_add_string_attr(pj_pool_t *pool,
+ pj_stun_msg *msg,
+ int attr_type,
+ const pj_str_t *value)
+{
+ pj_stun_string_attr *attr = NULL;
+ pj_status_t status;
+
+ status = pj_stun_string_attr_create(pool, attr_type, value,
+ &attr);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ return pj_stun_msg_add_attr(msg, &attr->hdr);
+}
+
+
+static pj_status_t decode_string_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr)
+{
+ pj_stun_string_attr *attr;
+ pj_str_t value;
+
+ PJ_UNUSED_ARG(msghdr);
+
+ /* Create the attribute */
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_string_attr);
+ GETATTRHDR(buf, &attr->hdr);
+
+ /* Get pointer to the string in the message */
+ value.ptr = ((char*)buf + ATTR_HDR_LEN);
+ value.slen = attr->hdr.length;
+
+ /* Copy the string to the attribute */
+ pj_strdup(pool, &attr->value, &value);
+
+ /* Done */
+ *p_attr = attr;
+
+ return PJ_SUCCESS;
+
+}
+
+
/* Serialize a string attribute. The declared "length" field follows
 * RFC 5389 (exact string length), except for SOFTWARE or when the
 * message has no RFC 5389 magic cookie, where the 4-byte-aligned length
 * is declared for RFC 3489 interoperability. "*printed" always receives
 * the padded (word-aligned) total size.
 */
static pj_status_t encode_string_attr(const void *a, pj_uint8_t *buf,
				      unsigned len,
				      const pj_stun_msg_hdr *msghdr,
				      unsigned *printed)
{
    const pj_stun_string_attr *ca =
	(const pj_stun_string_attr*)a;

    PJ_CHECK_STACK();

    PJ_UNUSED_ARG(msghdr);

    /* Calculated total attr_len (add padding if necessary) */
    *printed = (ca->value.slen + ATTR_HDR_LEN + 3) & (~3);
    if (len < *printed) {
	*printed = 0;
	return PJ_ETOOSMALL;
    }

    PUTVAL16H(buf, 0, ca->hdr.type);

    /* Special treatment for SOFTWARE attribute:
     * This attribute had caused interop problem when talking to
     * legacy RFC 3489 STUN servers, due to different "length"
     * rules with RFC 5389.
     */
    if (msghdr->magic != PJ_STUN_MAGIC ||
	ca->hdr.type == PJ_STUN_ATTR_SOFTWARE)
    {
	/* Set the length to be 4-bytes aligned so that we can
	 * communicate with RFC 3489 endpoints
	 */
	PUTVAL16H(buf, 2, (pj_uint16_t)((ca->value.slen + 3) & (~3)));
    } else {
	/* Use RFC 5389 rule */
	PUTVAL16H(buf, 2, (pj_uint16_t)ca->value.slen);
    }

    /* Copy the string */
    pj_memcpy(buf+ATTR_HDR_LEN, ca->value.ptr, ca->value.slen);

    /* Add padding character, if string is not 4-bytes aligned. */
    if (ca->value.slen & 0x03) {
	pj_uint8_t pad[3];
	pj_memset(pad, padding_char, sizeof(pad));
	pj_memcpy(buf+ATTR_HDR_LEN+ca->value.slen, pad,
		  4-(ca->value.slen & 0x03));
    }

    /* Done */
    return PJ_SUCCESS;
}
+
+
+static void* clone_string_attr(pj_pool_t *pool, const void *src)
+{
+ const pj_stun_string_attr *asrc = (const pj_stun_string_attr*)src;
+ pj_stun_string_attr *dst = PJ_POOL_ALLOC_T(pool, pj_stun_string_attr);
+
+ pj_memcpy(dst, src, sizeof(pj_stun_attr_hdr));
+ pj_strdup(pool, &dst->value, &asrc->value);
+
+ return (void*)dst;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+/*
+ * STUN empty attribute (used by USE-CANDIDATE).
+ */
+
+/*
+ * Create a STUN empty attribute.
+ */
+PJ_DEF(pj_status_t) pj_stun_empty_attr_create(pj_pool_t *pool,
+ int attr_type,
+ pj_stun_empty_attr **p_attr)
+{
+ pj_stun_empty_attr *attr;
+
+ PJ_ASSERT_RETURN(pool && p_attr, PJ_EINVAL);
+
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_empty_attr);
+ INIT_ATTR(attr, attr_type, 0);
+
+ *p_attr = attr;
+
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Create STUN empty attribute and add the attribute to the message.
+ */
+PJ_DEF(pj_status_t) pj_stun_msg_add_empty_attr( pj_pool_t *pool,
+ pj_stun_msg *msg,
+ int attr_type)
+{
+ pj_stun_empty_attr *attr = NULL;
+ pj_status_t status;
+
+ status = pj_stun_empty_attr_create(pool, attr_type, &attr);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ return pj_stun_msg_add_attr(msg, &attr->hdr);
+}
+
+static pj_status_t decode_empty_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr)
+{
+ pj_stun_empty_attr *attr;
+
+ PJ_UNUSED_ARG(msghdr);
+
+ /* Check that the struct address is valid */
+ pj_assert(sizeof(pj_stun_empty_attr) == ATTR_HDR_LEN);
+
+ /* Create the attribute */
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_empty_attr);
+ GETATTRHDR(buf, &attr->hdr);
+
+ /* Check that the attribute length is valid */
+ if (attr->hdr.length != 0)
+ return PJNATH_ESTUNINATTRLEN;
+
+ /* Done */
+ *p_attr = attr;
+
+ return PJ_SUCCESS;
+}
+
+
+static pj_status_t encode_empty_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed)
+{
+ const pj_stun_empty_attr *ca = (pj_stun_empty_attr*)a;
+
+ PJ_UNUSED_ARG(msghdr);
+
+ if (len < ATTR_HDR_LEN)
+ return PJ_ETOOSMALL;
+
+ PUTVAL16H(buf, 0, ca->hdr.type);
+ PUTVAL16H(buf, 2, 0);
+
+ /* Done */
+ *printed = ATTR_HDR_LEN;
+
+ return PJ_SUCCESS;
+}
+
+
+static void* clone_empty_attr(pj_pool_t *pool, const void *src)
+{
+ pj_stun_empty_attr *dst = PJ_POOL_ALLOC_T(pool, pj_stun_empty_attr);
+
+ pj_memcpy(dst, src, sizeof(pj_stun_empty_attr));
+
+ return (void*) dst;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+/*
+ * STUN generic 32bit integer attribute.
+ */
+
+/*
+ * Create a STUN generic 32bit value attribute.
+ */
+PJ_DEF(pj_status_t) pj_stun_uint_attr_create(pj_pool_t *pool,
+ int attr_type,
+ pj_uint32_t value,
+ pj_stun_uint_attr **p_attr)
+{
+ pj_stun_uint_attr *attr;
+
+ PJ_ASSERT_RETURN(pool && p_attr, PJ_EINVAL);
+
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_uint_attr);
+ INIT_ATTR(attr, attr_type, 4);
+ attr->value = value;
+
+ *p_attr = attr;
+
+ return PJ_SUCCESS;
+}
+
+/* Create and add STUN generic 32bit value attribute to the message. */
+PJ_DEF(pj_status_t) pj_stun_msg_add_uint_attr(pj_pool_t *pool,
+ pj_stun_msg *msg,
+ int attr_type,
+ pj_uint32_t value)
+{
+ pj_stun_uint_attr *attr = NULL;
+ pj_status_t status;
+
+ status = pj_stun_uint_attr_create(pool, attr_type, value, &attr);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ return pj_stun_msg_add_attr(msg, &attr->hdr);
+}
+
+static pj_status_t decode_uint_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr)
+{
+ pj_stun_uint_attr *attr;
+
+ PJ_UNUSED_ARG(msghdr);
+
+ /* Create the attribute */
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_uint_attr);
+ GETATTRHDR(buf, &attr->hdr);
+
+ attr->value = GETVAL32H(buf, 4);
+
+ /* Check that the attribute length is valid */
+ if (attr->hdr.length != 4)
+ return PJNATH_ESTUNINATTRLEN;
+
+ /* Done */
+ *p_attr = attr;
+
+ return PJ_SUCCESS;
+}
+
+
+static pj_status_t encode_uint_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed)
+{
+ const pj_stun_uint_attr *ca = (const pj_stun_uint_attr*)a;
+
+ PJ_CHECK_STACK();
+
+ PJ_UNUSED_ARG(msghdr);
+
+ if (len < 8)
+ return PJ_ETOOSMALL;
+
+ PUTVAL16H(buf, 0, ca->hdr.type);
+ PUTVAL16H(buf, 2, (pj_uint16_t)4);
+ PUTVAL32H(buf, 4, ca->value);
+
+ /* Done */
+ *printed = 8;
+
+ return PJ_SUCCESS;
+}
+
+
+static void* clone_uint_attr(pj_pool_t *pool, const void *src)
+{
+ pj_stun_uint_attr *dst = PJ_POOL_ALLOC_T(pool, pj_stun_uint_attr);
+
+ pj_memcpy(dst, src, sizeof(pj_stun_uint_attr));
+
+ return (void*)dst;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+/*
+ * Create a STUN generic 64bit value attribute.
+ */
+PJ_DEF(pj_status_t) pj_stun_uint64_attr_create(pj_pool_t *pool,
+ int attr_type,
+ const pj_timestamp *value,
+ pj_stun_uint64_attr **p_attr)
+{
+ pj_stun_uint64_attr *attr;
+
+ PJ_ASSERT_RETURN(pool && p_attr, PJ_EINVAL);
+
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_uint64_attr);
+ INIT_ATTR(attr, attr_type, 8);
+
+ if (value) {
+ attr->value.u32.hi = value->u32.hi;
+ attr->value.u32.lo = value->u32.lo;
+ }
+
+ *p_attr = attr;
+
+ return PJ_SUCCESS;
+}
+
+/* Create and add STUN generic 64bit value attribute to the message. */
+PJ_DEF(pj_status_t) pj_stun_msg_add_uint64_attr(pj_pool_t *pool,
+ pj_stun_msg *msg,
+ int attr_type,
+ const pj_timestamp *value)
+{
+ pj_stun_uint64_attr *attr = NULL;
+ pj_status_t status;
+
+ status = pj_stun_uint64_attr_create(pool, attr_type, value, &attr);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ return pj_stun_msg_add_attr(msg, &attr->hdr);
+}
+
+static pj_status_t decode_uint64_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr)
+{
+ pj_stun_uint64_attr *attr;
+
+ PJ_UNUSED_ARG(msghdr);
+
+ /* Create the attribute */
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_uint64_attr);
+ GETATTRHDR(buf, &attr->hdr);
+
+ if (attr->hdr.length != 8)
+ return PJNATH_ESTUNINATTRLEN;
+
+ GETVAL64H(buf, 4, &attr->value);
+
+ /* Done */
+ *p_attr = attr;
+
+ return PJ_SUCCESS;
+}
+
+
+static pj_status_t encode_uint64_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed)
+{
+ const pj_stun_uint64_attr *ca = (const pj_stun_uint64_attr*)a;
+
+ PJ_CHECK_STACK();
+
+ PJ_UNUSED_ARG(msghdr);
+
+ if (len < 12)
+ return PJ_ETOOSMALL;
+
+ PUTVAL16H(buf, 0, ca->hdr.type);
+ PUTVAL16H(buf, 2, (pj_uint16_t)8);
+ PUTVAL64H(buf, 4, &ca->value);
+
+ /* Done */
+ *printed = 12;
+
+ return PJ_SUCCESS;
+}
+
+
+static void* clone_uint64_attr(pj_pool_t *pool, const void *src)
+{
+ pj_stun_uint64_attr *dst = PJ_POOL_ALLOC_T(pool, pj_stun_uint64_attr);
+
+ pj_memcpy(dst, src, sizeof(pj_stun_uint64_attr));
+
+ return (void*)dst;
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+/*
+ * STUN MESSAGE-INTEGRITY attribute.
+ */
+
+/*
+ * Create a STUN MESSAGE-INTEGRITY attribute.
+ */
+PJ_DEF(pj_status_t) pj_stun_msgint_attr_create(pj_pool_t *pool,
+ pj_stun_msgint_attr **p_attr)
+{
+ pj_stun_msgint_attr *attr;
+
+ PJ_ASSERT_RETURN(pool && p_attr, PJ_EINVAL);
+
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_msgint_attr);
+ INIT_ATTR(attr, PJ_STUN_ATTR_MESSAGE_INTEGRITY, 20);
+
+ *p_attr = attr;
+
+ return PJ_SUCCESS;
+}
+
+
+PJ_DEF(pj_status_t) pj_stun_msg_add_msgint_attr(pj_pool_t *pool,
+ pj_stun_msg *msg)
+{
+ pj_stun_msgint_attr *attr = NULL;
+ pj_status_t status;
+
+ status = pj_stun_msgint_attr_create(pool, &attr);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ return pj_stun_msg_add_attr(msg, &attr->hdr);
+}
+
+static pj_status_t decode_msgint_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr)
+{
+ pj_stun_msgint_attr *attr;
+
+ PJ_UNUSED_ARG(msghdr);
+
+ /* Create attribute */
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_msgint_attr);
+ GETATTRHDR(buf, &attr->hdr);
+
+ /* Check that the attribute length is valid */
+ if (attr->hdr.length != 20)
+ return PJNATH_ESTUNINATTRLEN;
+
+ /* Copy hmac */
+ pj_memcpy(attr->hmac, buf+4, 20);
+
+ /* Done */
+ *p_attr = attr;
+ return PJ_SUCCESS;
+}
+
+
+static pj_status_t encode_msgint_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed)
+{
+ const pj_stun_msgint_attr *ca = (const pj_stun_msgint_attr*)a;
+
+ PJ_CHECK_STACK();
+
+ PJ_UNUSED_ARG(msghdr);
+
+ if (len < 24)
+ return PJ_ETOOSMALL;
+
+ /* Copy and convert attribute to network byte order */
+ PUTVAL16H(buf, 0, ca->hdr.type);
+ PUTVAL16H(buf, 2, ca->hdr.length);
+
+ pj_memcpy(buf+4, ca->hmac, 20);
+
+ /* Done */
+ *printed = 24;
+
+ return PJ_SUCCESS;
+}
+
+
+static void* clone_msgint_attr(pj_pool_t *pool, const void *src)
+{
+ pj_stun_msgint_attr *dst = PJ_POOL_ALLOC_T(pool, pj_stun_msgint_attr);
+
+ pj_memcpy(dst, src, sizeof(pj_stun_msgint_attr));
+
+ return (void*) dst;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+/*
+ * STUN ERROR-CODE
+ */
+
+/*
+ * Create a STUN ERROR-CODE attribute.
+ */
+PJ_DEF(pj_status_t) pj_stun_errcode_attr_create(pj_pool_t *pool,
+ int err_code,
+ const pj_str_t *err_reason,
+ pj_stun_errcode_attr **p_attr)
+{
+ pj_stun_errcode_attr *attr;
+ char err_buf[80];
+ pj_str_t str;
+
+ PJ_ASSERT_RETURN(pool && err_code && p_attr, PJ_EINVAL);
+
+ if (err_reason == NULL) {
+ str = pj_stun_get_err_reason(err_code);
+ if (str.slen == 0) {
+ str.slen = pj_ansi_snprintf(err_buf, sizeof(err_buf),
+ "Unknown error %d", err_code);
+ str.ptr = err_buf;
+ }
+ err_reason = &str;
+ }
+
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_errcode_attr);
+ INIT_ATTR(attr, PJ_STUN_ATTR_ERROR_CODE, 4+err_reason->slen);
+ attr->err_code = err_code;
+ pj_strdup(pool, &attr->reason, err_reason);
+
+ *p_attr = attr;
+
+ return PJ_SUCCESS;
+}
+
+
+PJ_DEF(pj_status_t) pj_stun_msg_add_errcode_attr(pj_pool_t *pool,
+ pj_stun_msg *msg,
+ int err_code,
+ const pj_str_t *err_reason)
+{
+ pj_stun_errcode_attr *err_attr = NULL;
+ pj_status_t status;
+
+ status = pj_stun_errcode_attr_create(pool, err_code, err_reason,
+ &err_attr);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ return pj_stun_msg_add_attr(msg, &err_attr->hdr);
+}
+
+static pj_status_t decode_errcode_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr)
+{
+ pj_stun_errcode_attr *attr;
+ pj_str_t value;
+
+ PJ_UNUSED_ARG(msghdr);
+
+ /* Create the attribute */
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_errcode_attr);
+ GETATTRHDR(buf, &attr->hdr);
+
+ attr->err_code = buf[6] * 100 + buf[7];
+
+ /* Get pointer to the string in the message */
+ value.ptr = ((char*)buf + ATTR_HDR_LEN + 4);
+ value.slen = attr->hdr.length - 4;
+
+ /* Copy the string to the attribute */
+ pj_strdup(pool, &attr->reason, &value);
+
+ /* Done */
+ *p_attr = attr;
+
+ return PJ_SUCCESS;
+}
+
+
+static pj_status_t encode_errcode_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed)
+{
+ const pj_stun_errcode_attr *ca =
+ (const pj_stun_errcode_attr*)a;
+
+ PJ_CHECK_STACK();
+
+ PJ_UNUSED_ARG(msghdr);
+
+ if (len < ATTR_HDR_LEN + 4 + (unsigned)ca->reason.slen)
+ return PJ_ETOOSMALL;
+
+ /* Copy and convert attribute to network byte order */
+ PUTVAL16H(buf, 0, ca->hdr.type);
+ PUTVAL16H(buf, 2, (pj_uint16_t)(4 + ca->reason.slen));
+ PUTVAL16H(buf, 4, 0);
+ buf[6] = (pj_uint8_t)(ca->err_code / 100);
+ buf[7] = (pj_uint8_t)(ca->err_code % 100);
+
+ /* Copy error string */
+ pj_memcpy(buf + ATTR_HDR_LEN + 4, ca->reason.ptr, ca->reason.slen);
+
+ /* Done */
+ *printed = (ATTR_HDR_LEN + 4 + ca->reason.slen + 3) & (~3);
+
+ return PJ_SUCCESS;
+}
+
+
+static void* clone_errcode_attr(pj_pool_t *pool, const void *src)
+{
+ const pj_stun_errcode_attr *asrc = (const pj_stun_errcode_attr*)src;
+ pj_stun_errcode_attr *dst = PJ_POOL_ALLOC_T(pool, pj_stun_errcode_attr);
+
+ pj_memcpy(dst, src, sizeof(pj_stun_errcode_attr));
+ pj_strdup(pool, &dst->reason, &asrc->reason);
+
+ return (void*)dst;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+/*
+ * STUN UNKNOWN-ATTRIBUTES attribute
+ */
+
+/*
+ * Create an empty instance of STUN UNKNOWN-ATTRIBUTES attribute.
+ *
+ * @param pool The pool to allocate memory from.
+ * @param p_attr Pointer to receive the attribute.
+ *
+ * @return PJ_SUCCESS on success or the appropriate error code.
+ */
+PJ_DEF(pj_status_t) pj_stun_unknown_attr_create(pj_pool_t *pool,
+ unsigned attr_cnt,
+ const pj_uint16_t attr_array[],
+ pj_stun_unknown_attr **p_attr)
+{
+ pj_stun_unknown_attr *attr;
+ unsigned i;
+
+ PJ_ASSERT_RETURN(pool && attr_cnt < PJ_STUN_MAX_ATTR && p_attr, PJ_EINVAL);
+
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_unknown_attr);
+ INIT_ATTR(attr, PJ_STUN_ATTR_UNKNOWN_ATTRIBUTES, attr_cnt * 2);
+
+ attr->attr_count = attr_cnt;
+ for (i=0; i<attr_cnt; ++i) {
+ attr->attrs[i] = attr_array[i];
+ }
+
+ /* If the number of unknown attributes is an odd number, one of the
+ * attributes MUST be repeated in the list.
+ */
+ /* No longer necessary
+ if ((attr_cnt & 0x01)) {
+ attr->attrs[attr_cnt] = attr_array[attr_cnt-1];
+ }
+ */
+
+ *p_attr = attr;
+
+ return PJ_SUCCESS;
+}
+
+
+/* Create and add STUN UNKNOWN-ATTRIBUTES attribute to the message. */
+PJ_DEF(pj_status_t) pj_stun_msg_add_unknown_attr(pj_pool_t *pool,
+ pj_stun_msg *msg,
+ unsigned attr_cnt,
+ const pj_uint16_t attr_type[])
+{
+ pj_stun_unknown_attr *attr = NULL;
+ pj_status_t status;
+
+ status = pj_stun_unknown_attr_create(pool, attr_cnt, attr_type, &attr);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ return pj_stun_msg_add_attr(msg, &attr->hdr);
+}
+
+static pj_status_t decode_unknown_attr(pj_pool_t *pool,
+ const pj_uint8_t *buf,
+ const pj_stun_msg_hdr *msghdr,
+ void **p_attr)
+{
+ pj_stun_unknown_attr *attr;
+ const pj_uint16_t *punk_attr;
+ unsigned i;
+
+ PJ_UNUSED_ARG(msghdr);
+
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_unknown_attr);
+ GETATTRHDR(buf, &attr->hdr);
+
+ attr->attr_count = (attr->hdr.length >> 1);
+ if (attr->attr_count > PJ_STUN_MAX_ATTR)
+ return PJ_ETOOMANY;
+
+ punk_attr = (const pj_uint16_t*)(buf + ATTR_HDR_LEN);
+ for (i=0; i<attr->attr_count; ++i) {
+ attr->attrs[i] = pj_ntohs(punk_attr[i]);
+ }
+
+ /* Done */
+ *p_attr = attr;
+
+ return PJ_SUCCESS;
+}
+
+
+static pj_status_t encode_unknown_attr(const void *a, pj_uint8_t *buf,
+ unsigned len,
+ const pj_stun_msg_hdr *msghdr,
+ unsigned *printed)
+{
+ const pj_stun_unknown_attr *ca = (const pj_stun_unknown_attr*) a;
+ pj_uint16_t *dst_unk_attr;
+ unsigned i;
+
+ PJ_CHECK_STACK();
+
+ PJ_UNUSED_ARG(msghdr);
+
+ /* Check that buffer is enough */
+ if (len < ATTR_HDR_LEN + (ca->attr_count << 1))
+ return PJ_ETOOSMALL;
+
+ PUTVAL16H(buf, 0, ca->hdr.type);
+ PUTVAL16H(buf, 2, (pj_uint16_t)(ca->attr_count << 1));
+
+ /* Copy individual attribute */
+ dst_unk_attr = (pj_uint16_t*)(buf + ATTR_HDR_LEN);
+ for (i=0; i < ca->attr_count; ++i, ++dst_unk_attr) {
+ *dst_unk_attr = pj_htons(ca->attrs[i]);
+ }
+
+ /* Done */
+ *printed = (ATTR_HDR_LEN + (ca->attr_count << 1) + 3) & (~3);
+
+ return PJ_SUCCESS;
+}
+
+
+static void* clone_unknown_attr(pj_pool_t *pool, const void *src)
+{
+ pj_stun_unknown_attr *dst = PJ_POOL_ALLOC_T(pool, pj_stun_unknown_attr);
+
+ pj_memcpy(dst, src, sizeof(pj_stun_unknown_attr));
+
+ return (void*)dst;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+/*
+ * STUN generic binary attribute
+ */
+
+/*
+ * Initialize STUN binary attribute.
+ */
+PJ_DEF(pj_status_t) pj_stun_binary_attr_init( pj_stun_binary_attr *attr,
+ pj_pool_t *pool,
+ int attr_type,
+ const pj_uint8_t *data,
+ unsigned length)
+{
+ PJ_ASSERT_RETURN(attr_type, PJ_EINVAL);
+
+ INIT_ATTR(attr, attr_type, length);
+
+ attr->magic = PJ_STUN_MAGIC;
+
+ if (data && length) {
+ attr->length = length;
+ attr->data = (pj_uint8_t*) pj_pool_alloc(pool, length);
+ pj_memcpy(attr->data, data, length);
+ } else {
+ attr->data = NULL;
+ attr->length = 0;
+ }
+
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Create a blank binary attribute.
+ */
+PJ_DEF(pj_status_t) pj_stun_binary_attr_create(pj_pool_t *pool,
+ int attr_type,
+ const pj_uint8_t *data,
+ unsigned length,
+ pj_stun_binary_attr **p_attr)
+{
+ pj_stun_binary_attr *attr;
+
+ PJ_ASSERT_RETURN(pool && attr_type && p_attr, PJ_EINVAL);
+ attr = PJ_POOL_ZALLOC_T(pool, pj_stun_binary_attr);
+ *p_attr = attr;
+ return pj_stun_binary_attr_init(attr, pool, attr_type, data, length);
+}
+
+
+/* Create and add binary attr. */
+PJ_DEF(pj_status_t) pj_stun_msg_add_binary_attr(pj_pool_t *pool,
+ pj_stun_msg *msg,
+ int attr_type,
+ const pj_uint8_t *data,
+ unsigned length)
+{
+ pj_stun_binary_attr *attr = NULL;
+ pj_status_t status;
+
+ status = pj_stun_binary_attr_create(pool, attr_type,
+ data, length, &attr);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ return pj_stun_msg_add_attr(msg, &attr->hdr);
+}
+
+
+/* Decode an unknown/generic attribute from the wire into a
+ * pj_stun_binary_attr allocated from 'pool'. 'buf' points at the
+ * attribute header inside the PDU.
+ */
+static pj_status_t decode_binary_attr(pj_pool_t *pool,
+				      const pj_uint8_t *buf,
+				      const pj_stun_msg_hdr *msghdr,
+				      void **p_attr)
+{
+    pj_stun_binary_attr *attr;
+
+    PJ_UNUSED_ARG(msghdr);
+
+    /* Create the attribute */
+    attr = PJ_POOL_ZALLOC_T(pool, pj_stun_binary_attr);
+    GETATTRHDR(buf, &attr->hdr);
+
+    /* Copy the value to the attribute. Fix: guard against a zero-length
+     * value -- the old code unconditionally requested a zero-byte pool
+     * allocation and did a zero-byte memcpy; an empty value is now
+     * represented by a NULL data pointer (consistent with
+     * pj_stun_binary_attr_init()).
+     */
+    attr->length = attr->hdr.length;
+    if (attr->length) {
+	attr->data = (pj_uint8_t*) pj_pool_alloc(pool, attr->length);
+	pj_memcpy(attr->data, buf+ATTR_HDR_LEN, attr->length);
+    } else {
+	attr->data = NULL;
+    }
+
+    /* Done */
+    *p_attr = attr;
+
+    return PJ_SUCCESS;
+
+}
+
+
+/* Encode a generic binary attribute into 'buf' (capacity 'len' bytes).
+ * On success *printed receives the padded, 4-byte aligned size.
+ */
+static pj_status_t encode_binary_attr(const void *a, pj_uint8_t *buf,
+				      unsigned len,
+				      const pj_stun_msg_hdr *msghdr,
+				      unsigned *printed)
+{
+    const pj_stun_binary_attr *ca = (const pj_stun_binary_attr*)a;
+
+    PJ_CHECK_STACK();
+
+    PJ_UNUSED_ARG(msghdr);
+
+    /* Calculated total attr_len (add padding if necessary) */
+    *printed = (ca->length + ATTR_HDR_LEN + 3) & (~3);
+    if (len < *printed)
+	return PJ_ETOOSMALL;
+
+    PUTVAL16H(buf, 0, ca->hdr.type);
+    PUTVAL16H(buf, 2, (pj_uint16_t) ca->length);
+
+    /* Copy the data. Fix: skip the memcpy when the value is empty, as
+     * ca->data is NULL in that case and memcpy(dst, NULL, 0) is
+     * undefined behavior.
+     */
+    if (ca->length)
+	pj_memcpy(buf+ATTR_HDR_LEN, ca->data, ca->length);
+
+    /* Fix: zero the up-to-3 padding bytes instead of leaving whatever
+     * stale bytes happened to be in the caller's buffer -- those bytes
+     * would otherwise be sent on the wire.
+     */
+    if (*printed > ca->length + ATTR_HDR_LEN)
+	pj_bzero(buf + ATTR_HDR_LEN + ca->length,
+		 *printed - (ca->length + ATTR_HDR_LEN));
+
+    /* Done */
+    return PJ_SUCCESS;
+}
+
+
+/* Deep-copy a generic binary attribute into 'pool'. */
+static void* clone_binary_attr(pj_pool_t *pool, const void *src)
+{
+    const pj_stun_binary_attr *asrc = (const pj_stun_binary_attr*)src;
+    pj_stun_binary_attr *dst = PJ_POOL_ALLOC_T(pool, pj_stun_binary_attr);
+
+    /* Shallow struct copy first (header, magic, length, data pointer) */
+    pj_memcpy(dst, src, sizeof(pj_stun_binary_attr));
+
+    if (asrc->length) {
+	dst->data = (pj_uint8_t*) pj_pool_alloc(pool, asrc->length);
+	pj_memcpy(dst->data, asrc->data, asrc->length);
+    } else {
+	/* Fix: the struct memcpy above copied the SOURCE's data pointer
+	 * into the clone; an empty clone must not alias memory owned by
+	 * the source's pool, so reset it to NULL explicitly.
+	 */
+	dst->data = NULL;
+    }
+
+    return (void*)dst;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+
+/*
+ * Initialize a generic STUN message.
+ *
+ * When tsx_id is NULL a fresh 96-bit transaction ID is generated from
+ * the process id, a random value, and a randomly seeded counter.
+ */
+PJ_DEF(pj_status_t) pj_stun_msg_init( pj_stun_msg *msg,
+				      unsigned msg_type,
+				      pj_uint32_t magic,
+				      const pj_uint8_t tsx_id[12])
+{
+    PJ_ASSERT_RETURN(msg && msg_type, PJ_EINVAL);
+
+    msg->hdr.type = (pj_uint16_t) msg_type;
+    msg->hdr.length = 0;
+    msg->hdr.magic = magic;
+    msg->attr_count = 0;
+
+    if (tsx_id == NULL) {
+	/* Generate a transaction ID: 12 bytes laid out as three
+	 * 32-bit words (pid, random, counter).
+	 */
+	struct tsx_id_gen
+	{
+	    pj_uint32_t proc_id;
+	    pj_uint32_t random;
+	    pj_uint32_t counter;
+	} gen;
+	static pj_uint32_t pj_stun_tsx_id_counter;
+
+	/* Seed the running counter once, lazily */
+	if (!pj_stun_tsx_id_counter)
+	    pj_stun_tsx_id_counter = pj_rand();
+
+	gen.proc_id = pj_getpid();
+	gen.random = pj_rand();
+	gen.counter = pj_stun_tsx_id_counter++;
+
+	pj_memcpy(&msg->hdr.tsx_id, &gen, sizeof(msg->hdr.tsx_id));
+    } else {
+	/* Caller supplied the transaction ID; use it verbatim */
+	pj_memcpy(&msg->hdr.tsx_id, tsx_id, sizeof(msg->hdr.tsx_id));
+    }
+
+    return PJ_SUCCESS;
+}
+
+
+/*
+ * Create a blank STUN message.
+ *
+ * Allocates a zeroed message from 'pool', stores it in *p_msg, and
+ * initializes the header via pj_stun_msg_init().
+ */
+PJ_DEF(pj_status_t) pj_stun_msg_create( pj_pool_t *pool,
+					unsigned msg_type,
+					pj_uint32_t magic,
+					const pj_uint8_t tsx_id[12],
+					pj_stun_msg **p_msg)
+{
+    pj_stun_msg *new_msg;
+
+    PJ_ASSERT_RETURN(pool && msg_type && p_msg, PJ_EINVAL);
+
+    new_msg = PJ_POOL_ZALLOC_T(pool, pj_stun_msg);
+    *p_msg = new_msg;
+
+    return pj_stun_msg_init(new_msg, msg_type, magic, tsx_id);
+}
+
+
+/*
+ * Clone a STUN message with all of its attributes.
+ *
+ * Returns the duplicate, or NULL on invalid arguments. Attributes
+ * that fail to clone are silently omitted from the duplicate.
+ */
+PJ_DEF(pj_stun_msg*) pj_stun_msg_clone( pj_pool_t *pool,
+					const pj_stun_msg *src)
+{
+    pj_stun_msg *dup;
+    unsigned j;
+
+    PJ_ASSERT_RETURN(pool && src, NULL);
+
+    /* Shallow-copy the whole message first (header + attr pointers) */
+    dup = PJ_POOL_ZALLOC_T(pool, pj_stun_msg);
+    pj_memcpy(dup, src, sizeof(pj_stun_msg));
+
+    /* Then deep-copy each attribute into the new pool */
+    dup->attr_count = 0;
+    for (j = 0; j < src->attr_count; ++j) {
+	pj_stun_attr_hdr *ac = pj_stun_attr_clone(pool, src->attr[j]);
+	if (ac)
+	    dup->attr[dup->attr_count++] = ac;
+    }
+
+    return dup;
+}
+
+
+/*
+ * Add STUN attribute to STUN message.
+ *
+ * The message holds at most PJ_STUN_MAX_ATTR attribute pointers;
+ * returns PJ_ETOOMANY when the array is full.
+ */
+PJ_DEF(pj_status_t) pj_stun_msg_add_attr(pj_stun_msg *msg,
+					 pj_stun_attr_hdr *attr)
+{
+    PJ_ASSERT_RETURN(msg && attr, PJ_EINVAL);
+
+    /* Reject when the fixed-size attribute array is already full */
+    if (msg->attr_count >= PJ_STUN_MAX_ATTR) {
+	pj_assert(msg->attr_count < PJ_STUN_MAX_ATTR);
+	return PJ_ETOOMANY;
+    }
+
+    msg->attr[msg->attr_count++] = attr;
+    return PJ_SUCCESS;
+}
+
+
+/*
+ * Check that the PDU is potentially a valid STUN message.
+ *
+ * Performs cheap syntactic checks (length, first byte, padding) and,
+ * when the magic cookie matches, verifies a trailing FINGERPRINT
+ * attribute's CRC unless PJ_STUN_NO_FINGERPRINT_CHECK is set.
+ */
+PJ_DEF(pj_status_t) pj_stun_msg_check(const pj_uint8_t *pdu, pj_size_t pdu_len,
+				      unsigned options)
+{
+    pj_size_t msg_len;
+
+    PJ_ASSERT_RETURN(pdu, PJ_EINVAL);
+
+    if (pdu_len < sizeof(pj_stun_msg_hdr))
+	return PJNATH_EINSTUNMSGLEN;
+
+    /* First byte of STUN message is always 0x00 or 0x01. */
+    if (*pdu != 0x00 && *pdu != 0x01)
+	return PJNATH_EINSTUNMSGTYPE;
+
+    /* Check the PDU length */
+    msg_len = GETVAL16H(pdu, 2);
+    if ((msg_len + 20 > pdu_len) ||
+	((options & PJ_STUN_IS_DATAGRAM) && msg_len + 20 != pdu_len))
+    {
+	return PJNATH_EINSTUNMSGLEN;
+    }
+
+    /* STUN message is always padded to the nearest 4 bytes, thus
+     * the last two bits of the length field are always zero.
+     */
+    if ((msg_len & 0x03) != 0) {
+	return PJNATH_EINSTUNMSGLEN;
+    }
+
+    /* If magic is set, then there is great possibility that this is
+     * a STUN message.
+     */
+    if (GETVAL32H(pdu, 4) == PJ_STUN_MAGIC) {
+
+	/* Check if FINGERPRINT attribute is present.
+	 * Fix: only probe for FINGERPRINT when the body is at least 8
+	 * bytes (attribute header + 4-byte CRC). With msg_len < 8 the
+	 * old probe at offset (msg_len + 20 - 8) read back into the
+	 * message header and could spuriously match transaction-ID
+	 * bytes, rejecting a valid message.
+	 */
+	if ((options & PJ_STUN_NO_FINGERPRINT_CHECK )==0 && msg_len >= 8 &&
+	    GETVAL16H(pdu, msg_len + 20 - 8) == PJ_STUN_ATTR_FINGERPRINT)
+	{
+	    pj_uint16_t attr_len = GETVAL16H(pdu, msg_len + 20 - 8 + 2);
+	    pj_uint32_t fingerprint = GETVAL32H(pdu, msg_len + 20 - 8 + 4);
+	    pj_uint32_t crc;
+
+	    if (attr_len != 4)
+		return PJNATH_ESTUNINATTRLEN;
+
+	    /* CRC covers everything before the FINGERPRINT attribute */
+	    crc = pj_crc32_calc(pdu, msg_len + 20 - 8);
+	    crc ^= STUN_XOR_FINGERPRINT;
+
+	    if (crc != fingerprint)
+		return PJNATH_ESTUNFINGERPRINT;
+	}
+    }
+
+    /* Could be a STUN message */
+    return PJ_SUCCESS;
+}
+
+
+/* Create error response */
+PJ_DEF(pj_status_t) pj_stun_msg_create_response(pj_pool_t *pool,
+ const pj_stun_msg *req_msg,
+ unsigned err_code,
+ const pj_str_t *err_msg,
+ pj_stun_msg **p_response)
+{
+ unsigned msg_type = req_msg->hdr.type;
+ pj_stun_msg *response = NULL;
+ pj_status_t status;
+
+ PJ_ASSERT_RETURN(pool && p_response, PJ_EINVAL);
+
+ PJ_ASSERT_RETURN(PJ_STUN_IS_REQUEST(msg_type),
+ PJNATH_EINSTUNMSGTYPE);
+
+ /* Create response or error response */
+ if (err_code)
+ msg_type |= PJ_STUN_ERROR_RESPONSE_BIT;
+ else
+ msg_type |= PJ_STUN_SUCCESS_RESPONSE_BIT;
+
+ status = pj_stun_msg_create(pool, msg_type, req_msg->hdr.magic,
+ req_msg->hdr.tsx_id, &response);
+ if (status != PJ_SUCCESS) {
+ return status;
+ }
+
+ /* Add error code attribute */
+ if (err_code) {
+ status = pj_stun_msg_add_errcode_attr(pool, response,
+ err_code, err_msg);
+ if (status != PJ_SUCCESS) {
+ return status;
+ }
+ }
+
+ *p_response = response;
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Parse incoming packet into STUN message.
+ *
+ * On success, *p_msg receives a message allocated from 'pool' and
+ * *p_parsed_len (if given) the number of bytes consumed. When parsing
+ * a REQUEST fails, *p_response (if given) may receive a ready-made
+ * error response for the caller to send back. Unknown non-mandatory
+ * attributes (type > 0x7FFF) are kept as generic binary attributes;
+ * an unknown mandatory attribute aborts the parse.
+ */
+PJ_DEF(pj_status_t) pj_stun_msg_decode(pj_pool_t *pool,
+				       const pj_uint8_t *pdu,
+				       pj_size_t pdu_len,
+				       unsigned options,
+				       pj_stun_msg **p_msg,
+				       pj_size_t *p_parsed_len,
+				       pj_stun_msg **p_response)
+{
+
+    pj_stun_msg *msg;
+    /* NOTE(review): uattr_cnt is initialized below but never used */
+    unsigned uattr_cnt;
+    const pj_uint8_t *start_pdu = pdu;
+    pj_bool_t has_msg_int = PJ_FALSE;
+    pj_bool_t has_fingerprint = PJ_FALSE;
+    pj_status_t status;
+
+    PJ_UNUSED_ARG(options);
+
+    PJ_ASSERT_RETURN(pool && pdu && pdu_len && p_msg, PJ_EINVAL);
+    PJ_ASSERT_RETURN(sizeof(pj_stun_msg_hdr) == 20, PJ_EBUG);
+
+    if (p_parsed_len)
+	*p_parsed_len = 0;
+    if (p_response)
+	*p_response = NULL;
+
+    /* Check if this is a STUN message, if necessary */
+    if (options & PJ_STUN_CHECK_PACKET) {
+	status = pj_stun_msg_check(pdu, pdu_len, options);
+	if (status != PJ_SUCCESS)
+	    return status;
+    }
+
+    /* Create the message, copy the header, and convert to host byte order */
+    msg = PJ_POOL_ZALLOC_T(pool, pj_stun_msg);
+    pj_memcpy(&msg->hdr, pdu, sizeof(pj_stun_msg_hdr));
+    msg->hdr.type = pj_ntohs(msg->hdr.type);
+    msg->hdr.length = pj_ntohs(msg->hdr.length);
+    msg->hdr.magic = pj_ntohl(msg->hdr.magic);
+
+    pdu += sizeof(pj_stun_msg_hdr);
+    /* pdu_len -= sizeof(pj_stun_msg_hdr); */
+    /* From here on, pdu_len is the body length announced by the header.
+     * NOTE(review): this is only cross-checked against the actual
+     * packet size when PJ_STUN_CHECK_PACKET was requested above.
+     */
+    pdu_len = msg->hdr.length;
+
+    /* No need to create response if this is not a request */
+    if (!PJ_STUN_IS_REQUEST(msg->hdr.type))
+	p_response = NULL;
+
+    /* Parse attributes (each iteration consumes one TLV) */
+    uattr_cnt = 0;
+    while (pdu_len >= 4) {
+	unsigned attr_type, attr_val_len;
+	const struct attr_desc *adesc;
+
+	/* Get attribute type and length. If length is not aligned
+	 * to 4 bytes boundary, add padding.
+	 */
+	attr_type = GETVAL16H(pdu, 0);
+	attr_val_len = GETVAL16H(pdu, 2);
+	attr_val_len = (attr_val_len + 3) & (~3);
+
+	/* Check length */
+	if (pdu_len < attr_val_len) {
+	    pj_str_t err_msg;
+	    char err_msg_buf[80];
+
+	    err_msg.ptr = err_msg_buf;
+	    err_msg.slen = pj_ansi_snprintf(err_msg_buf, sizeof(err_msg_buf),
+					    "Attribute %s has invalid length",
+					    pj_stun_get_attr_name(attr_type));
+
+	    PJ_LOG(4,(THIS_FILE, "Error decoding message: %.*s",
+		      (int)err_msg.slen, err_msg.ptr));
+
+	    if (p_response) {
+		pj_stun_msg_create_response(pool, msg,
+					    PJ_STUN_SC_BAD_REQUEST,
+					    &err_msg, p_response);
+	    }
+	    return PJNATH_ESTUNINATTRLEN;
+	}
+
+	/* Get the attribute descriptor */
+	adesc = find_attr_desc(attr_type);
+
+	if (adesc == NULL) {
+	    /* Unrecognized attribute */
+	    pj_stun_binary_attr *attr = NULL;
+
+	    PJ_LOG(5,(THIS_FILE, "Unrecognized attribute type 0x%x",
+		      attr_type));
+
+	    /* Is this a fatal condition? */
+	    if (attr_type <= 0x7FFF) {
+		/* This is a mandatory attribute, we must return error
+		 * if we don't understand the attribute.
+		 */
+		if (p_response) {
+		    unsigned err_code = PJ_STUN_SC_UNKNOWN_ATTRIBUTE;
+
+		    status = pj_stun_msg_create_response(pool, msg,
+							 err_code, NULL,
+							 p_response);
+		    if (status==PJ_SUCCESS) {
+			pj_uint16_t d = (pj_uint16_t)attr_type;
+			pj_stun_msg_add_unknown_attr(pool, *p_response, 1, &d);
+		    }
+		}
+
+		return PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_UNKNOWN_ATTRIBUTE);
+	    }
+
+	    /* Make sure we have rooms for the new attribute */
+	    if (msg->attr_count >= PJ_STUN_MAX_ATTR) {
+		if (p_response) {
+		    pj_stun_msg_create_response(pool, msg,
+						PJ_STUN_SC_SERVER_ERROR,
+						NULL, p_response);
+		}
+		return PJNATH_ESTUNTOOMANYATTR;
+	    }
+
+	    /* Create binary attribute to represent this.
+	     * Note: the UNPADDED value length is re-read from the PDU.
+	     */
+	    status = pj_stun_binary_attr_create(pool, attr_type, pdu+4,
+						GETVAL16H(pdu, 2), &attr);
+	    if (status != PJ_SUCCESS) {
+		if (p_response) {
+		    pj_stun_msg_create_response(pool, msg,
+						PJ_STUN_SC_SERVER_ERROR,
+						NULL, p_response);
+		}
+
+		PJ_LOG(4,(THIS_FILE,
+			  "Error parsing unknown STUN attribute type %d",
+			  attr_type));
+
+		return status;
+	    }
+
+	    /* Add the attribute */
+	    msg->attr[msg->attr_count++] = &attr->hdr;
+
+	} else {
+	    void *attr;
+	    char err_msg1[PJ_ERR_MSG_SIZE],
+		 err_msg2[PJ_ERR_MSG_SIZE];
+
+	    /* Parse the attribute */
+	    status = (adesc->decode_attr)(pool, pdu, &msg->hdr, &attr);
+
+	    if (status != PJ_SUCCESS) {
+		pj_strerror(status, err_msg1, sizeof(err_msg1));
+
+		if (p_response) {
+		    pj_str_t e;
+
+		    e.ptr = err_msg2;
+		    e.slen= pj_ansi_snprintf(err_msg2, sizeof(err_msg2),
+					     "%s in %s",
+					     err_msg1,
+					     pj_stun_get_attr_name(attr_type));
+
+		    pj_stun_msg_create_response(pool, msg,
+						PJ_STUN_SC_BAD_REQUEST,
+						&e, p_response);
+		}
+
+		PJ_LOG(4,(THIS_FILE,
+			  "Error parsing STUN attribute %s: %s",
+			  pj_stun_get_attr_name(attr_type),
+			  err_msg1));
+
+		return status;
+	    }
+
+	    /* Enforce attribute ordering: at most one MESSAGE-INTEGRITY,
+	     * at most one FINGERPRINT, and nothing after FINGERPRINT.
+	     */
+	    if (attr_type == PJ_STUN_ATTR_MESSAGE_INTEGRITY &&
+		!has_fingerprint)
+	    {
+		if (has_msg_int) {
+		    /* Already has MESSAGE-INTEGRITY */
+		    if (p_response) {
+			pj_stun_msg_create_response(pool, msg,
+						    PJ_STUN_SC_BAD_REQUEST,
+						    NULL, p_response);
+		    }
+		    return PJNATH_ESTUNDUPATTR;
+		}
+		has_msg_int = PJ_TRUE;
+
+	    } else if (attr_type == PJ_STUN_ATTR_FINGERPRINT) {
+		if (has_fingerprint) {
+		    /* Already has FINGERPRINT */
+		    if (p_response) {
+			pj_stun_msg_create_response(pool, msg,
+						    PJ_STUN_SC_BAD_REQUEST,
+						    NULL, p_response);
+		    }
+		    return PJNATH_ESTUNDUPATTR;
+		}
+		has_fingerprint = PJ_TRUE;
+	    } else {
+		if (has_fingerprint) {
+		    /* Another attribute is found which is not FINGERPRINT
+		     * after FINGERPRINT. Note that non-FINGERPRINT is
+		     * allowed to appear after M-I
+		     */
+		    if (p_response) {
+			pj_stun_msg_create_response(pool, msg,
+						    PJ_STUN_SC_BAD_REQUEST,
+						    NULL, p_response);
+		    }
+		    return PJNATH_ESTUNFINGERPOS;
+		}
+	    }
+
+	    /* Make sure we have rooms for the new attribute */
+	    if (msg->attr_count >= PJ_STUN_MAX_ATTR) {
+		if (p_response) {
+		    pj_stun_msg_create_response(pool, msg,
+						PJ_STUN_SC_SERVER_ERROR,
+						NULL, p_response);
+		}
+		return PJNATH_ESTUNTOOMANYATTR;
+	    }
+
+	    /* Add the attribute */
+	    msg->attr[msg->attr_count++] = (pj_stun_attr_hdr*)attr;
+	}
+
+	/* Next attribute: advance past header + padded value, clamping
+	 * to the end of the body when the remainder is smaller.
+	 */
+	if (attr_val_len + 4 >= pdu_len) {
+	    pdu += pdu_len;
+	    pdu_len = 0;
+	} else {
+	    pdu += (attr_val_len + 4);
+	    pdu_len -= (attr_val_len + 4);
+	}
+    }
+
+    if (pdu_len > 0) {
+	/* Stray trailing bytes */
+	PJ_LOG(4,(THIS_FILE,
+		  "Error decoding STUN message: unparsed trailing %d bytes",
+		  pdu_len));
+	return PJNATH_EINSTUNMSGLEN;
+    }
+
+    *p_msg = msg;
+
+    if (p_parsed_len)
+	*p_parsed_len = (pdu - start_pdu);
+
+    return PJ_SUCCESS;
+}
+
+/*
+static char *print_binary(const pj_uint8_t *data, unsigned data_len)
+{
+ static char static_buffer[1024];
+ char *buffer = static_buffer;
+ unsigned length=sizeof(static_buffer), i;
+
+ if (length < data_len * 2 + 8)
+ return "";
+
+ pj_ansi_sprintf(buffer, ", data=");
+ buffer += 7;
+
+ for (i=0; i<data_len; ++i) {
+ pj_ansi_sprintf(buffer, "%02x", (*data) & 0xFF);
+ buffer += 2;
+ data++;
+ }
+
+ pj_ansi_sprintf(buffer, "\n");
+ buffer++;
+
+ return static_buffer;
+}
+*/
+
+/*
+ * Print the message structure to a buffer.
+ *
+ * Encodes all attributes into 'buf', then computes MESSAGE-INTEGRITY
+ * (using 'key', which is then mandatory) and/or FINGERPRINT over the
+ * encoded bytes, enforcing their required positions (M-I last, or last
+ * before FINGERPRINT; FINGERPRINT last). The padded message length is
+ * written back into both the buffer and msg->hdr.length.
+ */
+PJ_DEF(pj_status_t) pj_stun_msg_encode(pj_stun_msg *msg,
+				       pj_uint8_t *buf, pj_size_t buf_size,
+				       unsigned options,
+				       const pj_str_t *key,
+				       pj_size_t *p_msg_len)
+{
+    pj_uint8_t *start = buf;
+    pj_stun_msgint_attr *amsgint = NULL;
+    pj_stun_fingerprint_attr *afingerprint = NULL;
+    unsigned printed = 0, body_len;
+    pj_status_t status;
+    unsigned i;
+
+
+    PJ_ASSERT_RETURN(msg && buf && buf_size, PJ_EINVAL);
+
+    PJ_UNUSED_ARG(options);
+    PJ_ASSERT_RETURN(options == 0, PJ_EINVAL);
+
+    /* Copy the message header part and convert the header fields to
+     * network byte order
+     */
+    if (buf_size < sizeof(pj_stun_msg_hdr))
+	return PJ_ETOOSMALL;
+
+    PUTVAL16H(buf, 0, msg->hdr.type);
+    PUTVAL16H(buf, 2, 0);	/* length will be calculated later */
+    PUTVAL32H(buf, 4, msg->hdr.magic);
+    pj_memcpy(buf+8, msg->hdr.tsx_id, sizeof(msg->hdr.tsx_id));
+
+    buf += sizeof(pj_stun_msg_hdr);
+    buf_size -= sizeof(pj_stun_msg_hdr);
+
+    /* Encode each attribute to the message. M-I and FINGERPRINT are
+     * NOT encoded here: they must be computed over the bytes encoded
+     * so far, so the loop just records them and stops.
+     */
+    for (i=0; i<msg->attr_count; ++i) {
+	const struct attr_desc *adesc;
+	const pj_stun_attr_hdr *attr_hdr = msg->attr[i];
+
+	if (attr_hdr->type == PJ_STUN_ATTR_MESSAGE_INTEGRITY) {
+	    pj_assert(amsgint == NULL);
+	    amsgint = (pj_stun_msgint_attr*) attr_hdr;
+
+	    /* Stop when encountering MESSAGE-INTEGRITY */
+	    break;
+
+	} else if (attr_hdr->type == PJ_STUN_ATTR_FINGERPRINT) {
+	    afingerprint = (pj_stun_fingerprint_attr*) attr_hdr;
+	    break;
+	}
+
+	adesc = find_attr_desc(attr_hdr->type);
+	if (adesc) {
+	    status = adesc->encode_attr(attr_hdr, buf, buf_size, &msg->hdr,
+					&printed);
+	} else {
+	    /* This may be a generic attribute */
+	    const pj_stun_binary_attr *bin_attr = (const pj_stun_binary_attr*)
+						  attr_hdr;
+	    PJ_ASSERT_RETURN(bin_attr->magic == PJ_STUN_MAGIC, PJ_EBUG);
+	    status = encode_binary_attr(bin_attr, buf, buf_size, &msg->hdr,
+					&printed);
+	}
+
+	if (status != PJ_SUCCESS)
+	    return status;
+
+	buf += printed;
+	buf_size -= printed;
+    }
+
+    /* We may have stopped printing attribute because we found
+     * MESSAGE-INTEGRITY or FINGERPRINT. Scan the rest of the
+     * attributes.
+     */
+    for ( ++i; i<msg->attr_count; ++i) {
+	const pj_stun_attr_hdr *attr_hdr = msg->attr[i];
+
+	/* There mustn't any attribute after FINGERPRINT */
+	PJ_ASSERT_RETURN(afingerprint == NULL, PJNATH_ESTUNFINGERPOS);
+
+	if (attr_hdr->type == PJ_STUN_ATTR_MESSAGE_INTEGRITY) {
+	    /* There mustn't be MESSAGE-INTEGRITY before */
+	    PJ_ASSERT_RETURN(amsgint == NULL,
+			     PJNATH_ESTUNMSGINTPOS);
+	    amsgint = (pj_stun_msgint_attr*) attr_hdr;
+
+	} else if (attr_hdr->type == PJ_STUN_ATTR_FINGERPRINT) {
+	    afingerprint = (pj_stun_fingerprint_attr*) attr_hdr;
+	}
+    }
+
+#if PJ_STUN_OLD_STYLE_MI_FINGERPRINT
+    /*
+     * This is the old style MESSAGE-INTEGRITY and FINGERPRINT
+     * calculation, used in rfc3489bis-06 and older.
+     */
+    /* We MUST update the message length in the header NOW before
+     * calculating MESSAGE-INTEGRITY and FINGERPRINT.
+     * Note that length is not including the 20 bytes header.
+     */
+    /* 24 = encoded size of M-I attribute, 8 = encoded FINGERPRINT */
+    if (amsgint && afingerprint) {
+	body_len = (pj_uint16_t)((buf - start) - 20 + 24 + 8);
+    } else if (amsgint) {
+	body_len = (pj_uint16_t)((buf - start) - 20 + 24);
+    } else if (afingerprint) {
+	body_len = (pj_uint16_t)((buf - start) - 20 + 8);
+    } else {
+	body_len = (pj_uint16_t)((buf - start) - 20);
+    }
+#else
+    /* If MESSAGE-INTEGRITY is present, include the M-I attribute
+     * in message length before calculating M-I
+     */
+    if (amsgint) {
+	body_len = (pj_uint16_t)((buf - start) - 20 + 24);
+    } else {
+	body_len = (pj_uint16_t)((buf - start) - 20);
+    }
+#endif	/* PJ_STUN_OLD_STYLE_MI_FINGERPRINT */
+
+    /* hdr->length = pj_htons(length); */
+    PUTVAL16H(start, 2, (pj_uint16_t)body_len);
+
+    /* Calculate message integrity, if present */
+    if (amsgint != NULL) {
+	pj_hmac_sha1_context ctx;
+
+	/* Key MUST be specified */
+	PJ_ASSERT_RETURN(key, PJ_EINVALIDOP);
+
+	/* MESSAGE-INTEGRITY must be the last attribute in the message, or
+	 * the last attribute before FINGERPRINT.
+	 */
+	if (msg->attr_count>1 && i < msg->attr_count-2) {
+	    /* Should not happen for message generated by us */
+	    pj_assert(PJ_FALSE);
+	    return PJNATH_ESTUNMSGINTPOS;
+
+	} else if (i == msg->attr_count-2)  {
+	    if (msg->attr[i+1]->type != PJ_STUN_ATTR_FINGERPRINT) {
+		/* Should not happen for message generated by us */
+		pj_assert(PJ_FALSE);
+		return PJNATH_ESTUNMSGINTPOS;
+	    } else {
+		afingerprint = (pj_stun_fingerprint_attr*) msg->attr[i+1];
+	    }
+	}
+
+	/* Calculate HMAC-SHA1 digest, add zero padding to input
+	 * if necessary to make the input 64 bytes aligned.
+	 */
+	pj_hmac_sha1_init(&ctx, (const pj_uint8_t*)key->ptr, key->slen);
+	pj_hmac_sha1_update(&ctx, (const pj_uint8_t*)start, buf-start);
+#if PJ_STUN_OLD_STYLE_MI_FINGERPRINT
+	// These are obsoleted in rfc3489bis-08
+	if ((buf-start) & 0x3F) {
+	    pj_uint8_t zeroes[64];
+	    pj_bzero(zeroes, sizeof(zeroes));
+	    pj_hmac_sha1_update(&ctx, zeroes, 64-((buf-start) & 0x3F));
+	}
+#endif	/* PJ_STUN_OLD_STYLE_MI_FINGERPRINT */
+	pj_hmac_sha1_final(&ctx, amsgint->hmac);
+
+	/* Put this attribute in the message */
+	status = encode_msgint_attr(amsgint, buf, buf_size,
+				    &msg->hdr, &printed);
+	if (status != PJ_SUCCESS)
+	    return status;
+
+	buf += printed;
+	buf_size -= printed;
+    }
+
+    /* Calculate FINGERPRINT if present */
+    if (afingerprint != NULL) {
+
+#if !PJ_STUN_OLD_STYLE_MI_FINGERPRINT
+	/* Update message length to include FINGERPRINT (8 bytes)
+	 * before computing the CRC, per rfc3489bis-08 and later.
+	 */
+	PUTVAL16H(start, 2,
+		 (pj_uint16_t)(GETVAL16H(start, 2)+8));
+#endif
+
+	afingerprint->value = pj_crc32_calc(start, buf-start);
+	afingerprint->value ^= STUN_XOR_FINGERPRINT;
+
+	/* Put this attribute in the message */
+	status = encode_uint_attr(afingerprint, buf, buf_size,
+				  &msg->hdr, &printed);
+	if (status != PJ_SUCCESS)
+	    return status;
+
+	buf += printed;
+	buf_size -= printed;
+    }
+
+    /* Update message length. */
+    msg->hdr.length = (pj_uint16_t) ((buf - start) - 20);
+
+    /* Return the length */
+    if (p_msg_len)
+	*p_msg_len = (buf - start);
+
+    return PJ_SUCCESS;
+}
+
+
+/*
+ * Find STUN attribute in the STUN message, starting from the specified
+ * index. Returns the first attribute of the requested type at or after
+ * 'index', or NULL when none is found.
+ */
+PJ_DEF(pj_stun_attr_hdr*) pj_stun_msg_find_attr( const pj_stun_msg *msg,
+						 int attr_type,
+						 unsigned index)
+{
+    unsigned j;
+
+    PJ_ASSERT_RETURN(msg, NULL);
+
+    /* Linear scan from the requested starting position */
+    for (j = index; j < msg->attr_count; ++j) {
+	if (msg->attr[j]->type == attr_type)
+	    return (pj_stun_attr_hdr*) msg->attr[j];
+    }
+
+    return NULL;
+}
+
+
+/*
+ * Clone a STUN attribute.
+ *
+ * Known attributes are cloned via their type descriptor; anything else
+ * must be a generic binary attribute (verified by its magic value).
+ * Returns NULL on an unrecognized attribute without the binary magic.
+ */
+PJ_DEF(pj_stun_attr_hdr*) pj_stun_attr_clone( pj_pool_t *pool,
+					      const pj_stun_attr_hdr *attr)
+{
+    const struct attr_desc *adesc;
+
+    /* Get the attribute descriptor */
+    adesc = find_attr_desc(attr->type);
+    if (adesc) {
+	return (pj_stun_attr_hdr*) (*adesc->clone_attr)(pool, attr);
+    } else {
+	/* Clone generic attribute */
+	const pj_stun_binary_attr *bin_attr = (const pj_stun_binary_attr*)
+					      attr;
+	/* Fix: removed the duplicate magic comparison that followed this
+	 * assertion -- PJ_ASSERT_RETURN already returns NULL when the
+	 * magic does not match, so the second check was dead code.
+	 */
+	PJ_ASSERT_RETURN(bin_attr->magic == PJ_STUN_MAGIC, NULL);
+	return (pj_stun_attr_hdr*) clone_binary_attr(pool, attr);
+    }
+}
+
+
diff --git a/pjnath/src/pjnath/stun_msg_dump.c b/pjnath/src/pjnath/stun_msg_dump.c
new file mode 100644
index 0000000..c7488c9
--- /dev/null
+++ b/pjnath/src/pjnath/stun_msg_dump.c
@@ -0,0 +1,298 @@
+/* $Id: stun_msg_dump.c 3553 2011-05-05 06:14:19Z nanang $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <pjnath/stun_msg.h>
+#include <pjnath/errno.h>
+#include <pj/assert.h>
+#include <pj/os.h>
+#include <pj/string.h>
+
+#if PJ_LOG_MAX_LEVEL > 0
+
+
+/* Consume the result of the preceding print call: jump to the caller's
+ * on_return label when the call failed (len < 1) or the output would not
+ * fit in the remaining [p, end) space; otherwise advance the write
+ * cursor. Used by print_attr() and pj_stun_msg_dump(), both of which
+ * declare 'p', 'end', 'len' and an on_return label.
+ */
+#define APPLY() if (len < 1 || len >= (end-p)) \
+		    goto on_return; \
+		p += len
+
+/* Render ", data=<hex bytes>\n" into 'buffer'. Returns the number of
+ * characters written (excluding the NUL terminator), or -1 when the
+ * buffer is too small.
+ */
+static int print_binary(char *buffer, unsigned length,
+			const pj_uint8_t *data, unsigned data_len)
+{
+    unsigned i;
+
+    /* Space needed: ", data=" (7) + 2 hex chars per byte + "\n" (1) +
+     * NUL (1). Fix: the old check accepted length == data_len*2 + 8,
+     * which made the final NUL terminator overflow the buffer by one
+     * byte; require one extra byte for it.
+     */
+    if (length < data_len * 2 + 9)
+	return -1;
+
+    pj_ansi_sprintf(buffer, ", data=");
+    buffer += 7;
+
+    for (i=0; i<data_len; ++i) {
+	pj_ansi_sprintf(buffer, "%02x", (*data) & 0xFF);
+	buffer += 2;
+	data++;
+    }
+
+    pj_ansi_sprintf(buffer, "\n");
+    buffer++;
+
+    /* Characters written, excluding the NUL terminator */
+    return data_len * 2 + 8;
+}
+
+/* Print a single attribute into 'buffer' (capacity 'length' chars).
+ * Returns the number of characters written; on a failed or overflowing
+ * print step it returns that step's raw result via on_return, which the
+ * caller's APPLY() check then rejects.
+ */
+static int print_attr(char *buffer, unsigned length,
+		      const pj_stun_attr_hdr *ahdr)
+{
+    char *p = buffer, *end = buffer + length;
+    const char *attr_name = pj_stun_get_attr_name(ahdr->type);
+    char attr_buf[32];
+    int len;
+
+    /* Unknown attribute names start with '?'; print raw type instead */
+    if (*attr_name == '?') {
+	pj_ansi_snprintf(attr_buf, sizeof(attr_buf), "Attr 0x%x",
+			 ahdr->type);
+	attr_name = attr_buf;
+    }
+
+    len = pj_ansi_snprintf(p, end-p,
+			   "  %s: length=%d",
+			   attr_name,
+			   (int)ahdr->length);
+    APPLY();
+
+
+    /* Value formatting depends on the attribute's wire type */
+    switch (ahdr->type) {
+    case PJ_STUN_ATTR_MAPPED_ADDR:
+    case PJ_STUN_ATTR_RESPONSE_ADDR:
+    case PJ_STUN_ATTR_SOURCE_ADDR:
+    case PJ_STUN_ATTR_CHANGED_ADDR:
+    case PJ_STUN_ATTR_REFLECTED_FROM:
+    case PJ_STUN_ATTR_XOR_PEER_ADDR:
+    case PJ_STUN_ATTR_XOR_RELAYED_ADDR:
+    case PJ_STUN_ATTR_XOR_MAPPED_ADDR:
+    case PJ_STUN_ATTR_XOR_REFLECTED_FROM:
+    case PJ_STUN_ATTR_ALTERNATE_SERVER:
+	{
+	    const pj_stun_sockaddr_attr *attr;
+
+	    attr = (const pj_stun_sockaddr_attr*)ahdr;
+
+	    if (attr->sockaddr.addr.sa_family == pj_AF_INET()) {
+		len = pj_ansi_snprintf(p, end-p,
+				       ", IPv4 addr=%s:%d\n",
+				       pj_inet_ntoa(attr->sockaddr.ipv4.sin_addr),
+				       pj_ntohs(attr->sockaddr.ipv4.sin_port));
+
+	    } else if (attr->sockaddr.addr.sa_family == pj_AF_INET6()) {
+		len = pj_ansi_snprintf(p, end-p,
+				       ", IPv6 addr present\n");
+	    } else {
+		len = pj_ansi_snprintf(p, end-p,
+				       ", INVALID ADDRESS FAMILY!\n");
+	    }
+	    APPLY();
+	}
+	break;
+
+    case PJ_STUN_ATTR_CHANNEL_NUMBER:
+	{
+	    const pj_stun_uint_attr *attr;
+
+	    attr = (const pj_stun_uint_attr*)ahdr;
+	    /* NOTE(review): args are cast to int but formatted with %u */
+	    len = pj_ansi_snprintf(p, end-p,
+				   ", chnum=%u (0x%x)\n",
+				   (int)PJ_STUN_GET_CH_NB(attr->value),
+				   (int)PJ_STUN_GET_CH_NB(attr->value));
+	    APPLY();
+	}
+	break;
+
+    case PJ_STUN_ATTR_CHANGE_REQUEST:
+    case PJ_STUN_ATTR_LIFETIME:
+    case PJ_STUN_ATTR_BANDWIDTH:
+    case PJ_STUN_ATTR_REQ_ADDR_TYPE:
+    case PJ_STUN_ATTR_EVEN_PORT:
+    case PJ_STUN_ATTR_REQ_TRANSPORT:
+    case PJ_STUN_ATTR_TIMER_VAL:
+    case PJ_STUN_ATTR_PRIORITY:
+    case PJ_STUN_ATTR_FINGERPRINT:
+    case PJ_STUN_ATTR_REFRESH_INTERVAL:
+    case PJ_STUN_ATTR_ICMP:
+	{
+	    const pj_stun_uint_attr *attr;
+
+	    attr = (const pj_stun_uint_attr*)ahdr;
+	    len = pj_ansi_snprintf(p, end-p,
+				   ", value=%u (0x%x)\n",
+				   (pj_uint32_t)attr->value,
+				   (pj_uint32_t)attr->value);
+	    APPLY();
+	}
+	break;
+
+    case PJ_STUN_ATTR_USERNAME:
+    case PJ_STUN_ATTR_PASSWORD:
+    case PJ_STUN_ATTR_REALM:
+    case PJ_STUN_ATTR_NONCE:
+    case PJ_STUN_ATTR_SOFTWARE:
+	{
+	    const pj_stun_string_attr *attr;
+
+	    attr = (pj_stun_string_attr*)ahdr;
+	    len = pj_ansi_snprintf(p, end-p,
+				   ", value=\"%.*s\"\n",
+				   (int)attr->value.slen,
+				   attr->value.ptr);
+	    APPLY();
+	}
+	break;
+
+    case PJ_STUN_ATTR_ERROR_CODE:
+	{
+	    const pj_stun_errcode_attr *attr;
+
+	    attr = (const pj_stun_errcode_attr*) ahdr;
+	    len = pj_ansi_snprintf(p, end-p,
+				   ", err_code=%d, reason=\"%.*s\"\n",
+				   attr->err_code,
+				   (int)attr->reason.slen,
+				   attr->reason.ptr);
+	    APPLY();
+	}
+	break;
+
+    case PJ_STUN_ATTR_UNKNOWN_ATTRIBUTES:
+	{
+	    const pj_stun_unknown_attr *attr;
+	    unsigned j;
+
+	    attr = (const pj_stun_unknown_attr*) ahdr;
+
+	    len = pj_ansi_snprintf(p, end-p,
+				   ", unknown list:");
+	    APPLY();
+
+	    for (j=0; j<attr->attr_count; ++j) {
+		len = pj_ansi_snprintf(p, end-p,
+				       " %d",
+				       (int)attr->attrs[j]);
+		APPLY();
+	    }
+	}
+	break;
+
+    case PJ_STUN_ATTR_MESSAGE_INTEGRITY:
+	{
+	    const pj_stun_msgint_attr *attr;
+
+	    attr = (const pj_stun_msgint_attr*) ahdr;
+	    /* HMAC-SHA1 digest is always 20 bytes */
+	    len = print_binary(p, end-p, attr->hmac, 20);
+	    APPLY();
+	}
+	break;
+
+    case PJ_STUN_ATTR_DATA:
+	{
+	    const pj_stun_binary_attr *attr;
+
+	    attr = (const pj_stun_binary_attr*) ahdr;
+	    len = print_binary(p, end-p, attr->data, attr->length);
+	    APPLY();
+	}
+	break;
+    case PJ_STUN_ATTR_ICE_CONTROLLED:
+    case PJ_STUN_ATTR_ICE_CONTROLLING:
+    case PJ_STUN_ATTR_RESERVATION_TOKEN:
+	{
+	    const pj_stun_uint64_attr *attr;
+	    pj_uint8_t data[8];
+	    int i;
+
+	    attr = (const pj_stun_uint64_attr*) ahdr;
+
+	    /* Reverse the in-memory byte order so the most significant
+	     * byte is printed first.
+	     */
+	    for (i=0; i<8; ++i)
+		data[i] = ((const pj_uint8_t*)&attr->value)[7-i];
+
+	    len = print_binary(p, end-p, data, 8);
+	    APPLY();
+	}
+	break;
+    case PJ_STUN_ATTR_USE_CANDIDATE:
+    case PJ_STUN_ATTR_DONT_FRAGMENT:
+    default:
+	/* Flag-only or unhandled attribute: just terminate the line */
+	len = pj_ansi_snprintf(p, end-p, "\n");
+	APPLY();
+	break;
+    }
+
+    return (p-buffer);
+
+on_return:
+    return len;
+}
+
+
+/*
+ * Dump STUN message to a printable string output.
+ *
+ * Writes at most 'length'-1 characters plus a NUL terminator into
+ * 'buffer'; stops early (silently truncating) when the buffer fills.
+ * Returns 'buffer'; *printed_len (if given) receives the number of
+ * characters written, excluding the NUL.
+ */
+PJ_DEF(char*) pj_stun_msg_dump(const pj_stun_msg *msg,
+			       char *buffer,
+			       unsigned length,
+			       unsigned *printed_len)
+{
+    char *p, *end;
+    int len;
+    unsigned i;
+    pj_uint32_t tsx_words[3];
+
+    PJ_ASSERT_RETURN(msg && buffer && length, NULL);
+
+    PJ_CHECK_STACK();
+
+    p = buffer;
+    end = buffer + length;
+
+    len = pj_ansi_snprintf(p, end-p, "STUN %s %s\n",
+			   pj_stun_get_method_name(msg->hdr.type),
+			   pj_stun_get_class_name(msg->hdr.type));
+    APPLY();
+
+    /* Fix: copy the 12-byte transaction ID into properly typed storage
+     * instead of casting pj_uint8_t* to pj_uint32_t* -- the old casts
+     * violated strict-aliasing rules; the printed value is unchanged.
+     */
+    pj_memcpy(tsx_words, msg->hdr.tsx_id, sizeof(tsx_words));
+
+    len = pj_ansi_snprintf(p, end-p,
+			   " Hdr: length=%d, magic=%08x, tsx_id=%08x%08x%08x\n"
+			   " Attributes:\n",
+			   msg->hdr.length,
+			   msg->hdr.magic,
+			   tsx_words[0],
+			   tsx_words[1],
+			   tsx_words[2]);
+    APPLY();
+
+    for (i=0; i<msg->attr_count; ++i) {
+	len = print_attr(p, end-p, msg->attr[i]);
+	APPLY();
+    }
+
+on_return:
+    /* APPLY() guarantees p < end here, so the NUL fits */
+    *p = '\0';
+    if (printed_len)
+	*printed_len = (p-buffer);
+    return buffer;
+
+#undef APPLY
+}
+
+
+#endif /* PJ_LOG_MAX_LEVEL > 0 */
+
diff --git a/pjnath/src/pjnath/stun_session.c b/pjnath/src/pjnath/stun_session.c
new file mode 100644
index 0000000..45d5313
--- /dev/null
+++ b/pjnath/src/pjnath/stun_session.c
@@ -0,0 +1,1436 @@
+/* $Id: stun_session.c 3843 2011-10-24 14:13:35Z bennylp $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <pjnath/stun_session.h>
+#include <pjnath/errno.h>
+#include <pjlib.h>
+
+/* The STUN session instance.  One session multiplexes any number of
+ * outgoing client transactions and cached responses over a single
+ * application-provided transport callback.
+ */
+struct pj_stun_session
+{
+ /* Shared STUN endpoint config (pool factory, timer heap, etc). */
+ pj_stun_config *cfg;
+ /* Long-lived pool that owns this structure. */
+ pj_pool_t *pool;
+ /* Protects all session state; see also 'busy' below. */
+ pj_lock_t *lock;
+ /* PJ_TRUE if we created 'lock' and must destroy it ourselves. */
+ pj_bool_t delete_lock;
+ /* Application callbacks (copied by value at create time). */
+ pj_stun_session_cb cb;
+ void *user_data;
+
+ /* Incremented around every callback; combined with destroy_request
+ * this defers pj_stun_session_destroy() called from inside a callback.
+ */
+ pj_atomic_t *busy;
+ pj_bool_t destroy_request;
+
+ /* Add FINGERPRINT attribute to outgoing messages when set. */
+ pj_bool_t use_fingerprint;
+
+ /* Scratch pool for parsing incoming packets. */
+ pj_pool_t *rx_pool;
+
+#if PJ_LOG_MAX_LEVEL >= 5
+ /* Buffer for pj_stun_msg_dump() output in dump_tx_msg(). */
+ char dump_buf[1000];
+#endif
+ /* Bitmask of PJ_STUN_SESS_LOG_* flags controlling message dumps. */
+ unsigned log_flag;
+
+ /* Authentication settings and long-term-credential state. */
+ pj_stun_auth_type auth_type;
+ pj_stun_auth_cred cred;
+ int auth_retry;
+ pj_str_t next_nonce;
+ pj_str_t server_realm;
+
+ /* Value for the SOFTWARE attribute; empty to suppress it. */
+ pj_str_t srv_name;
+
+ /* List heads: outstanding client requests and cached responses. */
+ pj_stun_tx_data pending_request_list;
+ pj_stun_tx_data cached_response_list;
+};
+
+/* Short alias for the session's log sender name. */
+#define SNAME(s_) ((s_)->pool->obj_name)
+
+/* Verbose tracing; compiled out entirely below log level 5. */
+#if PJ_LOG_MAX_LEVEL >= 5
+# define TRACE_(expr) PJ_LOG(5,expr)
+#else
+# define TRACE_(expr)
+#endif
+
+/* Log an error status using the session name as the log sender. */
+#define LOG_ERR_(sess,title,rc) pjnath_perror(sess->pool->obj_name,title,rc)
+
+/* Sizing for the per-message tx_data pools created by create_tdata(). */
+#define TDATA_POOL_SIZE PJNATH_POOL_LEN_STUN_TDATA
+#define TDATA_POOL_INC PJNATH_POOL_INC_STUN_TDATA
+
+
+/* Forward declarations of the client-transaction callbacks defined below. */
+static void stun_tsx_on_complete(pj_stun_client_tsx *tsx,
+ pj_status_t status,
+ const pj_stun_msg *response,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len);
+static pj_status_t stun_tsx_on_send_msg(pj_stun_client_tsx *tsx,
+ const void *stun_pkt,
+ pj_size_t pkt_size);
+static void stun_tsx_on_destroy(pj_stun_client_tsx *tsx);
+
+/* Callback table handed to every client transaction this session creates. */
+static pj_stun_tsx_cb tsx_cb =
+{
+ &stun_tsx_on_complete,
+ &stun_tsx_on_send_msg,
+ &stun_tsx_on_destroy
+};
+
+
+/* Register a newly-sent request at the head of the pending-request list. */
+static pj_status_t tsx_add(pj_stun_session *sess,
+                           pj_stun_tx_data *tdata)
+{
+    pj_list_push_front(&sess->pending_request_list, tdata);
+    return PJ_SUCCESS;
+}
+
+/* Detach a request from whatever list currently links it. */
+static pj_status_t tsx_erase(pj_stun_session *sess,
+                             pj_stun_tx_data *tdata)
+{
+    PJ_UNUSED_ARG(sess);
+
+    pj_list_erase(tdata);
+    return PJ_SUCCESS;
+}
+
+/* Find the pending client transaction whose magic value and transaction
+ * ID match the incoming message, or NULL when no match exists.
+ */
+static pj_stun_tx_data* tsx_lookup(pj_stun_session *sess,
+                                   const pj_stun_msg *msg)
+{
+    pj_stun_tx_data *it;
+
+    for (it = sess->pending_request_list.next;
+         it != &sess->pending_request_list;
+         it = it->next)
+    {
+        pj_assert(sizeof(it->msg_key)==sizeof(msg->hdr.tsx_id));
+        if (it->msg_magic != msg->hdr.magic)
+            continue;
+        if (pj_memcmp(it->msg_key, msg->hdr.tsx_id,
+                      sizeof(msg->hdr.tsx_id))==0)
+        {
+            return it;
+        }
+    }
+
+    return NULL;
+}
+
+/* Allocate a fresh pool and a zero-initialized transmit-data record on
+ * it.  The tdata owns its pool; releasing the pool frees everything.
+ */
+static pj_status_t create_tdata(pj_stun_session *sess,
+                                pj_stun_tx_data **p_tdata)
+{
+    pj_stun_tx_data *tdata;
+    pj_pool_t *pool;
+
+    pool = pj_pool_create(sess->cfg->pf, "tdata%p",
+                          TDATA_POOL_SIZE, TDATA_POOL_INC, NULL);
+    PJ_ASSERT_RETURN(pool, PJ_ENOMEM);
+
+    tdata = PJ_POOL_ZALLOC_T(pool, pj_stun_tx_data);
+    tdata->pool = pool;
+    tdata->sess = sess;
+    pj_list_init(tdata);
+
+    *p_tdata = tdata;
+    return PJ_SUCCESS;
+}
+
+/* Client-transaction destroy callback (fires after the scheduled-destroy
+ * delay).  Order matters: the tdata must be unlinked and the transaction
+ * destroyed before the pool is released, since the pool owns both.
+ */
+static void stun_tsx_on_destroy(pj_stun_client_tsx *tsx)
+{
+ pj_stun_tx_data *tdata;
+
+ tdata = (pj_stun_tx_data*) pj_stun_client_tsx_get_data(tsx);
+ tsx_erase(tdata->sess, tdata);
+
+ pj_stun_client_tsx_destroy(tsx);
+ pj_pool_release(tdata->pool);
+}
+
+/* Destroy a transmit-data record.
+ *
+ * With force==PJ_TRUE everything is torn down immediately (used from
+ * session destroy).  With force==PJ_FALSE a live client transaction is
+ * only *scheduled* for destruction, so that late retransmitted responses
+ * can still be absorbed; the pool is then released later from
+ * stun_tsx_on_destroy().
+ */
+static void destroy_tdata(pj_stun_tx_data *tdata, pj_bool_t force)
+{
+ /* If this is a cached response, cancel its expiry timer and take it
+ * off the cached-response list first.
+ */
+ if (tdata->res_timer.id != PJ_FALSE) {
+ pj_timer_heap_cancel(tdata->sess->cfg->timer_heap,
+ &tdata->res_timer);
+ tdata->res_timer.id = PJ_FALSE;
+ pj_list_erase(tdata);
+ }
+
+ if (force) {
+ if (tdata->client_tsx) {
+ tsx_erase(tdata->sess, tdata);
+ pj_stun_client_tsx_destroy(tdata->client_tsx);
+ }
+ pj_pool_release(tdata->pool);
+
+ } else {
+ if (tdata->client_tsx) {
+ /* Keep the transaction around briefly to swallow duplicate
+ * responses; stun_tsx_on_destroy() releases the pool.
+ */
+ pj_time_val delay = {2, 0};
+ pj_stun_client_tsx_schedule_destroy(tdata->client_tsx, &delay);
+
+ } else {
+ pj_pool_release(tdata->pool);
+ }
+ }
+}
+
+/*
+ * Destroy the transmit data (graceful variant: a tdata that still has a
+ * live client transaction is scheduled for delayed destruction rather
+ * than being torn down immediately).
+ */
+PJ_DEF(void) pj_stun_msg_destroy_tdata( pj_stun_session *sess,
+                                        pj_stun_tx_data *tdata)
+{
+    PJ_UNUSED_ARG(sess);
+
+    destroy_tdata(tdata, PJ_FALSE);
+}
+
+
+/* Timer callback to be called when it's time to destroy response cache.
+ * Clears the timer id first so destroy_tdata() does not try to cancel
+ * the (already fired) timer again, then unlinks and destroys the tdata.
+ */
+static void on_cache_timeout(pj_timer_heap_t *timer_heap,
+ struct pj_timer_entry *entry)
+{
+ pj_stun_tx_data *tdata;
+
+ PJ_UNUSED_ARG(timer_heap);
+
+ entry->id = PJ_FALSE;
+ tdata = (pj_stun_tx_data*) entry->user_data;
+
+ PJ_LOG(5,(SNAME(tdata->sess), "Response cache deleted"));
+
+ pj_list_erase(tdata);
+ pj_stun_msg_destroy_tdata(tdata->sess, tdata);
+}
+
+/* Apply session-wide attributes (SOFTWARE, USERNAME, REALM, NONCE,
+ * MESSAGE-INTEGRITY and FINGERPRINT) to an outgoing message prior to
+ * encoding.  auth_info may be NULL or empty, in which case only the
+ * SOFTWARE and FINGERPRINT attributes are considered.
+ *
+ * Fix: the return values of the SOFTWARE and NONCE additions were
+ * silently discarded while every sibling addition was checked; they are
+ * now checked consistently so an allocation failure cannot produce a
+ * half-built message.
+ */
+static pj_status_t apply_msg_options(pj_stun_session *sess,
+                                     pj_pool_t *pool,
+                                     const pj_stun_req_cred_info *auth_info,
+                                     pj_stun_msg *msg)
+{
+    pj_status_t status = PJ_SUCCESS;
+    pj_str_t realm, username, nonce, auth_key;
+
+    /* If the agent is sending a request, it SHOULD add a SOFTWARE attribute
+     * to the request. The server SHOULD include a SOFTWARE attribute in all
+     * responses.
+     *
+     * If magic value is not PJ_STUN_MAGIC, only apply the attribute for
+     * responses.
+     */
+    if (sess->srv_name.slen &&
+        pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_SOFTWARE, 0)==NULL &&
+        (PJ_STUN_IS_RESPONSE(msg->hdr.type) ||
+         (PJ_STUN_IS_REQUEST(msg->hdr.type) && msg->hdr.magic==PJ_STUN_MAGIC)))
+    {
+        status = pj_stun_msg_add_string_attr(pool, msg, PJ_STUN_ATTR_SOFTWARE,
+                                             &sess->srv_name);
+        PJ_ASSERT_RETURN(status==PJ_SUCCESS, status);
+    }
+
+    if (pj_stun_auth_valid_for_msg(msg) && auth_info) {
+        realm = auth_info->realm;
+        username = auth_info->username;
+        nonce = auth_info->nonce;
+        auth_key = auth_info->auth_key;
+    } else {
+        realm.slen = username.slen = nonce.slen = auth_key.slen = 0;
+    }
+
+    /* Create and add USERNAME attribute if needed */
+    if (username.slen && PJ_STUN_IS_REQUEST(msg->hdr.type)) {
+        status = pj_stun_msg_add_string_attr(pool, msg,
+                                             PJ_STUN_ATTR_USERNAME,
+                                             &username);
+        PJ_ASSERT_RETURN(status==PJ_SUCCESS, status);
+    }
+
+    /* Add REALM only when long term credential is used */
+    if (realm.slen && PJ_STUN_IS_REQUEST(msg->hdr.type)) {
+        status = pj_stun_msg_add_string_attr(pool, msg,
+                                             PJ_STUN_ATTR_REALM,
+                                             &realm);
+        PJ_ASSERT_RETURN(status==PJ_SUCCESS, status);
+    }
+
+    /* Add NONCE when desired (requests and error responses only) */
+    if (nonce.slen &&
+        (PJ_STUN_IS_REQUEST(msg->hdr.type) ||
+         PJ_STUN_IS_ERROR_RESPONSE(msg->hdr.type)))
+    {
+        status = pj_stun_msg_add_string_attr(pool, msg,
+                                             PJ_STUN_ATTR_NONCE,
+                                             &nonce);
+        PJ_ASSERT_RETURN(status==PJ_SUCCESS, status);
+    }
+
+    /* Add MESSAGE-INTEGRITY attribute; the actual HMAC is computed at
+     * encode time using the auth key.
+     */
+    if (username.slen && auth_key.slen) {
+        status = pj_stun_msg_add_msgint_attr(pool, msg);
+        PJ_ASSERT_RETURN(status==PJ_SUCCESS, status);
+    }
+
+    /* Add FINGERPRINT attribute if necessary */
+    if (sess->use_fingerprint) {
+        status = pj_stun_msg_add_uint_attr(pool, msg,
+                                           PJ_STUN_ATTR_FINGERPRINT, 0);
+        PJ_ASSERT_RETURN(status==PJ_SUCCESS, status);
+    }
+
+    return PJ_SUCCESS;
+}
+
+/* Handle a 401/438 challenge on a completed long-term-credential request.
+ *
+ * On a challenge carrying a new NONCE, the original request is cloned
+ * (minus the credential attributes, which apply_msg_options() will
+ * re-add) and retried transparently; *notify_user is cleared so the
+ * caller does not surface the intermediate failure.  Retries are capped
+ * at 3.  For any other outcome the function is a no-op that resets the
+ * retry counter.
+ */
+static pj_status_t handle_auth_challenge(pj_stun_session *sess,
+ const pj_stun_tx_data *request,
+ const pj_stun_msg *response,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len,
+ pj_bool_t *notify_user)
+{
+ const pj_stun_errcode_attr *ea;
+
+ *notify_user = PJ_TRUE;
+
+ if (response==NULL)
+ return PJ_SUCCESS;
+
+ /* Challenges only apply to long-term credential sessions. */
+ if (sess->auth_type != PJ_STUN_AUTH_LONG_TERM)
+ return PJ_SUCCESS;
+
+ if (!PJ_STUN_IS_ERROR_RESPONSE(response->hdr.type)) {
+ sess->auth_retry = 0;
+ return PJ_SUCCESS;
+ }
+
+ ea = (const pj_stun_errcode_attr*)
+ pj_stun_msg_find_attr(response, PJ_STUN_ATTR_ERROR_CODE, 0);
+ if (!ea) {
+ PJ_LOG(4,(SNAME(sess), "Invalid error response: no ERROR-CODE"
+ " attribute"));
+ *notify_user = PJ_FALSE;
+ return PJNATH_EINSTUNMSG;
+ }
+
+ if (ea->err_code == PJ_STUN_SC_UNAUTHORIZED ||
+ ea->err_code == PJ_STUN_SC_STALE_NONCE)
+ {
+ const pj_stun_nonce_attr *anonce;
+ const pj_stun_realm_attr *arealm;
+ pj_stun_tx_data *tdata;
+ unsigned i;
+ pj_status_t status;
+
+ anonce = (const pj_stun_nonce_attr*)
+ pj_stun_msg_find_attr(response, PJ_STUN_ATTR_NONCE, 0);
+ if (!anonce) {
+ PJ_LOG(4,(SNAME(sess), "Invalid response: missing NONCE"));
+ *notify_user = PJ_FALSE;
+ return PJNATH_EINSTUNMSG;
+ }
+
+ /* Bail out if we've supplied the correct nonce */
+ if (pj_strcmp(&anonce->value, &sess->next_nonce)==0) {
+ return PJ_SUCCESS;
+ }
+
+ /* Bail out if we've tried too many */
+ if (++sess->auth_retry > 3) {
+ PJ_LOG(4,(SNAME(sess), "Error: authentication failed (too "
+ "many retries)"));
+ return PJ_STATUS_FROM_STUN_CODE(401);
+ }
+
+ /* Save next_nonce */
+ pj_strdup(sess->pool, &sess->next_nonce, &anonce->value);
+
+ /* Copy the realm from the response, stripping any trailing NUL
+ * padding bytes some servers include in the attribute value.
+ */
+ arealm = (pj_stun_realm_attr*)
+ pj_stun_msg_find_attr(response, PJ_STUN_ATTR_REALM, 0);
+ if (arealm) {
+ pj_strdup(sess->pool, &sess->server_realm, &arealm->value);
+ while (sess->server_realm.slen &&
+ !sess->server_realm.ptr[sess->server_realm.slen-1])
+ {
+ --sess->server_realm.slen;
+ }
+ }
+
+ /* Create new request */
+ status = pj_stun_session_create_req(sess, request->msg->hdr.type,
+ request->msg->hdr.magic,
+ NULL, &tdata);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ /* Duplicate all the attributes in the old request, except
+ * USERNAME, REALM, M-I, and NONCE, which will be filled in
+ * later.
+ */
+ for (i=0; i<request->msg->attr_count; ++i) {
+ const pj_stun_attr_hdr *asrc = request->msg->attr[i];
+
+ if (asrc->type == PJ_STUN_ATTR_USERNAME ||
+ asrc->type == PJ_STUN_ATTR_REALM ||
+ asrc->type == PJ_STUN_ATTR_MESSAGE_INTEGRITY ||
+ asrc->type == PJ_STUN_ATTR_NONCE)
+ {
+ continue;
+ }
+
+ tdata->msg->attr[tdata->msg->attr_count++] =
+ pj_stun_attr_clone(tdata->pool, asrc);
+ }
+
+ /* Will retry the request with authentication, no need to
+ * notify user.
+ */
+ *notify_user = PJ_FALSE;
+
+ PJ_LOG(4,(SNAME(sess), "Retrying request with new authentication"));
+
+ /* Retry the request.
+ * NOTE(review): the resulting status is assigned but the function
+ * still returns PJ_SUCCESS below — presumably intentional since the
+ * retry failure surfaces through the new transaction, but worth
+ * confirming.
+ */
+ status = pj_stun_session_send_msg(sess, request->token, PJ_TRUE,
+ request->retransmit, src_addr,
+ src_addr_len, tdata);
+
+ } else {
+ sess->auth_retry = 0;
+ }
+
+ return PJ_SUCCESS;
+}
+
+/* Client-transaction completion callback: runs the long-term-credential
+ * challenge logic, then (unless the challenge logic suppressed it)
+ * notifies the application via on_request_complete, and finally destroys
+ * the tdata.  The busy counter defers a pj_stun_session_destroy() issued
+ * from inside the user callback until we are off the stack.
+ */
+static void stun_tsx_on_complete(pj_stun_client_tsx *tsx,
+ pj_status_t status,
+ const pj_stun_msg *response,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len)
+{
+ pj_stun_session *sess;
+ pj_bool_t notify_user = PJ_TRUE;
+ pj_stun_tx_data *tdata;
+
+ tdata = (pj_stun_tx_data*) pj_stun_client_tsx_get_data(tsx);
+ sess = tdata->sess;
+
+ /* Lock the session and prevent user from destroying us in the callback */
+ pj_atomic_inc(sess->busy);
+ pj_lock_acquire(sess->lock);
+
+ /* Handle authentication challenge */
+ handle_auth_challenge(sess, tdata, response, src_addr,
+ src_addr_len, &notify_user);
+
+ if (notify_user && sess->cb.on_request_complete) {
+ (*sess->cb.on_request_complete)(sess, status, tdata->token, tdata,
+ response, src_addr, src_addr_len);
+ }
+
+ /* Destroy the transmit data. This will remove the transaction
+ * from the pending list too.
+ */
+ pj_stun_msg_destroy_tdata(sess, tdata);
+ tdata = NULL;
+
+ pj_lock_release(sess->lock);
+
+ /* Execute any destroy request deferred while we were busy. */
+ if (pj_atomic_dec_and_get(sess->busy)==0 && sess->destroy_request) {
+ pj_stun_session_destroy(sess);
+ return;
+ }
+}
+
+/* Client-transaction send callback: forwards the encoded packet to the
+ * application's on_send_msg transport callback, guarding against the
+ * session being destroyed from inside that callback.
+ */
+static pj_status_t stun_tsx_on_send_msg(pj_stun_client_tsx *tsx,
+ const void *stun_pkt,
+ pj_size_t pkt_size)
+{
+ pj_stun_tx_data *tdata;
+ pj_stun_session *sess;
+ pj_status_t status;
+
+ tdata = (pj_stun_tx_data*) pj_stun_client_tsx_get_data(tsx);
+ sess = tdata->sess;
+
+ /* Lock the session and prevent user from destroying us in the callback */
+ pj_atomic_inc(sess->busy);
+ pj_lock_acquire(sess->lock);
+
+ status = sess->cb.on_send_msg(tdata->sess, tdata->token, stun_pkt,
+ pkt_size, tdata->dst_addr,
+ tdata->addr_len);
+ pj_lock_release(sess->lock);
+
+ /* Execute a deferred destroy; the special return code tells the
+ * transaction that the session is gone.
+ */
+ if (pj_atomic_dec_and_get(sess->busy)==0 && sess->destroy_request) {
+ pj_stun_session_destroy(sess);
+ return PJNATH_ESTUNDESTROYED;
+ } else {
+ return status;
+ }
+}
+
+/* **************************************************************************/
+
+/* Create a new STUN session.
+ *
+ * cfg         shared STUN config (pool factory, timer heap); must outlive
+ *             the session.
+ * name        pool/log name template, or NULL for a default.
+ * cb          application callbacks, copied by value.
+ * fingerprint add FINGERPRINT attribute to outgoing messages when set.
+ * p_sess      receives the created session.
+ *
+ * Fix: the rx_pool allocation result was never checked, so an OOM here
+ * produced a session that would crash on first received packet; it is
+ * now validated like the main pool.
+ */
+PJ_DEF(pj_status_t) pj_stun_session_create( pj_stun_config *cfg,
+                                            const char *name,
+                                            const pj_stun_session_cb *cb,
+                                            pj_bool_t fingerprint,
+                                            pj_stun_session **p_sess)
+{
+    pj_pool_t *pool;
+    pj_stun_session *sess;
+    pj_status_t status;
+
+    PJ_ASSERT_RETURN(cfg && cb && p_sess, PJ_EINVAL);
+
+    if (name==NULL)
+        name = "stuse%p";
+
+    pool = pj_pool_create(cfg->pf, name, PJNATH_POOL_LEN_STUN_SESS,
+                          PJNATH_POOL_INC_STUN_SESS, NULL);
+    PJ_ASSERT_RETURN(pool, PJ_ENOMEM);
+
+    sess = PJ_POOL_ZALLOC_T(pool, pj_stun_session);
+    sess->cfg = cfg;
+    sess->pool = pool;
+    pj_memcpy(&sess->cb, cb, sizeof(*cb));
+    sess->use_fingerprint = fingerprint;
+    sess->log_flag = 0xFFFF;
+
+    /* Default SOFTWARE attribute value: "pjnath-<version>". */
+    sess->srv_name.ptr = (char*) pj_pool_alloc(pool, 32);
+    sess->srv_name.slen = pj_ansi_snprintf(sess->srv_name.ptr, 32,
+                                           "pjnath-%s", pj_get_version());
+
+    /* Scratch pool for parsing incoming packets. */
+    sess->rx_pool = pj_pool_create(sess->cfg->pf, name,
+                                   PJNATH_POOL_LEN_STUN_TDATA,
+                                   PJNATH_POOL_INC_STUN_TDATA, NULL);
+    if (sess->rx_pool == NULL) {
+        pj_pool_release(pool);
+        return PJ_ENOMEM;
+    }
+
+    pj_list_init(&sess->pending_request_list);
+    pj_list_init(&sess->cached_response_list);
+
+    status = pj_lock_create_recursive_mutex(pool, name, &sess->lock);
+    if (status != PJ_SUCCESS) {
+        pj_pool_release(sess->rx_pool);
+        pj_pool_release(pool);
+        return status;
+    }
+    sess->delete_lock = PJ_TRUE;
+
+    status = pj_atomic_create(pool, 0, &sess->busy);
+    if (status != PJ_SUCCESS) {
+        pj_lock_destroy(sess->lock);
+        pj_pool_release(sess->rx_pool);
+        pj_pool_release(pool);
+        return status;
+    }
+
+    *p_sess = sess;
+
+    return PJ_SUCCESS;
+}
+
+/* Destroy the session.
+ *
+ * If any callback is currently executing (busy counter non-zero), the
+ * destruction is only *requested* and PJ_EPENDING is returned; the
+ * callback epilogue performs the actual destroy once it unwinds.
+ * Otherwise all pending requests and cached responses are forcefully
+ * destroyed, then the lock (if owned), the rx pool and the session pool.
+ */
+PJ_DEF(pj_status_t) pj_stun_session_destroy(pj_stun_session *sess)
+{
+ PJ_ASSERT_RETURN(sess, PJ_EINVAL);
+
+ pj_lock_acquire(sess->lock);
+
+ /* Can't destroy if we're in a callback */
+ sess->destroy_request = PJ_TRUE;
+ if (pj_atomic_get(sess->busy)) {
+ pj_lock_release(sess->lock);
+ return PJ_EPENDING;
+ }
+
+ while (!pj_list_empty(&sess->pending_request_list)) {
+ pj_stun_tx_data *tdata = sess->pending_request_list.next;
+ destroy_tdata(tdata, PJ_TRUE);
+ }
+
+ while (!pj_list_empty(&sess->cached_response_list)) {
+ pj_stun_tx_data *tdata = sess->cached_response_list.next;
+ destroy_tdata(tdata, PJ_TRUE);
+ }
+ pj_lock_release(sess->lock);
+
+ /* Only destroy the lock if we created it in _create(). */
+ if (sess->delete_lock) {
+ pj_lock_destroy(sess->lock);
+ }
+
+ if (sess->rx_pool) {
+ pj_pool_release(sess->rx_pool);
+ sess->rx_pool = NULL;
+ }
+
+ /* Releasing the session pool frees the session itself. */
+ pj_pool_release(sess->pool);
+
+ return PJ_SUCCESS;
+}
+
+
+/* Attach an opaque application pointer to the session. */
+PJ_DEF(pj_status_t) pj_stun_session_set_user_data( pj_stun_session *sess,
+                                                   void *user_data)
+{
+    PJ_ASSERT_RETURN(sess, PJ_EINVAL);
+
+    pj_lock_acquire(sess->lock);
+    sess->user_data = user_data;
+    pj_lock_release(sess->lock);
+
+    return PJ_SUCCESS;
+}
+
+/* Retrieve the opaque application pointer, or NULL for a NULL session. */
+PJ_DEF(void*) pj_stun_session_get_user_data(pj_stun_session *sess)
+{
+    PJ_ASSERT_RETURN(sess, NULL);
+    return sess->user_data;
+}
+
+/* Replace the session's lock with a caller-supplied one.
+ *
+ * auto_del tells the session whether it owns (and must later destroy)
+ * the new lock.
+ *
+ * Fixes: (1) the previous delete_lock flag (old_del) was saved but never
+ * consulted, so a caller-owned lock installed earlier with auto_del ==
+ * PJ_FALSE was destroyed out from under its owner; (2) sess->lock was
+ * dereferenced in an initializer before the NULL-argument assertion ran.
+ */
+PJ_DEF(pj_status_t) pj_stun_session_set_lock( pj_stun_session *sess,
+                                              pj_lock_t *lock,
+                                              pj_bool_t auto_del)
+{
+    pj_lock_t *old_lock;
+    pj_bool_t old_del;
+
+    PJ_ASSERT_RETURN(sess && lock, PJ_EINVAL);
+
+    old_lock = sess->lock;
+
+    pj_lock_acquire(old_lock);
+    sess->lock = lock;
+    old_del = sess->delete_lock;
+    sess->delete_lock = auto_del;
+    pj_lock_release(old_lock);
+
+    /* Destroy the previous lock only if the session owned it. */
+    if (old_lock && old_del)
+        pj_lock_destroy(old_lock);
+
+    return PJ_SUCCESS;
+}
+
+/* Set the SOFTWARE attribute value for outgoing messages; a NULL or
+ * empty name suppresses the attribute entirely.
+ */
+PJ_DEF(pj_status_t) pj_stun_session_set_software_name(pj_stun_session *sess,
+                                                      const pj_str_t *sw)
+{
+    PJ_ASSERT_RETURN(sess, PJ_EINVAL);
+
+    if (sw == NULL || sw->slen == 0) {
+        sess->srv_name.slen = 0;
+        return PJ_SUCCESS;
+    }
+
+    pj_strdup(sess->pool, &sess->srv_name, sw);
+    return PJ_SUCCESS;
+}
+
+/* Install the authentication type and credential for this session.
+ * Passing a NULL credential reverts the session to no authentication.
+ */
+PJ_DEF(pj_status_t) pj_stun_session_set_credential(pj_stun_session *sess,
+                                                   pj_stun_auth_type auth_type,
+                                                   const pj_stun_auth_cred *cred)
+{
+    PJ_ASSERT_RETURN(sess, PJ_EINVAL);
+
+    sess->auth_type = auth_type;
+    if (cred == NULL) {
+        sess->auth_type = PJ_STUN_AUTH_NONE;
+        pj_bzero(&sess->cred, sizeof(sess->cred));
+    } else {
+        pj_stun_auth_cred_dup(sess->pool, &sess->cred, cred);
+    }
+
+    return PJ_SUCCESS;
+}
+
+/* Set the PJ_STUN_SESS_LOG_* bitmask controlling message dumps. */
+PJ_DEF(void) pj_stun_session_set_log( pj_stun_session *sess,
+                                      unsigned flags)
+{
+    PJ_ASSERT_ON_FAIL(sess, return);
+
+    sess->log_flag = flags;
+}
+
+/* Enable/disable the FINGERPRINT attribute on outgoing messages and
+ * return the previous setting.
+ */
+PJ_DEF(pj_bool_t) pj_stun_session_use_fingerprint(pj_stun_session *sess,
+                                                  pj_bool_t use)
+{
+    pj_bool_t prev;
+
+    PJ_ASSERT_RETURN(sess, PJ_FALSE);
+
+    prev = sess->use_fingerprint;
+    sess->use_fingerprint = use;
+    return prev;
+}
+
+/* Fill tdata->auth_info (realm, username, nonce and the derived HMAC key)
+ * from the session credential, either the static credential or by invoking
+ * the application's dynamic get_cred callback.
+ */
+static pj_status_t get_auth(pj_stun_session *sess,
+ pj_stun_tx_data *tdata)
+{
+ if (sess->cred.type == PJ_STUN_AUTH_CRED_STATIC) {
+ /* Use the realm learned from the server's challenge rather than
+ * the statically-configured one.
+ */
+ //tdata->auth_info.realm = sess->cred.data.static_cred.realm;
+ tdata->auth_info.realm = sess->server_realm;
+ tdata->auth_info.username = sess->cred.data.static_cred.username;
+ tdata->auth_info.nonce = sess->cred.data.static_cred.nonce;
+
+ pj_stun_create_key(tdata->pool, &tdata->auth_info.auth_key,
+ &tdata->auth_info.realm,
+ &tdata->auth_info.username,
+ sess->cred.data.static_cred.data_type,
+ &sess->cred.data.static_cred.data);
+
+ } else if (sess->cred.type == PJ_STUN_AUTH_CRED_DYNAMIC) {
+ /* Ask the application for credentials for this message. */
+ pj_str_t password;
+ void *user_data = sess->cred.data.dyn_cred.user_data;
+ pj_stun_passwd_type data_type = PJ_STUN_PASSWD_PLAIN;
+ pj_status_t rc;
+
+ rc = (*sess->cred.data.dyn_cred.get_cred)(tdata->msg, user_data,
+ tdata->pool,
+ &tdata->auth_info.realm,
+ &tdata->auth_info.username,
+ &tdata->auth_info.nonce,
+ &data_type, &password);
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ pj_stun_create_key(tdata->pool, &tdata->auth_info.auth_key,
+ &tdata->auth_info.realm, &tdata->auth_info.username,
+ data_type, &password);
+
+ } else {
+ pj_assert(!"Unknown credential type");
+ return PJ_EBUG;
+ }
+
+ return PJ_SUCCESS;
+}
+
+/* Create an outgoing STUN request.
+ *
+ * method  the STUN method/type value.
+ * magic   magic cookie to place in the header (PJ_STUN_MAGIC for
+ *         RFC 5389 behavior).
+ * tsx_id  optional 12-byte transaction ID, or NULL to generate one.
+ *
+ * Credential handling: short-term credentials are always attached;
+ * long-term credentials are attached only once a server challenge has
+ * supplied a nonce (sess->next_nonce).  On any failure the tdata pool
+ * is released before returning.
+ */
+PJ_DEF(pj_status_t) pj_stun_session_create_req(pj_stun_session *sess,
+ int method,
+ pj_uint32_t magic,
+ const pj_uint8_t tsx_id[12],
+ pj_stun_tx_data **p_tdata)
+{
+ pj_stun_tx_data *tdata = NULL;
+ pj_status_t status;
+
+ PJ_ASSERT_RETURN(sess && p_tdata, PJ_EINVAL);
+
+ status = create_tdata(sess, &tdata);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ /* Create STUN message */
+ status = pj_stun_msg_create(tdata->pool, method, magic,
+ tsx_id, &tdata->msg);
+ if (status != PJ_SUCCESS) {
+ pj_pool_release(tdata->pool);
+ return status;
+ }
+
+ /* copy the request's transaction ID as the transaction key. */
+ pj_assert(sizeof(tdata->msg_key)==sizeof(tdata->msg->hdr.tsx_id));
+ tdata->msg_magic = tdata->msg->hdr.magic;
+ pj_memcpy(tdata->msg_key, tdata->msg->hdr.tsx_id,
+ sizeof(tdata->msg->hdr.tsx_id));
+
+
+ /* Get authentication information for the request */
+ if (sess->auth_type == PJ_STUN_AUTH_NONE) {
+ /* No authentication */
+
+ } else if (sess->auth_type == PJ_STUN_AUTH_SHORT_TERM) {
+ /* MUST put authentication in request */
+ status = get_auth(sess, tdata);
+ if (status != PJ_SUCCESS) {
+ pj_pool_release(tdata->pool);
+ return status;
+ }
+
+ } else if (sess->auth_type == PJ_STUN_AUTH_LONG_TERM) {
+ /* Only put authentication information if we've received
+ * response from server.
+ */
+ if (sess->next_nonce.slen != 0) {
+ status = get_auth(sess, tdata);
+ if (status != PJ_SUCCESS) {
+ pj_pool_release(tdata->pool);
+ return status;
+ }
+ /* Override with the nonce/realm from the latest challenge. */
+ tdata->auth_info.nonce = sess->next_nonce;
+ tdata->auth_info.realm = sess->server_realm;
+ }
+
+ } else {
+ pj_assert(!"Invalid authentication type");
+ pj_pool_release(tdata->pool);
+ return PJ_EBUG;
+ }
+
+ *p_tdata = tdata;
+ return PJ_SUCCESS;
+}
+
+/* Create an outgoing STUN indication.  The indication bit is forced onto
+ * msg_type, a fresh transaction ID is generated, and no credential is
+ * attached (indications are never challenged).
+ */
+PJ_DEF(pj_status_t) pj_stun_session_create_ind(pj_stun_session *sess,
+                                               int msg_type,
+                                               pj_stun_tx_data **p_tdata)
+{
+    pj_status_t status;
+    pj_stun_tx_data *tdata = NULL;
+
+    PJ_ASSERT_RETURN(sess && p_tdata, PJ_EINVAL);
+
+    status = create_tdata(sess, &tdata);
+    if (status != PJ_SUCCESS)
+        return status;
+
+    /* Build the message with the indication class bit set. */
+    status = pj_stun_msg_create(tdata->pool,
+                                msg_type | PJ_STUN_INDICATION_BIT,
+                                PJ_STUN_MAGIC, NULL, &tdata->msg);
+    if (status != PJ_SUCCESS) {
+        pj_pool_release(tdata->pool);
+        return status;
+    }
+
+    *p_tdata = tdata;
+    return PJ_SUCCESS;
+}
+
+/*
+ * Create a STUN response message.
+ *
+ * Builds a success response when err_code is zero, otherwise an error
+ * response with the given code and optional reason text.  The request's
+ * magic and transaction ID become the response key, and the credential
+ * found in the request is carried over so the response can be signed.
+ */
+PJ_DEF(pj_status_t) pj_stun_session_create_res( pj_stun_session *sess,
+                                                const pj_stun_rx_data *rdata,
+                                                unsigned err_code,
+                                                const pj_str_t *err_msg,
+                                                pj_stun_tx_data **p_tdata)
+{
+    pj_stun_tx_data *tdata = NULL;
+    pj_status_t status;
+
+    status = create_tdata(sess, &tdata);
+    if (status != PJ_SUCCESS)
+        return status;
+
+    status = pj_stun_msg_create_response(tdata->pool, rdata->msg,
+                                         err_code, err_msg, &tdata->msg);
+    if (status != PJ_SUCCESS) {
+        pj_pool_release(tdata->pool);
+        return status;
+    }
+
+    /* The request's transaction ID doubles as the transaction key. */
+    pj_assert(sizeof(tdata->msg_key)==sizeof(rdata->msg->hdr.tsx_id));
+    tdata->msg_magic = rdata->msg->hdr.magic;
+    pj_memcpy(tdata->msg_key, rdata->msg->hdr.tsx_id,
+              sizeof(rdata->msg->hdr.tsx_id));
+
+    /* Carry over the credential found in the request. */
+    pj_stun_req_cred_info_dup(tdata->pool, &tdata->auth_info, &rdata->info);
+
+    *p_tdata = tdata;
+
+    return PJ_SUCCESS;
+}
+
+
+/* Print outgoing message to log.
+ * Honors the per-session log_flag bitmask so TX dumps can be disabled
+ * per message class.  The whole PJ_LOG(5,...) call (and with it the
+ * reference to sess->dump_buf) compiles away when PJ_LOG_MAX_LEVEL < 5.
+ */
+static void dump_tx_msg(pj_stun_session *sess, const pj_stun_msg *msg,
+ unsigned pkt_size, const pj_sockaddr_t *addr)
+{
+ char dst_name[PJ_INET6_ADDRSTRLEN+10];
+
+ /* Skip the dump when logging is disabled for this message class. */
+ if ((PJ_STUN_IS_REQUEST(msg->hdr.type) &&
+ (sess->log_flag & PJ_STUN_SESS_LOG_TX_REQ)==0) ||
+ (PJ_STUN_IS_RESPONSE(msg->hdr.type) &&
+ (sess->log_flag & PJ_STUN_SESS_LOG_TX_RES)==0) ||
+ (PJ_STUN_IS_INDICATION(msg->hdr.type) &&
+ (sess->log_flag & PJ_STUN_SESS_LOG_TX_IND)==0))
+ {
+ return;
+ }
+
+ pj_sockaddr_print(addr, dst_name, sizeof(dst_name), 3);
+
+ PJ_LOG(5,(SNAME(sess),
+ "TX %d bytes STUN message to %s:\n"
+ "--- begin STUN message ---\n"
+ "%s"
+ "--- end of STUN message ---\n",
+ pkt_size, dst_name,
+ pj_stun_msg_dump(msg, sess->dump_buf, sizeof(sess->dump_buf),
+ NULL)));
+
+}
+
+
+/* Encode and transmit an outgoing message.
+ *
+ * Requests are sent through a new client transaction (which handles
+ * retransmission and completion callbacks) and placed on the pending
+ * list; responses may optionally be cached (cache_res) for the duration
+ * of cfg->res_cache_msec so retransmitted requests can be answered
+ * without re-invoking the application; indications are sent directly.
+ * On success the tdata ownership passes to the session/transaction.
+ *
+ * Fix: the original used PJ_ASSERT_RETURN when pj_stun_client_tsx_create
+ * failed, which returned while still holding the session lock and with
+ * the busy counter incremented — deadlocking the session and leaking the
+ * tdata.  That failure now flows through the common cleanup path.
+ */
+PJ_DEF(pj_status_t) pj_stun_session_send_msg( pj_stun_session *sess,
+                                              void *token,
+                                              pj_bool_t cache_res,
+                                              pj_bool_t retransmit,
+                                              const pj_sockaddr_t *server,
+                                              unsigned addr_len,
+                                              pj_stun_tx_data *tdata)
+{
+    pj_status_t status;
+
+    PJ_ASSERT_RETURN(sess && addr_len && server && tdata, PJ_EINVAL);
+
+    pj_log_push_indent();
+
+    /* Allocate packet */
+    tdata->max_len = PJ_STUN_MAX_PKT_LEN;
+    tdata->pkt = pj_pool_alloc(tdata->pool, tdata->max_len);
+
+    tdata->token = token;
+    tdata->retransmit = retransmit;
+
+    /* Lock the session and prevent user from destroying us in the callback */
+    pj_atomic_inc(sess->busy);
+    pj_lock_acquire(sess->lock);
+
+    /* Apply options */
+    status = apply_msg_options(sess, tdata->pool, &tdata->auth_info,
+                               tdata->msg);
+    if (status != PJ_SUCCESS) {
+        pj_stun_msg_destroy_tdata(sess, tdata);
+        LOG_ERR_(sess, "Error applying options", status);
+        goto on_return;
+    }
+
+    /* Encode message */
+    status = pj_stun_msg_encode(tdata->msg, (pj_uint8_t*)tdata->pkt,
+                                tdata->max_len, 0,
+                                &tdata->auth_info.auth_key,
+                                &tdata->pkt_size);
+    if (status != PJ_SUCCESS) {
+        pj_stun_msg_destroy_tdata(sess, tdata);
+        LOG_ERR_(sess, "STUN encode() error", status);
+        goto on_return;
+    }
+
+    /* Dump packet */
+    dump_tx_msg(sess, tdata->msg, tdata->pkt_size, server);
+
+    /* If this is a STUN request message, then send the request with
+     * a new STUN client transaction.
+     */
+    if (PJ_STUN_IS_REQUEST(tdata->msg->hdr.type)) {
+
+        /* Create STUN client transaction */
+        status = pj_stun_client_tsx_create(sess->cfg, tdata->pool,
+                                           &tsx_cb, &tdata->client_tsx);
+        if (status != PJ_SUCCESS) {
+            /* Must NOT early-return here: the lock and busy counter are
+             * released in the common epilogue below.
+             */
+            pj_stun_msg_destroy_tdata(sess, tdata);
+            LOG_ERR_(sess, "Error creating STUN client transaction", status);
+            goto on_return;
+        }
+        pj_stun_client_tsx_set_data(tdata->client_tsx, (void*)tdata);
+
+        /* Save the remote address */
+        tdata->addr_len = addr_len;
+        tdata->dst_addr = server;
+
+        /* Send the request! */
+        status = pj_stun_client_tsx_send_msg(tdata->client_tsx, retransmit,
+                                             tdata->pkt, tdata->pkt_size);
+        if (status != PJ_SUCCESS && status != PJ_EPENDING) {
+            pj_stun_msg_destroy_tdata(sess, tdata);
+            LOG_ERR_(sess, "Error sending STUN request", status);
+            goto on_return;
+        }
+
+        /* Add to pending request list */
+        tsx_add(sess, tdata);
+
+    } else {
+        if (cache_res &&
+            (PJ_STUN_IS_SUCCESS_RESPONSE(tdata->msg->hdr.type) ||
+             PJ_STUN_IS_ERROR_RESPONSE(tdata->msg->hdr.type)))
+        {
+            /* Requested to keep the response in the cache */
+            pj_time_val timeout;
+
+            pj_memset(&tdata->res_timer, 0, sizeof(tdata->res_timer));
+            pj_timer_entry_init(&tdata->res_timer, PJ_TRUE, tdata,
+                                &on_cache_timeout);
+
+            timeout.sec = sess->cfg->res_cache_msec / 1000;
+            timeout.msec = sess->cfg->res_cache_msec % 1000;
+
+            status = pj_timer_heap_schedule(sess->cfg->timer_heap,
+                                            &tdata->res_timer,
+                                            &timeout);
+            if (status != PJ_SUCCESS) {
+                tdata->res_timer.id = PJ_FALSE;
+                pj_stun_msg_destroy_tdata(sess, tdata);
+                LOG_ERR_(sess, "Error scheduling response timer", status);
+                goto on_return;
+            }
+
+            pj_list_push_back(&sess->cached_response_list, tdata);
+        }
+
+        /* Otherwise for non-request message, send directly to transport. */
+        status = sess->cb.on_send_msg(sess, token, tdata->pkt,
+                                      tdata->pkt_size, server, addr_len);
+
+        if (status != PJ_SUCCESS && status != PJ_EPENDING) {
+            pj_stun_msg_destroy_tdata(sess, tdata);
+            LOG_ERR_(sess, "Error sending STUN request", status);
+            goto on_return;
+        }
+
+        /* Destroy only when response is not cached*/
+        if (tdata->res_timer.id == 0) {
+            pj_stun_msg_destroy_tdata(sess, tdata);
+        }
+    }
+
+on_return:
+    pj_lock_release(sess->lock);
+
+    pj_log_pop_indent();
+
+    /* Check if application has called destroy() in the callback */
+    if (pj_atomic_dec_and_get(sess->busy)==0 && sess->destroy_request) {
+        pj_stun_session_destroy(sess);
+        return PJNATH_ESTUNDESTROYED;
+    }
+
+    return status;
+}
+
+
+/*
+ * Create and send STUN response message.
+ *
+ * Convenience wrapper: builds the (success or error) response for rdata
+ * and hands it to pj_stun_session_send_msg(), optionally caching it for
+ * retransmitted requests.
+ */
+PJ_DEF(pj_status_t) pj_stun_session_respond( pj_stun_session *sess,
+                                             const pj_stun_rx_data *rdata,
+                                             unsigned code,
+                                             const char *errmsg,
+                                             void *token,
+                                             pj_bool_t cache,
+                                             const pj_sockaddr_t *dst_addr,
+                                             unsigned addr_len)
+{
+    pj_stun_tx_data *tdata;
+    pj_str_t reason;
+    const pj_str_t *reason_ptr = errmsg ? pj_cstr(&reason, errmsg) : NULL;
+    pj_status_t status;
+
+    status = pj_stun_session_create_res(sess, rdata, code, reason_ptr,
+                                        &tdata);
+    if (status != PJ_SUCCESS)
+        return status;
+
+    return pj_stun_session_send_msg(sess, token, cache, PJ_FALSE,
+                                    dst_addr, addr_len, tdata);
+}
+
+
+/*
+ * Cancel outgoing STUN transaction.
+ *
+ * Optionally invokes on_request_complete with notify_status first (which
+ * must then be a failure code), then destroys the tdata — which tears
+ * down the client transaction with it.  The busy counter defers a
+ * destroy issued from inside the notification callback.
+ */
+PJ_DEF(pj_status_t) pj_stun_session_cancel_req( pj_stun_session *sess,
+ pj_stun_tx_data *tdata,
+ pj_bool_t notify,
+ pj_status_t notify_status)
+{
+ PJ_ASSERT_RETURN(sess && tdata, PJ_EINVAL);
+ PJ_ASSERT_RETURN(!notify || notify_status!=PJ_SUCCESS, PJ_EINVAL);
+ PJ_ASSERT_RETURN(PJ_STUN_IS_REQUEST(tdata->msg->hdr.type), PJ_EINVAL);
+
+ /* Lock the session and prevent user from destroying us in the callback */
+ pj_atomic_inc(sess->busy);
+ pj_lock_acquire(sess->lock);
+
+ if (notify) {
+ (sess->cb.on_request_complete)(sess, notify_status, tdata->token,
+ tdata, NULL, NULL, 0);
+ }
+
+ /* Just destroy tdata. This will destroy the transaction as well */
+ pj_stun_msg_destroy_tdata(sess, tdata);
+
+ pj_lock_release(sess->lock);
+
+ if (pj_atomic_dec_and_get(sess->busy)==0 && sess->destroy_request) {
+ pj_stun_session_destroy(sess);
+ return PJNATH_ESTUNDESTROYED;
+ }
+
+ return PJ_SUCCESS;
+}
+
+/*
+ * Explicitly request retransmission of the request.
+ *
+ * Simply forwards to the client transaction's retransmit function under
+ * the usual busy/lock guard so a destroy from the send callback is
+ * deferred safely.
+ */
+PJ_DEF(pj_status_t) pj_stun_session_retransmit_req(pj_stun_session *sess,
+ pj_stun_tx_data *tdata)
+{
+ pj_status_t status;
+
+ PJ_ASSERT_RETURN(sess && tdata, PJ_EINVAL);
+ PJ_ASSERT_RETURN(PJ_STUN_IS_REQUEST(tdata->msg->hdr.type), PJ_EINVAL);
+
+ /* Lock the session and prevent user from destroying us in the callback */
+ pj_atomic_inc(sess->busy);
+ pj_lock_acquire(sess->lock);
+
+ status = pj_stun_client_tsx_retransmit(tdata->client_tsx);
+
+ pj_lock_release(sess->lock);
+
+ if (pj_atomic_dec_and_get(sess->busy)==0 && sess->destroy_request) {
+ pj_stun_session_destroy(sess);
+ return PJNATH_ESTUNDESTROYED;
+ }
+
+ return status;
+}
+
+
+/* Encode and send a response message through the transport callback.
+ *
+ * For fresh responses the session attributes are applied first; for
+ * cached-response retransmissions (retransmission==PJ_TRUE) the message
+ * is re-encoded as-is.
+ *
+ * Fix: auth_info may legitimately be NULL here (e.g. the Bad Request
+ * path in on_incoming_request() passes NULL), and evaluating
+ * &auth_info->auth_key on a null pointer is undefined behavior; encode
+ * with a NULL key in that case (valid since such responses carry no
+ * MESSAGE-INTEGRITY attribute).
+ */
+static pj_status_t send_response(pj_stun_session *sess, void *token,
+                                 pj_pool_t *pool, pj_stun_msg *response,
+                                 const pj_stun_req_cred_info *auth_info,
+                                 pj_bool_t retransmission,
+                                 const pj_sockaddr_t *addr, unsigned addr_len)
+{
+    pj_uint8_t *out_pkt;
+    pj_size_t out_max_len, out_len;
+    const pj_str_t *auth_key;
+    pj_status_t status;
+
+    /* Apply options */
+    if (!retransmission) {
+        status = apply_msg_options(sess, pool, auth_info, response);
+        if (status != PJ_SUCCESS)
+            return status;
+    }
+
+    /* Alloc packet buffer */
+    out_max_len = PJ_STUN_MAX_PKT_LEN;
+    out_pkt = (pj_uint8_t*) pj_pool_alloc(pool, out_max_len);
+
+    /* Encode */
+    auth_key = auth_info ? &auth_info->auth_key : NULL;
+    status = pj_stun_msg_encode(response, out_pkt, out_max_len, 0,
+                                auth_key, &out_len);
+    if (status != PJ_SUCCESS) {
+        LOG_ERR_(sess, "Error encoding message", status);
+        return status;
+    }
+
+    /* Print log */
+    dump_tx_msg(sess, response, out_len, addr);
+
+    /* Send packet */
+    status = sess->cb.on_send_msg(sess, token, out_pkt, out_len,
+                                  addr, addr_len);
+
+    return status;
+}
+
+/* Authenticate incoming message.
+ *
+ * No-op for error responses and for sessions without authentication.
+ * On failure, if pj_stun_authenticate_request produced an error response
+ * it is sent back to the source before the failure status is returned;
+ * on success rdata->info carries the verified credential.
+ */
+static pj_status_t authenticate_req(pj_stun_session *sess,
+ void *token,
+ const pj_uint8_t *pkt,
+ unsigned pkt_len,
+ pj_stun_rx_data *rdata,
+ pj_pool_t *tmp_pool,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len)
+{
+ pj_stun_msg *response;
+ pj_status_t status;
+
+ if (PJ_STUN_IS_ERROR_RESPONSE(rdata->msg->hdr.type) ||
+ sess->auth_type == PJ_STUN_AUTH_NONE)
+ {
+ return PJ_SUCCESS;
+ }
+
+ status = pj_stun_authenticate_request(pkt, pkt_len, rdata->msg,
+ &sess->cred, tmp_pool, &rdata->info,
+ &response);
+ if (status != PJ_SUCCESS && response != NULL) {
+ PJ_LOG(5,(SNAME(sess), "Message authentication failed"));
+ /* Best effort: the send result is deliberately ignored, the
+ * authentication failure status is what callers care about.
+ */
+ send_response(sess, token, tmp_pool, response, &rdata->info,
+ PJ_FALSE, src_addr, src_addr_len);
+ }
+
+ return status;
+}
+
+
+/* Handle incoming response.
+ *
+ * Matches the response against the pending-request list by magic and
+ * transaction ID; unmatched responses are silently dropped.  Unless
+ * authentication is disabled (session setting or PJ_STUN_NO_AUTHENTICATE
+ * option), the response integrity is verified with the key used for the
+ * request.  Accepted responses are forwarded to the client transaction,
+ * whose callback in turn notifies the application.
+ */
+static pj_status_t on_incoming_response(pj_stun_session *sess,
+ unsigned options,
+ const pj_uint8_t *pkt,
+ unsigned pkt_len,
+ pj_stun_msg *msg,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len)
+{
+ pj_stun_tx_data *tdata;
+ pj_status_t status;
+
+ /* Lookup pending client transaction */
+ tdata = tsx_lookup(sess, msg);
+ if (tdata == NULL) {
+ PJ_LOG(5,(SNAME(sess),
+ "Transaction not found, response silently discarded"));
+ return PJ_SUCCESS;
+ }
+
+ if (sess->auth_type == PJ_STUN_AUTH_NONE)
+ options |= PJ_STUN_NO_AUTHENTICATE;
+
+ /* Authenticate the message, unless PJ_STUN_NO_AUTHENTICATE
+ * is specified in the option.
+ */
+ if ((options & PJ_STUN_NO_AUTHENTICATE) == 0 &&
+ tdata->auth_info.auth_key.slen != 0 &&
+ pj_stun_auth_valid_for_msg(msg))
+ {
+ status = pj_stun_authenticate_response(pkt, pkt_len, msg,
+ &tdata->auth_info.auth_key);
+ if (status != PJ_SUCCESS) {
+ PJ_LOG(5,(SNAME(sess),
+ "Response authentication failed"));
+ return status;
+ }
+ }
+
+ /* Pass the response to the transaction.
+ * If the message is accepted, transaction callback will be called,
+ * and this will call the session callback too.
+ */
+ status = pj_stun_client_tsx_on_rx_msg(tdata->client_tsx, msg,
+ src_addr, src_addr_len);
+ if (status != PJ_SUCCESS) {
+ return status;
+ }
+
+ return PJ_SUCCESS;
+}
+
+
+/* For an incoming request, look for a previously cached response with a
+ * matching magic, type and transaction ID.  When found, retransmit that
+ * cached response and return PJ_SUCCESS; otherwise return PJ_ENOTFOUND
+ * so the caller processes the request normally.
+ */
+static pj_status_t check_cached_response(pj_stun_session *sess,
+                                         pj_pool_t *tmp_pool,
+                                         const pj_stun_msg *msg,
+                                         const pj_sockaddr_t *src_addr,
+                                         unsigned src_addr_len)
+{
+    pj_stun_tx_data *it;
+
+    for (it = sess->cached_response_list.next;
+         it != &sess->cached_response_list;
+         it = it->next)
+    {
+        if (it->msg_magic == msg->hdr.magic &&
+            it->msg->hdr.type == msg->hdr.type &&
+            pj_memcmp(it->msg_key, msg->hdr.tsx_id,
+                      sizeof(msg->hdr.tsx_id))==0)
+        {
+            /* Cache hit: replay the stored response. */
+            PJ_LOG(5,(SNAME(sess),
+                      "Request retransmission, sending cached response"));
+
+            send_response(sess, it->token, tmp_pool, it->msg,
+                          &it->auth_info, PJ_TRUE, src_addr, src_addr_len);
+            return PJ_SUCCESS;
+        }
+    }
+
+    return PJ_ENOTFOUND;
+}
+
+/* Handle incoming request: authenticate it (unless disabled), then
+ * dispatch it to the application's on_rx_request callback. When no
+ * callback is installed, a 400 (Bad Request) response is sent back.
+ */
+static pj_status_t on_incoming_request(pj_stun_session *sess,
+ unsigned options,
+ void *token,
+ pj_pool_t *tmp_pool,
+ const pj_uint8_t *in_pkt,
+ unsigned in_pkt_len,
+ pj_stun_msg *msg,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len)
+{
+ pj_stun_rx_data rdata;
+ pj_status_t status;
+
+ /* Init rdata */
+ rdata.msg = msg;
+ pj_bzero(&rdata.info, sizeof(rdata.info));
+
+ /* Skip credential verification entirely when session uses no auth */
+ if (sess->auth_type == PJ_STUN_AUTH_NONE)
+ options |= PJ_STUN_NO_AUTHENTICATE;
+
+ /* Authenticate the message, unless PJ_STUN_NO_AUTHENTICATE
+ * is specified in the option.
+ */
+ if ((options & PJ_STUN_NO_AUTHENTICATE) == 0) {
+ status = authenticate_req(sess, token, (const pj_uint8_t*) in_pkt,
+ in_pkt_len,&rdata, tmp_pool, src_addr,
+ src_addr_len);
+ if (status != PJ_SUCCESS) {
+ return status;
+ }
+ }
+
+ /* Distribute to handler, or respond with Bad Request */
+ if (sess->cb.on_rx_request) {
+ status = (*sess->cb.on_rx_request)(sess, in_pkt, in_pkt_len, &rdata,
+ token, src_addr, src_addr_len);
+ } else {
+ pj_str_t err_text;
+ pj_stun_msg *response;
+
+ err_text = pj_str("Callback is not set to handle request");
+ status = pj_stun_msg_create_response(tmp_pool, msg,
+ PJ_STUN_SC_BAD_REQUEST,
+ &err_text, &response);
+ if (status == PJ_SUCCESS && response) {
+ status = send_response(sess, token, tmp_pool, response,
+ NULL, PJ_FALSE, src_addr, src_addr_len);
+ }
+ }
+
+ return status;
+}
+
+
+/* Handle incoming indication: simply forward it to the application's
+ * on_rx_indication callback. Indications are unacknowledged, so when
+ * no callback is installed the packet is silently ignored.
+ */
+static pj_status_t on_incoming_indication(pj_stun_session *sess,
+ void *token,
+ pj_pool_t *tmp_pool,
+ const pj_uint8_t *in_pkt,
+ unsigned in_pkt_len,
+ const pj_stun_msg *msg,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len)
+{
+ PJ_UNUSED_ARG(tmp_pool);
+
+ /* Distribute to handler */
+ if (sess->cb.on_rx_indication) {
+ return (*sess->cb.on_rx_indication)(sess, in_pkt, in_pkt_len, msg,
+ token, src_addr, src_addr_len);
+ } else {
+ return PJ_SUCCESS;
+ }
+}
+
+
+/* Print incoming (RX) message to log, subject to the session's
+ * log_flag settings for the corresponding message class.
+ */
+static void dump_rx_msg(pj_stun_session *sess, const pj_stun_msg *msg,
+ unsigned pkt_size, const pj_sockaddr_t *addr)
+{
+ char src_info[PJ_INET6_ADDRSTRLEN+10];
+
+ /* Bail out when logging is disabled for this message class */
+ if ((PJ_STUN_IS_REQUEST(msg->hdr.type) &&
+ (sess->log_flag & PJ_STUN_SESS_LOG_RX_REQ)==0) ||
+ (PJ_STUN_IS_RESPONSE(msg->hdr.type) &&
+ (sess->log_flag & PJ_STUN_SESS_LOG_RX_RES)==0) ||
+ (PJ_STUN_IS_INDICATION(msg->hdr.type) &&
+ (sess->log_flag & PJ_STUN_SESS_LOG_RX_IND)==0))
+ {
+ return;
+ }
+
+ pj_sockaddr_print(addr, src_info, sizeof(src_info), 3);
+
+ PJ_LOG(5,(SNAME(sess),
+ "RX %d bytes STUN message from %s:\n"
+ "--- begin STUN message ---\n"
+ "%s"
+ "--- end of STUN message ---\n",
+ pkt_size, src_info,
+ pj_stun_msg_dump(msg, sess->dump_buf, sizeof(sess->dump_buf),
+ NULL)));
+
+}
+
+/* Incoming packet entry point. Decodes the packet, then dispatches it
+ * by message class (response / request / indication). The session is
+ * locked and marked busy for the duration so that a destroy requested
+ * from within a callback is deferred until we return; in that case
+ * PJNATH_ESTUNDESTROYED is returned and the session is gone.
+ */
+PJ_DEF(pj_status_t) pj_stun_session_on_rx_pkt(pj_stun_session *sess,
+ const void *packet,
+ pj_size_t pkt_size,
+ unsigned options,
+ void *token,
+ pj_size_t *parsed_len,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len)
+{
+ pj_stun_msg *msg, *response;
+ pj_status_t status;
+
+ PJ_ASSERT_RETURN(sess && packet && pkt_size, PJ_EINVAL);
+
+ pj_log_push_indent();
+
+ /* Lock the session and prevent user from destroying us in the callback */
+ pj_atomic_inc(sess->busy);
+ pj_lock_acquire(sess->lock);
+
+ /* Reset pool; rx_pool memory only lives for the span of this call */
+ pj_pool_reset(sess->rx_pool);
+
+ /* Try to parse the message. On parse failure the decoder may hand us
+ * a ready-made error response to transmit back to the sender.
+ */
+ status = pj_stun_msg_decode(sess->rx_pool, (const pj_uint8_t*)packet,
+ pkt_size, options,
+ &msg, parsed_len, &response);
+ if (status != PJ_SUCCESS) {
+ LOG_ERR_(sess, "STUN msg_decode() error", status);
+ if (response) {
+ send_response(sess, token, sess->rx_pool, response, NULL,
+ PJ_FALSE, src_addr, src_addr_len);
+ }
+ goto on_return;
+ }
+
+ dump_rx_msg(sess, msg, pkt_size, src_addr);
+
+ /* For requests, check if we have cached response; if so the cached
+ * response has been resent and we are done.
+ */
+ status = check_cached_response(sess, sess->rx_pool, msg,
+ src_addr, src_addr_len);
+ if (status == PJ_SUCCESS) {
+ goto on_return;
+ }
+
+ /* Handle message according to its class */
+ if (PJ_STUN_IS_SUCCESS_RESPONSE(msg->hdr.type) ||
+ PJ_STUN_IS_ERROR_RESPONSE(msg->hdr.type))
+ {
+ status = on_incoming_response(sess, options,
+ (const pj_uint8_t*) packet, pkt_size,
+ msg, src_addr, src_addr_len);
+
+ } else if (PJ_STUN_IS_REQUEST(msg->hdr.type)) {
+
+ status = on_incoming_request(sess, options, token, sess->rx_pool,
+ (const pj_uint8_t*) packet, pkt_size,
+ msg, src_addr, src_addr_len);
+
+ } else if (PJ_STUN_IS_INDICATION(msg->hdr.type)) {
+
+ status = on_incoming_indication(sess, token, sess->rx_pool,
+ (const pj_uint8_t*) packet, pkt_size,
+ msg, src_addr, src_addr_len);
+
+ } else {
+ pj_assert(!"Unexpected!");
+ status = PJ_EBUG;
+ }
+
+on_return:
+ pj_lock_release(sess->lock);
+
+ pj_log_pop_indent();
+
+ /* If we've received destroy request while we're on the callback,
+ * destroy the session now.
+ */
+ if (pj_atomic_dec_and_get(sess->busy)==0 && sess->destroy_request) {
+ pj_stun_session_destroy(sess);
+ return PJNATH_ESTUNDESTROYED;
+ }
+
+ return status;
+}
+
diff --git a/pjnath/src/pjnath/stun_sock.c b/pjnath/src/pjnath/stun_sock.c
new file mode 100644
index 0000000..ff7dc16
--- /dev/null
+++ b/pjnath/src/pjnath/stun_sock.c
@@ -0,0 +1,856 @@
+/* $Id: stun_sock.c 3999 2012-03-30 07:10:13Z bennylp $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <pjnath/stun_sock.h>
+#include <pjnath/errno.h>
+#include <pjnath/stun_transaction.h>
+#include <pjnath/stun_session.h>
+#include <pjlib-util/srv_resolver.h>
+#include <pj/activesock.h>
+#include <pj/addr_resolv.h>
+#include <pj/array.h>
+#include <pj/assert.h>
+#include <pj/ip_helper.h>
+#include <pj/log.h>
+#include <pj/pool.h>
+#include <pj/rand.h>
+
+
+/* The STUN transport (pj_stun_sock) instance: a UDP socket wrapped in
+ * an active socket, plus an embedded STUN session used internally for
+ * Binding requests / keep-alives toward the configured STUN server.
+ */
+struct pj_stun_sock
+{
+ char *obj_name; /* Log identification */
+ pj_pool_t *pool; /* Pool */
+ void *user_data; /* Application user data */
+
+ int af; /* Address family */
+ pj_stun_config stun_cfg; /* STUN config (ioqueue etc)*/
+ pj_stun_sock_cb cb; /* Application callbacks */
+
+ int ka_interval; /* Keep alive interval */
+ pj_timer_entry ka_timer; /* Keep alive timer. */
+
+ pj_sockaddr srv_addr; /* Resolved server addr */
+ pj_sockaddr mapped_addr; /* Our public address */
+
+ pj_dns_srv_async_query *q; /* Pending DNS query */
+ pj_sock_t sock_fd; /* Socket descriptor */
+ pj_activesock_t *active_sock; /* Active socket object */
+ pj_ioqueue_op_key_t send_key; /* Default send key for app */
+ pj_ioqueue_op_key_t int_send_key; /* Send key for internal */
+
+ /* 96-bit transaction ID: first 5 words are random (80-bit prefix
+ * identifying our own messages), last word is a counter.
+ */
+ pj_uint16_t tsx_id[6]; /* .. to match STUN msg */
+ pj_stun_session *stun_sess; /* STUN session */
+
+};
+
+/*
+ * Prototypes for static functions
+ */
+
+/* This callback is called by the STUN session to send packet */
+static pj_status_t sess_on_send_msg(pj_stun_session *sess,
+ void *token,
+ const void *pkt,
+ pj_size_t pkt_size,
+ const pj_sockaddr_t *dst_addr,
+ unsigned addr_len);
+
+/* This callback is called by the STUN session when outgoing transaction
+ * is complete
+ */
+static void sess_on_request_complete(pj_stun_session *sess,
+ pj_status_t status,
+ void *token,
+ pj_stun_tx_data *tdata,
+ const pj_stun_msg *response,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len);
+/* DNS resolver callback */
+static void dns_srv_resolver_cb(void *user_data,
+ pj_status_t status,
+ const pj_dns_srv_record *rec);
+
+/* Start sending STUN Binding request */
+static pj_status_t get_mapped_addr(pj_stun_sock *stun_sock);
+
+/* Callback from active socket when incoming packet is received */
+static pj_bool_t on_data_recvfrom(pj_activesock_t *asock,
+ void *data,
+ pj_size_t size,
+ const pj_sockaddr_t *src_addr,
+ int addr_len,
+ pj_status_t status);
+
+/* Callback from active socket about send status */
+static pj_bool_t on_data_sent(pj_activesock_t *asock,
+ pj_ioqueue_op_key_t *send_key,
+ pj_ssize_t sent);
+
+/* Schedule keep-alive timer */
+static void start_ka_timer(pj_stun_sock *stun_sock);
+
+/* Keep-alive timer callback */
+static void ka_timer_cb(pj_timer_heap_t *th, pj_timer_entry *te);
+
+#define INTERNAL_MSG_TOKEN (void*)1
+
+
+/*
+ * Retrieve the name representing the specified operation.
+ *
+ * Returns "?" for the zero/unknown slot and "???" for out-of-range
+ * values, so the result is always a printable string.
+ */
+PJ_DEF(const char*) pj_stun_sock_op_name(pj_stun_sock_op op)
+{
+ /* static const: the table is built once, not on every call */
+ static const char *names[] = {
+ "?",
+ "DNS resolution",
+ "STUN Binding request",
+ "Keep-alive",
+ "Mapped addr. changed"
+ };
+
+ return op < PJ_ARRAY_SIZE(names) ? names[op] : "???";
+}
+
+
+/*
+ * Initialize the STUN transport setting with its default values:
+ * default packet size, one async receive operation, default keep-alive
+ * interval, best-effort QoS with errors ignored.
+ */
+PJ_DEF(void) pj_stun_sock_cfg_default(pj_stun_sock_cfg *cfg)
+{
+ pj_bzero(cfg, sizeof(*cfg));
+ cfg->max_pkt_size = PJ_STUN_SOCK_PKT_LEN;
+ cfg->async_cnt = 1;
+ cfg->ka_interval = PJ_STUN_KEEP_ALIVE_SEC;
+ cfg->qos_type = PJ_QOS_TYPE_BEST_EFFORT;
+ cfg->qos_ignore_error = PJ_TRUE;
+}
+
+
+/* Check that configuration setting is valid: packet size must allow
+ * at least a header, and at least one async receive is required.
+ */
+static pj_bool_t pj_stun_sock_cfg_is_valid(const pj_stun_sock_cfg *cfg)
+{
+ return cfg->max_pkt_size > 1 && cfg->async_cnt >= 1;
+}
+
+/*
+ * Create the STUN transport using the specified configuration.
+ *
+ * Creates and binds the UDP socket, applies QoS, wraps the socket in
+ * an active socket with async receive started, creates the internal
+ * STUN session, and seeds the random transaction-ID prefix. On any
+ * failure everything built so far is torn down via
+ * pj_stun_sock_destroy() and the error is returned.
+ */
+PJ_DEF(pj_status_t) pj_stun_sock_create( pj_stun_config *stun_cfg,
+ const char *name,
+ int af,
+ const pj_stun_sock_cb *cb,
+ const pj_stun_sock_cfg *cfg,
+ void *user_data,
+ pj_stun_sock **p_stun_sock)
+{
+ pj_pool_t *pool;
+ pj_stun_sock *stun_sock;
+ pj_stun_sock_cfg default_cfg;
+ unsigned i;
+ pj_status_t status;
+
+ PJ_ASSERT_RETURN(stun_cfg && cb && p_stun_sock, PJ_EINVAL);
+ PJ_ASSERT_RETURN(af==pj_AF_INET()||af==pj_AF_INET6(), PJ_EAFNOTSUP);
+ PJ_ASSERT_RETURN(!cfg || pj_stun_sock_cfg_is_valid(cfg), PJ_EINVAL);
+ PJ_ASSERT_RETURN(cb->on_status, PJ_EINVAL);
+
+ status = pj_stun_config_check_valid(stun_cfg);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ if (name == NULL)
+ name = "stuntp%p";
+
+ /* Use default settings when caller passes NULL config */
+ if (cfg == NULL) {
+ pj_stun_sock_cfg_default(&default_cfg);
+ cfg = &default_cfg;
+ }
+
+
+ /* Create structure */
+ pool = pj_pool_create(stun_cfg->pf, name, 256, 512, NULL);
+ stun_sock = PJ_POOL_ZALLOC_T(pool, pj_stun_sock);
+ stun_sock->pool = pool;
+ stun_sock->obj_name = pool->obj_name;
+ stun_sock->user_data = user_data;
+ stun_sock->af = af;
+ stun_sock->sock_fd = PJ_INVALID_SOCKET;
+ pj_memcpy(&stun_sock->stun_cfg, stun_cfg, sizeof(*stun_cfg));
+ pj_memcpy(&stun_sock->cb, cb, sizeof(*cb));
+
+ stun_sock->ka_interval = cfg->ka_interval;
+ if (stun_sock->ka_interval == 0)
+ stun_sock->ka_interval = PJ_STUN_KEEP_ALIVE_SEC;
+
+ /* Create socket and bind socket */
+ status = pj_sock_socket(af, pj_SOCK_DGRAM(), 0, &stun_sock->sock_fd);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ /* Apply QoS, if specified */
+ status = pj_sock_apply_qos2(stun_sock->sock_fd, cfg->qos_type,
+ &cfg->qos_params, 2, stun_sock->obj_name,
+ NULL);
+ if (status != PJ_SUCCESS && !cfg->qos_ignore_error)
+ goto on_error;
+
+ /* Bind socket: to the caller-specified address, or to any address
+ * with an ephemeral port otherwise.
+ */
+ if (pj_sockaddr_has_addr(&cfg->bound_addr)) {
+ status = pj_sock_bind(stun_sock->sock_fd, &cfg->bound_addr,
+ pj_sockaddr_get_len(&cfg->bound_addr));
+ } else {
+ pj_sockaddr bound_addr;
+
+ pj_sockaddr_init(af, &bound_addr, NULL, 0);
+ status = pj_sock_bind(stun_sock->sock_fd, &bound_addr,
+ pj_sockaddr_get_len(&bound_addr));
+ }
+
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ /* Create more useful information string about this transport */
+#if 0
+ {
+ pj_sockaddr bound_addr;
+ int addr_len = sizeof(bound_addr);
+
+ status = pj_sock_getsockname(stun_sock->sock_fd, &bound_addr,
+ &addr_len);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ stun_sock->info = pj_pool_alloc(pool, PJ_INET6_ADDRSTRLEN+10);
+ pj_sockaddr_print(&bound_addr, stun_sock->info,
+ PJ_INET6_ADDRSTRLEN, 3);
+ }
+#endif
+
+ /* Init active socket configuration */
+ {
+ pj_activesock_cfg activesock_cfg;
+ pj_activesock_cb activesock_cb;
+
+ pj_activesock_cfg_default(&activesock_cfg);
+ activesock_cfg.async_cnt = cfg->async_cnt;
+ activesock_cfg.concurrency = 0;
+
+ /* Create the active socket */
+ pj_bzero(&activesock_cb, sizeof(activesock_cb));
+ activesock_cb.on_data_recvfrom = &on_data_recvfrom;
+ activesock_cb.on_data_sent = &on_data_sent;
+ status = pj_activesock_create(pool, stun_sock->sock_fd,
+ pj_SOCK_DGRAM(),
+ &activesock_cfg, stun_cfg->ioqueue,
+ &activesock_cb, stun_sock,
+ &stun_sock->active_sock);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ /* Start asynchronous read operations */
+ status = pj_activesock_start_recvfrom(stun_sock->active_sock, pool,
+ cfg->max_pkt_size, 0);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ /* Init send keys */
+ pj_ioqueue_op_key_init(&stun_sock->send_key,
+ sizeof(stun_sock->send_key));
+ pj_ioqueue_op_key_init(&stun_sock->int_send_key,
+ sizeof(stun_sock->int_send_key));
+ }
+
+ /* Create STUN session */
+ {
+ pj_stun_session_cb sess_cb;
+
+ pj_bzero(&sess_cb, sizeof(sess_cb));
+ sess_cb.on_request_complete = &sess_on_request_complete;
+ sess_cb.on_send_msg = &sess_on_send_msg;
+ status = pj_stun_session_create(&stun_sock->stun_cfg,
+ stun_sock->obj_name,
+ &sess_cb, PJ_FALSE,
+ &stun_sock->stun_sess);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+ }
+
+ /* Associate us with the STUN session */
+ pj_stun_session_set_user_data(stun_sock->stun_sess, stun_sock);
+
+ /* Initialize random numbers to be used as STUN transaction ID for
+ * outgoing Binding request. We use the 80bit number to distinguish
+ * STUN messages we sent with STUN messages that the application sends.
+ * The last 16bit value in the array is a counter.
+ */
+ for (i=0; i<PJ_ARRAY_SIZE(stun_sock->tsx_id); ++i) {
+ stun_sock->tsx_id[i] = (pj_uint16_t) pj_rand();
+ }
+ stun_sock->tsx_id[5] = 0;
+
+
+ /* Init timer entry */
+ stun_sock->ka_timer.cb = &ka_timer_cb;
+ stun_sock->ka_timer.user_data = stun_sock;
+
+ /* Done */
+ *p_stun_sock = stun_sock;
+ return PJ_SUCCESS;
+
+on_error:
+ pj_stun_sock_destroy(stun_sock);
+ return status;
+}
+
+/* Start socket. Resolves the STUN server given as a domain name or IP
+ * literal, then kicks off the first Binding request. When a resolver
+ * is supplied and the domain is not an IP literal, DNS SRV resolution
+ * is started asynchronously and processing resumes in the DNS callback.
+ */
+PJ_DEF(pj_status_t) pj_stun_sock_start( pj_stun_sock *stun_sock,
+ const pj_str_t *domain,
+ pj_uint16_t default_port,
+ pj_dns_resolver *resolver)
+{
+ pj_status_t status;
+
+ PJ_ASSERT_RETURN(stun_sock && domain && default_port, PJ_EINVAL);
+
+ /* Check whether the domain contains IP address */
+ stun_sock->srv_addr.addr.sa_family = (pj_uint16_t)stun_sock->af;
+ status = pj_inet_pton(stun_sock->af, domain,
+ pj_sockaddr_get_addr(&stun_sock->srv_addr));
+ if (status != PJ_SUCCESS) {
+ /* Not an IP literal; clear family so the address is unset */
+ stun_sock->srv_addr.addr.sa_family = (pj_uint16_t)0;
+ }
+
+ /* If resolver is set, try to resolve with DNS SRV first. It
+ * will fallback to DNS A/AAAA when no SRV record is found.
+ */
+ if (status != PJ_SUCCESS && resolver) {
+ const pj_str_t res_name = pj_str("_stun._udp.");
+ unsigned opt;
+
+ pj_assert(stun_sock->q == NULL);
+
+ opt = PJ_DNS_SRV_FALLBACK_A;
+ if (stun_sock->af == pj_AF_INET6()) {
+ opt |= (PJ_DNS_SRV_RESOLVE_AAAA | PJ_DNS_SRV_FALLBACK_AAAA);
+ }
+
+ status = pj_dns_srv_resolve(domain, &res_name, default_port,
+ stun_sock->pool, resolver, opt,
+ stun_sock, &dns_srv_resolver_cb,
+ &stun_sock->q);
+
+ /* Processing will resume when the DNS SRV callback is called */
+ return status;
+
+ } else {
+
+ /* No resolver: do a blocking hostname lookup when the domain
+ * was not an IP literal.
+ */
+ if (status != PJ_SUCCESS) {
+ pj_addrinfo ai;
+ unsigned cnt = 1;
+
+ status = pj_getaddrinfo(stun_sock->af, domain, &cnt, &ai);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ pj_sockaddr_cp(&stun_sock->srv_addr, &ai.ai_addr);
+ }
+
+ pj_sockaddr_set_port(&stun_sock->srv_addr, (pj_uint16_t)default_port);
+
+ /* Start sending Binding request */
+ return get_mapped_addr(stun_sock);
+ }
+}
+
+/* Destroy the STUN transport. Cancels any pending DNS query and the
+ * keep-alive timer, closes the (active) socket first to stop stray
+ * callbacks, destroys the internal STUN session, then releases the
+ * pool (which owns this object).
+ */
+PJ_DEF(pj_status_t) pj_stun_sock_destroy(pj_stun_sock *stun_sock)
+{
+ if (stun_sock->q) {
+ pj_dns_srv_cancel_query(stun_sock->q, PJ_FALSE);
+ stun_sock->q = NULL;
+ }
+
+ /* Destroy the active socket first just in case we'll get
+ * stray callback.
+ */
+ if (stun_sock->active_sock != NULL) {
+ /* active socket owns the fd; closing it closes the socket too */
+ pj_activesock_close(stun_sock->active_sock);
+ stun_sock->active_sock = NULL;
+ stun_sock->sock_fd = PJ_INVALID_SOCKET;
+ } else if (stun_sock->sock_fd != PJ_INVALID_SOCKET) {
+ pj_sock_close(stun_sock->sock_fd);
+ stun_sock->sock_fd = PJ_INVALID_SOCKET;
+ }
+
+ if (stun_sock->ka_timer.id != 0) {
+ pj_timer_heap_cancel(stun_sock->stun_cfg.timer_heap,
+ &stun_sock->ka_timer);
+ stun_sock->ka_timer.id = 0;
+ }
+
+ if (stun_sock->stun_sess) {
+ pj_stun_session_destroy(stun_sock->stun_sess);
+ stun_sock->stun_sess = NULL;
+ }
+
+ /* Releasing the pool frees stun_sock itself; clear the pointer
+ * first so this function is safe against re-entry.
+ */
+ if (stun_sock->pool) {
+ pj_pool_t *pool = stun_sock->pool;
+ stun_sock->pool = NULL;
+ pj_pool_release(pool);
+ }
+
+ return PJ_SUCCESS;
+}
+
+/* Associate arbitrary application data with the transport; retrieve
+ * it later with pj_stun_sock_get_user_data().
+ */
+PJ_DEF(pj_status_t) pj_stun_sock_set_user_data( pj_stun_sock *stun_sock,
+ void *user_data)
+{
+ PJ_ASSERT_RETURN(stun_sock, PJ_EINVAL);
+ stun_sock->user_data = user_data;
+ return PJ_SUCCESS;
+}
+
+
+/* Get the application data previously set with
+ * pj_stun_sock_set_user_data() (NULL if never set).
+ */
+PJ_DEF(void*) pj_stun_sock_get_user_data(pj_stun_sock *stun_sock)
+{
+ PJ_ASSERT_RETURN(stun_sock, NULL);
+ return stun_sock->user_data;
+}
+
+/* Notify application that session has failed. Returns the value from
+ * the application's on_status callback, which tells the caller whether
+ * the transport is still alive (PJ_TRUE) or has been destroyed.
+ */
+static pj_bool_t sess_fail(pj_stun_sock *stun_sock,
+ pj_stun_sock_op op,
+ pj_status_t status)
+{
+ pj_bool_t ret;
+
+ PJ_PERROR(4,(stun_sock->obj_name, status,
+ "Session failed because %s failed",
+ pj_stun_sock_op_name(op)));
+
+ ret = (*stun_sock->cb.on_status)(stun_sock, op, status);
+
+ return ret;
+}
+
+/* DNS resolver callback. Invoked when the asynchronous DNS SRV query
+ * started by pj_stun_sock_start() completes; stores the first resolved
+ * server address and starts the Binding request.
+ * NOTE(review): this path currently handles IPv4 only (see PJ_TODO).
+ */
+static void dns_srv_resolver_cb(void *user_data,
+ pj_status_t status,
+ const pj_dns_srv_record *rec)
+{
+ pj_stun_sock *stun_sock = (pj_stun_sock*) user_data;
+
+ /* Clear query */
+ stun_sock->q = NULL;
+
+ /* Handle error */
+ if (status != PJ_SUCCESS) {
+ sess_fail(stun_sock, PJ_STUN_SOCK_DNS_OP, status);
+ return;
+ }
+
+ pj_assert(rec->count);
+ pj_assert(rec->entry[0].server.addr_count);
+
+ PJ_TODO(SUPPORT_IPV6_IN_RESOLVER);
+ pj_assert(stun_sock->af == pj_AF_INET());
+
+ /* Set the address from the first SRV entry's first A record */
+ pj_sockaddr_in_init(&stun_sock->srv_addr.ipv4, NULL,
+ rec->entry[0].port);
+ stun_sock->srv_addr.ipv4.sin_addr = rec->entry[0].server.addr[0];
+
+ /* Start sending Binding request */
+ get_mapped_addr(stun_sock);
+}
+
+
+/* Start sending STUN Binding request to the resolved server address.
+ * Used both for the initial mapped-address discovery and for keep-alive
+ * refreshes. On failure the application is notified via sess_fail().
+ */
+static pj_status_t get_mapped_addr(pj_stun_sock *stun_sock)
+{
+ pj_stun_tx_data *tdata;
+ pj_status_t status;
+
+ /* Increment request counter (last word of tsx_id) and create STUN
+ * Binding request; the 80-bit random prefix lets us recognize our
+ * own responses in on_data_recvfrom().
+ */
+ ++stun_sock->tsx_id[5];
+ status = pj_stun_session_create_req(stun_sock->stun_sess,
+ PJ_STUN_BINDING_REQUEST,
+ PJ_STUN_MAGIC,
+ (const pj_uint8_t*)stun_sock->tsx_id,
+ &tdata);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ /* Send request */
+ status=pj_stun_session_send_msg(stun_sock->stun_sess, INTERNAL_MSG_TOKEN,
+ PJ_FALSE, PJ_TRUE, &stun_sock->srv_addr,
+ pj_sockaddr_get_len(&stun_sock->srv_addr),
+ tdata);
+ if (status != PJ_SUCCESS && status != PJ_EPENDING)
+ goto on_error;
+
+ return PJ_SUCCESS;
+
+on_error:
+ sess_fail(stun_sock, PJ_STUN_SOCK_BINDING_OP, status);
+ return status;
+}
+
+/* Get info about the transport: server address, mapped address, bound
+ * address, and the list of local address aliases. When the socket is
+ * bound to a wildcard address, all host interfaces are enumerated and
+ * the default IP is moved to the first slot of the alias list.
+ */
+PJ_DEF(pj_status_t) pj_stun_sock_get_info( pj_stun_sock *stun_sock,
+ pj_stun_sock_info *info)
+{
+ int addr_len;
+ pj_status_t status;
+
+ PJ_ASSERT_RETURN(stun_sock && info, PJ_EINVAL);
+
+ /* Copy STUN server address and mapped address */
+ pj_memcpy(&info->srv_addr, &stun_sock->srv_addr,
+ sizeof(pj_sockaddr));
+ pj_memcpy(&info->mapped_addr, &stun_sock->mapped_addr,
+ sizeof(pj_sockaddr));
+
+ /* Retrieve bound address */
+ addr_len = sizeof(info->bound_addr);
+ status = pj_sock_getsockname(stun_sock->sock_fd, &info->bound_addr,
+ &addr_len);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ /* If socket is bound to a specific interface, then only put that
+ * interface in the alias list. Otherwise query all the interfaces
+ * in the host.
+ */
+ if (pj_sockaddr_has_addr(&info->bound_addr)) {
+ info->alias_cnt = 1;
+ pj_sockaddr_cp(&info->aliases[0], &info->bound_addr);
+ } else {
+ pj_sockaddr def_addr;
+ pj_uint16_t port = pj_sockaddr_get_port(&info->bound_addr);
+ unsigned i;
+
+ /* Get the default address */
+ status = pj_gethostip(stun_sock->af, &def_addr);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ pj_sockaddr_set_port(&def_addr, port);
+
+ /* Enum all IP interfaces in the host */
+ info->alias_cnt = PJ_ARRAY_SIZE(info->aliases);
+ status = pj_enum_ip_interface(stun_sock->af, &info->alias_cnt,
+ info->aliases);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ /* Set the port number for each address.
+ */
+ for (i=0; i<info->alias_cnt; ++i) {
+ pj_sockaddr_set_port(&info->aliases[i], port);
+ }
+
+ /* Put the default IP in the first slot */
+ for (i=0; i<info->alias_cnt; ++i) {
+ if (pj_sockaddr_cmp(&info->aliases[i], &def_addr)==0) {
+ if (i!=0) {
+ pj_sockaddr_cp(&info->aliases[i], &info->aliases[0]);
+ pj_sockaddr_cp(&info->aliases[0], &def_addr);
+ }
+ break;
+ }
+ }
+ }
+
+ return PJ_SUCCESS;
+}
+
+/* Send application data asynchronously via the active socket. When
+ * send_key is NULL the transport's default key is used (and NULL is
+ * reported back in the on_data_sent callback).
+ */
+PJ_DEF(pj_status_t) pj_stun_sock_sendto( pj_stun_sock *stun_sock,
+ pj_ioqueue_op_key_t *send_key,
+ const void *pkt,
+ unsigned pkt_len,
+ unsigned flag,
+ const pj_sockaddr_t *dst_addr,
+ unsigned addr_len)
+{
+ pj_ssize_t size;
+ PJ_ASSERT_RETURN(stun_sock && pkt && dst_addr && addr_len, PJ_EINVAL);
+
+ if (send_key==NULL)
+ send_key = &stun_sock->send_key;
+
+ size = pkt_len;
+ return pj_activesock_sendto(stun_sock->active_sock, send_key,
+ pkt, &size, flag, dst_addr, addr_len);
+}
+
+/* This callback is called by the STUN session to send packet. The
+ * internal send key is used so on_data_sent() can tell internal
+ * traffic apart from application sends.
+ */
+static pj_status_t sess_on_send_msg(pj_stun_session *sess,
+ void *token,
+ const void *pkt,
+ pj_size_t pkt_size,
+ const pj_sockaddr_t *dst_addr,
+ unsigned addr_len)
+{
+ pj_stun_sock *stun_sock;
+ pj_ssize_t size;
+
+ stun_sock = (pj_stun_sock *) pj_stun_session_get_user_data(sess);
+
+ /* Only internally-generated messages go through this session */
+ pj_assert(token==INTERNAL_MSG_TOKEN);
+ PJ_UNUSED_ARG(token);
+
+ size = pkt_size;
+ return pj_activesock_sendto(stun_sock->active_sock,
+ &stun_sock->int_send_key,
+ pkt, &size, 0, dst_addr, addr_len);
+}
+
+/* This callback is called by the STUN session when outgoing transaction
+ * is complete. Extracts the mapped address from the Binding response,
+ * notifies the application of success/failure or of a mapped-address
+ * change, and reschedules the keep-alive timer when the callback
+ * allows it (returns PJ_TRUE).
+ */
+static void sess_on_request_complete(pj_stun_session *sess,
+ pj_status_t status,
+ void *token,
+ pj_stun_tx_data *tdata,
+ const pj_stun_msg *response,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len)
+{
+ pj_stun_sock *stun_sock;
+ const pj_stun_sockaddr_attr *mapped_attr;
+ pj_stun_sock_op op;
+ pj_bool_t mapped_changed;
+ pj_bool_t resched = PJ_TRUE;
+
+ stun_sock = (pj_stun_sock *) pj_stun_session_get_user_data(sess);
+
+ PJ_UNUSED_ARG(tdata);
+ PJ_UNUSED_ARG(token);
+ PJ_UNUSED_ARG(src_addr);
+ PJ_UNUSED_ARG(src_addr_len);
+
+ /* Check if this is a keep-alive or the first Binding request:
+ * the mapped address is only set after the first success.
+ */
+ if (pj_sockaddr_has_addr(&stun_sock->mapped_addr))
+ op = PJ_STUN_SOCK_KEEP_ALIVE_OP;
+ else
+ op = PJ_STUN_SOCK_BINDING_OP;
+
+ /* Handle failure */
+ if (status != PJ_SUCCESS) {
+ resched = sess_fail(stun_sock, op, status);
+ goto on_return;
+ }
+
+ /* Get XOR-MAPPED-ADDRESS, or MAPPED-ADDRESS when XOR-MAPPED-ADDRESS
+ * doesn't exist.
+ */
+ mapped_attr = (const pj_stun_sockaddr_attr*)
+ pj_stun_msg_find_attr(response, PJ_STUN_ATTR_XOR_MAPPED_ADDR,
+ 0);
+ if (mapped_attr==NULL) {
+ mapped_attr = (const pj_stun_sockaddr_attr*)
+ pj_stun_msg_find_attr(response, PJ_STUN_ATTR_MAPPED_ADDR,
+ 0);
+ }
+
+ if (mapped_attr == NULL) {
+ resched = sess_fail(stun_sock, op, PJNATH_ESTUNNOMAPPEDADDR);
+ goto on_return;
+ }
+
+ /* Determine if mapped address has changed, and save the new mapped
+ * address and call callback if so
+ */
+ mapped_changed = !pj_sockaddr_has_addr(&stun_sock->mapped_addr) ||
+ pj_sockaddr_cmp(&stun_sock->mapped_addr,
+ &mapped_attr->sockaddr) != 0;
+ if (mapped_changed) {
+ /* Print mapped address */
+ {
+ char addrinfo[PJ_INET6_ADDRSTRLEN+10];
+ PJ_LOG(4,(stun_sock->obj_name,
+ "STUN mapped address found/changed: %s",
+ pj_sockaddr_print(&mapped_attr->sockaddr,
+ addrinfo, sizeof(addrinfo), 3)));
+ }
+
+ pj_sockaddr_cp(&stun_sock->mapped_addr, &mapped_attr->sockaddr);
+
+ /* Report a keep-alive that changed the mapping as ADDR_CHANGE */
+ if (op==PJ_STUN_SOCK_KEEP_ALIVE_OP)
+ op = PJ_STUN_SOCK_MAPPED_ADDR_CHANGE;
+ }
+
+ /* Notify user */
+ resched = (*stun_sock->cb.on_status)(stun_sock, op, PJ_SUCCESS);
+
+on_return:
+ /* Start/restart keep-alive timer, unless the callback asked us to
+ * stop (e.g. the transport was destroyed inside the callback).
+ */
+ if (resched)
+ start_ka_timer(stun_sock);
+}
+
+/* Schedule keep-alive timer, cancelling any previously scheduled one
+ * first. A non-positive ka_interval disables keep-alive entirely.
+ */
+static void start_ka_timer(pj_stun_sock *stun_sock)
+{
+ /* Cancel previous timer, if any */
+ if (stun_sock->ka_timer.id != 0) {
+ pj_timer_heap_cancel(stun_sock->stun_cfg.timer_heap,
+ &stun_sock->ka_timer);
+ stun_sock->ka_timer.id = 0;
+ }
+
+ pj_assert(stun_sock->ka_interval != 0);
+ if (stun_sock->ka_interval > 0) {
+ pj_time_val delay;
+
+ delay.sec = stun_sock->ka_interval;
+ delay.msec = 0;
+
+ if (pj_timer_heap_schedule(stun_sock->stun_cfg.timer_heap,
+ &stun_sock->ka_timer,
+ &delay) == PJ_SUCCESS)
+ {
+ /* Non-zero id marks the timer as active */
+ stun_sock->ka_timer.id = PJ_TRUE;
+ }
+ }
+}
+
+/* Keep-alive timer callback: fires when it is time to refresh the
+ * binding with another STUN Binding request.
+ */
+static void ka_timer_cb(pj_timer_heap_t *th, pj_timer_entry *te)
+{
+ pj_stun_sock *stun_sock;
+
+ stun_sock = (pj_stun_sock *) te->user_data;
+
+ PJ_UNUSED_ARG(th);
+
+ /* Time to send STUN Binding request; on failure the application has
+ * already been notified via sess_fail() inside get_mapped_addr().
+ */
+ if (get_mapped_addr(stun_sock) != PJ_SUCCESS)
+ return;
+
+ /* Next keep-alive timer will be scheduled once the request
+ * is complete.
+ */
+}
+
+/* Callback from active socket when incoming packet is received.
+ * Demultiplexes traffic: Binding responses whose transaction ID carries
+ * our random 80-bit prefix go to the internal STUN session; everything
+ * else (non-STUN data, or STUN messages belonging to the application)
+ * is passed to the on_rx_data callback. Returns PJ_TRUE to keep
+ * reading, PJ_FALSE when the transport has been destroyed.
+ */
+static pj_bool_t on_data_recvfrom(pj_activesock_t *asock,
+ void *data,
+ pj_size_t size,
+ const pj_sockaddr_t *src_addr,
+ int addr_len,
+ pj_status_t status)
+{
+ pj_stun_sock *stun_sock;
+ pj_stun_msg_hdr *hdr;
+ pj_uint16_t type;
+
+ stun_sock = (pj_stun_sock*) pj_activesock_get_user_data(asock);
+
+ /* Log socket error */
+ if (status != PJ_SUCCESS) {
+ PJ_PERROR(2,(stun_sock->obj_name, status, "recvfrom() error"));
+ return PJ_TRUE;
+ }
+
+ /* Check that this is STUN message */
+ status = pj_stun_msg_check((const pj_uint8_t*)data, size,
+ PJ_STUN_IS_DATAGRAM | PJ_STUN_CHECK_PACKET);
+ if (status != PJ_SUCCESS) {
+ /* Not STUN -- give it to application */
+ goto process_app_data;
+ }
+
+ /* Treat packet as STUN header and copy the STUN message type.
+ * We don't want to access the type directly from the header
+ * since it may not be properly aligned.
+ */
+ hdr = (pj_stun_msg_hdr*) data;
+ pj_memcpy(&type, &hdr->type, 2);
+ type = pj_ntohs(type);
+
+ /* If the packet is a STUN Binding response and part of the
+ * transaction ID matches our internal ID, then this is
+ * our internal STUN message (Binding request or keep alive).
+ * Give it to our STUN session. Only the first 10 bytes (the
+ * random prefix) are compared; the last word is a counter.
+ */
+ if (!PJ_STUN_IS_RESPONSE(type) ||
+ PJ_STUN_GET_METHOD(type) != PJ_STUN_BINDING_METHOD ||
+ pj_memcmp(hdr->tsx_id, stun_sock->tsx_id, 10) != 0)
+ {
+ /* Not STUN Binding response, or STUN transaction ID mismatch.
+ * This is not our message too -- give it to application.
+ */
+ goto process_app_data;
+ }
+
+ /* This is our STUN Binding response. Give it to the STUN session */
+ status = pj_stun_session_on_rx_pkt(stun_sock->stun_sess, data, size,
+ PJ_STUN_IS_DATAGRAM, NULL, NULL,
+ src_addr, addr_len);
+ return status!=PJNATH_ESTUNDESTROYED ? PJ_TRUE : PJ_FALSE;
+
+process_app_data:
+ if (stun_sock->cb.on_rx_data) {
+ pj_bool_t ret;
+
+ ret = (*stun_sock->cb.on_rx_data)(stun_sock, data, size,
+ src_addr, addr_len);
+ return ret;
+ }
+
+ return PJ_TRUE;
+}
+
+/* Callback from active socket about send status. Internal (keep-alive)
+ * sends are swallowed; application sends are reported through the
+ * on_data_sent callback, with the default send key mapped back to NULL.
+ */
+static pj_bool_t on_data_sent(pj_activesock_t *asock,
+ pj_ioqueue_op_key_t *send_key,
+ pj_ssize_t sent)
+{
+ pj_stun_sock *stun_sock;
+
+ stun_sock = (pj_stun_sock*) pj_activesock_get_user_data(asock);
+
+ /* Don't report to callback if this is internal message */
+ if (send_key == &stun_sock->int_send_key) {
+ return PJ_TRUE;
+ }
+
+ /* Report to callback */
+ if (stun_sock->cb.on_data_sent) {
+ pj_bool_t ret;
+
+ /* If app gives NULL send_key in sendto() function, then give
+ * NULL in the callback too
+ */
+ if (send_key == &stun_sock->send_key)
+ send_key = NULL;
+
+ /* Call callback */
+ ret = (*stun_sock->cb.on_data_sent)(stun_sock, send_key, sent);
+
+ return ret;
+ }
+
+ return PJ_TRUE;
+}
+
diff --git a/pjnath/src/pjnath/stun_transaction.c b/pjnath/src/pjnath/stun_transaction.c
new file mode 100644
index 0000000..d714ecf
--- /dev/null
+++ b/pjnath/src/pjnath/stun_transaction.c
@@ -0,0 +1,448 @@
+/* $Id: stun_transaction.c 3753 2011-09-18 14:59:56Z bennylp $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <pjnath/stun_transaction.h>
+#include <pjnath/errno.h>
+#include <pj/assert.h>
+#include <pj/log.h>
+#include <pj/pool.h>
+#include <pj/string.h>
+#include <pj/timer.h>
+
+
+#define TIMER_ACTIVE 1
+
+
+/* STUN client transaction state. One instance tracks a single
+ * request/response exchange, including its retransmission schedule
+ * and the deferred-destroy timer.
+ */
+struct pj_stun_client_tsx
+{
+    char		 obj_name[PJ_MAX_OBJ_NAME];  /* Log identifier */
+    pj_stun_tsx_cb	 cb;            /* Application callbacks */
+    void		*user_data;     /* Opaque app data */
+
+    pj_bool_t		 complete;      /* Final response/timeout seen? */
+
+    pj_bool_t		 require_retransmit; /* UDP-style retransmit? */
+    unsigned		 rto_msec;      /* Initial retransmission timeout */
+    pj_timer_entry	 retransmit_timer; /* Doubles as timeout timer */
+    unsigned		 transmit_count;   /* Number of sends so far */
+    pj_time_val		 retransmit_time;  /* Current retransmit delay */
+    pj_timer_heap_t	*timer_heap;
+
+    pj_timer_entry	 destroy_timer; /* Deferred destroy (see schedule_destroy) */
+
+    void		*last_pkt;      /* Borrowed: encoded request packet */
+    unsigned		 last_pkt_size;
+};
+
+
+static void retransmit_timer_callback(pj_timer_heap_t *timer_heap,
+ pj_timer_entry *timer);
+static void destroy_timer_callback(pj_timer_heap_t *timer_heap,
+ pj_timer_entry *timer);
+
+#define stun_perror(tsx,msg,rc) pjnath_perror(tsx->obj_name, msg, rc)
+
+/*
+ * Create a STUN client transaction.
+ *
+ * The transaction is allocated from the caller's pool (zero-initialized)
+ * and copies its RTO and timer heap from the supplied config. The
+ * on_send_msg callback is mandatory because all transmission is
+ * delegated to the owner.
+ */
+PJ_DEF(pj_status_t) pj_stun_client_tsx_create(pj_stun_config *cfg,
+					      pj_pool_t *pool,
+					      const pj_stun_tsx_cb *cb,
+					      pj_stun_client_tsx **p_tsx)
+{
+    pj_stun_client_tsx *tsx;
+
+    PJ_ASSERT_RETURN(cfg && cb && p_tsx, PJ_EINVAL);
+    PJ_ASSERT_RETURN(cb->on_send_msg, PJ_EINVAL);
+
+    tsx = PJ_POOL_ZALLOC_T(pool, pj_stun_client_tsx);
+    tsx->rto_msec = cfg->rto_msec;
+    tsx->timer_heap = cfg->timer_heap;
+    pj_memcpy(&tsx->cb, cb, sizeof(*cb));
+
+    /* Both timer entries point back at the transaction */
+    tsx->retransmit_timer.cb = &retransmit_timer_callback;
+    tsx->retransmit_timer.user_data = tsx;
+
+    tsx->destroy_timer.cb = &destroy_timer_callback;
+    tsx->destroy_timer.user_data = tsx;
+
+    pj_ansi_snprintf(tsx->obj_name, sizeof(tsx->obj_name), "stuntsx%p", tsx);
+
+    *p_tsx = tsx;
+
+    PJ_LOG(5,(tsx->obj_name, "STUN client transaction created"));
+    return PJ_SUCCESS;
+}
+
+
+/* Schedule the transaction to be destroyed after the given delay.
+ * Used to keep a completed transaction around briefly (e.g. to absorb
+ * retransmitted responses). Cancels any pending retransmission and
+ * clears on_complete so the result is not reported twice; on_destroy
+ * will fire from the timer instead.
+ */
+PJ_DEF(pj_status_t) pj_stun_client_tsx_schedule_destroy(
+				        pj_stun_client_tsx *tsx,
+				        const pj_time_val *delay)
+{
+    pj_status_t status;
+
+    PJ_ASSERT_RETURN(tsx && delay, PJ_EINVAL);
+    PJ_ASSERT_RETURN(tsx->cb.on_destroy, PJ_EINVAL);
+
+    /* Cancel previously registered timer */
+    if (tsx->destroy_timer.id != 0) {
+	pj_timer_heap_cancel(tsx->timer_heap, &tsx->destroy_timer);
+	tsx->destroy_timer.id = 0;
+    }
+
+    /* Stop retransmission, just in case */
+    if (tsx->retransmit_timer.id != 0) {
+	pj_timer_heap_cancel(tsx->timer_heap, &tsx->retransmit_timer);
+	tsx->retransmit_timer.id = 0;
+    }
+
+    status = pj_timer_heap_schedule(tsx->timer_heap,
+				    &tsx->destroy_timer, delay);
+    if (status != PJ_SUCCESS)
+	return status;
+
+    tsx->destroy_timer.id = TIMER_ACTIVE;
+    /* Suppress any further completion report */
+    tsx->cb.on_complete = NULL;
+
+    return PJ_SUCCESS;
+}
+
+
+/*
+ * Destroy transaction immediately.
+ *
+ * Only cancels the outstanding timers; the memory itself is pool-owned
+ * and is reclaimed when the owner releases the pool.
+ */
+PJ_DEF(pj_status_t) pj_stun_client_tsx_destroy(pj_stun_client_tsx *tsx)
+{
+    PJ_ASSERT_RETURN(tsx, PJ_EINVAL);
+
+    if (tsx->retransmit_timer.id != 0) {
+	pj_timer_heap_cancel(tsx->timer_heap, &tsx->retransmit_timer);
+	tsx->retransmit_timer.id = 0;
+    }
+    if (tsx->destroy_timer.id != 0) {
+	pj_timer_heap_cancel(tsx->timer_heap, &tsx->destroy_timer);
+	tsx->destroy_timer.id = 0;
+    }
+
+    PJ_LOG(5,(tsx->obj_name, "STUN client transaction destroyed"));
+    return PJ_SUCCESS;
+}
+
+
+/*
+ * Check if transaction has completed (final response received or
+ * the transaction timed out / failed to send).
+ */
+PJ_DEF(pj_bool_t) pj_stun_client_tsx_is_complete(pj_stun_client_tsx *tsx)
+{
+    PJ_ASSERT_RETURN(tsx, PJ_FALSE);
+    return tsx->complete;
+}
+
+
+/*
+ * Set user data. The pointer is stored verbatim; the transaction does
+ * not own or interpret it.
+ */
+PJ_DEF(pj_status_t) pj_stun_client_tsx_set_data(pj_stun_client_tsx *tsx,
+						void *data)
+{
+    PJ_ASSERT_RETURN(tsx, PJ_EINVAL);
+    tsx->user_data = data;
+    return PJ_SUCCESS;
+}
+
+
+/*
+ * Get the user data previously set with pj_stun_client_tsx_set_data().
+ * Returns NULL if tsx is NULL.
+ */
+PJ_DEF(void*) pj_stun_client_tsx_get_data(pj_stun_client_tsx *tsx)
+{
+    PJ_ASSERT_RETURN(tsx, NULL);
+    return tsx->user_data;
+}
+
+
+/*
+ * Transmit (or retransmit) the last encoded message.
+ *
+ * For retransmitting transactions the next retransmit/timeout delay is
+ * computed first (exponential backoff: RTO, then doubling, capped by
+ * PJ_STUN_TIMEOUT_VALUE on the last attempt) and the timer is scheduled
+ * BEFORE sending, because a failed send can be rolled back by
+ * cancelling the timer, while a failed schedule after a successful
+ * send could not be rolled back.
+ *
+ * Returns the on_send_msg() status; PJNATH_ESTUNDESTROYED means the
+ * transaction was destroyed from within the callback and must not be
+ * touched afterwards.
+ */
+static pj_status_t tsx_transmit_msg(pj_stun_client_tsx *tsx)
+{
+    pj_status_t status;
+
+    PJ_ASSERT_RETURN(tsx->retransmit_timer.id == 0 ||
+		     !tsx->require_retransmit, PJ_EBUSY);
+
+    if (tsx->require_retransmit) {
+	/* Calculate retransmit/timeout delay */
+	if (tsx->transmit_count == 0) {
+	    /* First transmission: wait one RTO */
+	    tsx->retransmit_time.sec = 0;
+	    tsx->retransmit_time.msec = tsx->rto_msec;
+
+	} else if (tsx->transmit_count < PJ_STUN_MAX_TRANSMIT_COUNT-1) {
+	    unsigned msec;
+
+	    /* Double the previous delay (exponential backoff) */
+	    msec = PJ_TIME_VAL_MSEC(tsx->retransmit_time);
+	    msec <<= 1;
+	    tsx->retransmit_time.sec = msec / 1000;
+	    tsx->retransmit_time.msec = msec % 1000;
+
+	} else {
+	    /* Last attempt: wait the full timeout value */
+	    tsx->retransmit_time.sec = PJ_STUN_TIMEOUT_VALUE / 1000;
+	    tsx->retransmit_time.msec = PJ_STUN_TIMEOUT_VALUE % 1000;
+	}
+
+	/* Schedule timer first because when send_msg() failed we can
+	 * cancel it (as opposed to when schedule_timer() failed we cannot
+	 * cancel transmission).
+	 */
+	status = pj_timer_heap_schedule(tsx->timer_heap,
+					&tsx->retransmit_timer,
+					&tsx->retransmit_time);
+	if (status != PJ_SUCCESS) {
+	    tsx->retransmit_timer.id = 0;
+	    return status;
+	}
+	tsx->retransmit_timer.id = TIMER_ACTIVE;
+    }
+
+
+    tsx->transmit_count++;
+
+    PJ_LOG(5,(tsx->obj_name, "STUN sending message (transmit count=%d)",
+	      tsx->transmit_count));
+    pj_log_push_indent();
+
+    /* Send message */
+    status = tsx->cb.on_send_msg(tsx, tsx->last_pkt, tsx->last_pkt_size);
+
+    if (status == PJNATH_ESTUNDESTROYED) {
+	/* We've been destroyed, don't access the object. */
+    } else if (status != PJ_SUCCESS) {
+	/* Roll back the timer scheduled above */
+	if (tsx->retransmit_timer.id != 0) {
+	    pj_timer_heap_cancel(tsx->timer_heap,
+				 &tsx->retransmit_timer);
+	    tsx->retransmit_timer.id = 0;
+	}
+	stun_perror(tsx, "STUN error sending message", status);
+    }
+
+    pj_log_pop_indent();
+    return status;
+}
+
+
+/*
+ * Send outgoing message and start STUN transaction.
+ *
+ * The caller keeps ownership of 'pkt'; only the pointer/size are
+ * stored, so the buffer must stay valid for the transaction lifetime
+ * (it is re-sent on each retransmission).
+ *
+ * When 'retransmit' is PJ_FALSE (reliable transports such as TCP), no
+ * retransmissions are performed; a single timeout timer of 16*RTO is
+ * simulated via the retransmit timer by pre-setting transmit_count to
+ * the maximum.
+ */
+PJ_DEF(pj_status_t) pj_stun_client_tsx_send_msg(pj_stun_client_tsx *tsx,
+						pj_bool_t retransmit,
+						void *pkt,
+						unsigned pkt_len)
+{
+    pj_status_t status;
+
+    PJ_ASSERT_RETURN(tsx && pkt && pkt_len, PJ_EINVAL);
+    PJ_ASSERT_RETURN(tsx->retransmit_timer.id == 0, PJ_EBUSY);
+
+    /* Encode message */
+    tsx->last_pkt = pkt;
+    tsx->last_pkt_size = pkt_len;
+
+    /* Update STUN retransmit flag */
+    tsx->require_retransmit = retransmit;
+
+    /* For TCP, schedule timeout timer after PJ_STUN_TIMEOUT_VALUE.
+     * Since we don't have timeout timer, simulate this by using
+     * retransmit timer.
+     */
+    if (!retransmit) {
+	unsigned timeout;
+
+	pj_assert(tsx->retransmit_timer.id == 0);
+	/* Pretend all retransmissions are spent so the timer callback
+	 * reports a timeout when it fires.
+	 */
+	tsx->transmit_count = PJ_STUN_MAX_TRANSMIT_COUNT;
+
+	timeout = tsx->rto_msec * 16;
+	tsx->retransmit_time.sec = timeout / 1000;
+	tsx->retransmit_time.msec = timeout % 1000;
+
+	/* Schedule timer first because when send_msg() failed we can
+	 * cancel it (as opposed to when schedule_timer() failed we cannot
+	 * cancel transmission).
+	 */
+	status = pj_timer_heap_schedule(tsx->timer_heap,
+					&tsx->retransmit_timer,
+					&tsx->retransmit_time);
+	if (status != PJ_SUCCESS) {
+	    tsx->retransmit_timer.id = 0;
+	    return status;
+	}
+	tsx->retransmit_timer.id = TIMER_ACTIVE;
+    }
+
+    /* Send the message */
+    status = tsx_transmit_msg(tsx);
+    if (status != PJ_SUCCESS) {
+	/* Roll back the timeout timer scheduled above */
+	if (tsx->retransmit_timer.id != 0) {
+	    pj_timer_heap_cancel(tsx->timer_heap,
+				 &tsx->retransmit_timer);
+	    tsx->retransmit_timer.id = 0;
+	}
+	return status;
+    }
+
+    return PJ_SUCCESS;
+}
+
+
+/* Retransmit timer callback.
+ *
+ * Fires either to retransmit the request (UDP) or to declare a timeout
+ * once the transmit budget is spent. on_complete() may destroy the
+ * transaction, so the object must not be accessed after invoking it.
+ */
+static void retransmit_timer_callback(pj_timer_heap_t *timer_heap,
+				      pj_timer_entry *timer)
+{
+    pj_stun_client_tsx *tsx = (pj_stun_client_tsx *) timer->user_data;
+    pj_status_t status;
+
+    PJ_UNUSED_ARG(timer_heap);
+
+    if (tsx->transmit_count >= PJ_STUN_MAX_TRANSMIT_COUNT) {
+	/* Retransmission count exceeded. Transaction has failed */
+	tsx->retransmit_timer.id = 0;
+	PJ_LOG(4,(tsx->obj_name, "STUN timeout waiting for response"));
+	pj_log_push_indent();
+	if (!tsx->complete) {
+	    tsx->complete = PJ_TRUE;
+	    if (tsx->cb.on_complete) {
+		tsx->cb.on_complete(tsx, PJNATH_ESTUNTIMEDOUT, NULL, NULL, 0);
+	    }
+	}
+	/* We might have been destroyed, don't try to access the object */
+	pj_log_pop_indent();
+	return;
+    }
+
+    /* Timer has fired, so mark it inactive before re-sending (which
+     * re-schedules it).
+     */
+    tsx->retransmit_timer.id = 0;
+    status = tsx_transmit_msg(tsx);
+    if (status == PJNATH_ESTUNDESTROYED) {
+	/* We've been destroyed, don't try to access the object */
+    } else if (status != PJ_SUCCESS) {
+	tsx->retransmit_timer.id = 0;
+	if (!tsx->complete) {
+	    tsx->complete = PJ_TRUE;
+	    if (tsx->cb.on_complete) {
+		tsx->cb.on_complete(tsx, status, NULL, NULL, 0);
+	    }
+	}
+	/* We might have been destroyed, don't try to access the object */
+    }
+}
+
+/*
+ * Request to retransmit the request immediately (e.g. triggered check).
+ *
+ * Silently succeeds without sending if the transaction is already
+ * scheduled for destruction. Any pending retransmit timer is cancelled
+ * first; tsx_transmit_msg() re-schedules it.
+ */
+PJ_DEF(pj_status_t) pj_stun_client_tsx_retransmit(pj_stun_client_tsx *tsx)
+{
+    if (tsx->destroy_timer.id != 0) {
+	return PJ_SUCCESS;
+    }
+
+    if (tsx->retransmit_timer.id != 0) {
+	pj_timer_heap_cancel(tsx->timer_heap, &tsx->retransmit_timer);
+	tsx->retransmit_timer.id = 0;
+    }
+
+    return tsx_transmit_msg(tsx);
+}
+
+/* Timer callback to destroy transaction.
+ *
+ * Fires after pj_stun_client_tsx_schedule_destroy()'s delay elapses;
+ * hands the transaction to the owner via on_destroy(), which is
+ * asserted non-NULL at scheduling time.
+ */
+static void destroy_timer_callback(pj_timer_heap_t *timer_heap,
+				   pj_timer_entry *timer)
+{
+    pj_stun_client_tsx *tsx = (pj_stun_client_tsx *) timer->user_data;
+
+    PJ_UNUSED_ARG(timer_heap);
+
+    /* Use 0 (not PJ_FALSE) for consistency: timer 'id' is an integer
+     * timer identifier, cleared to 0 everywhere else in this file.
+     */
+    tsx->destroy_timer.id = 0;
+    tsx->cb.on_destroy(tsx);
+    /* Don't access transaction after this */
+}
+
+
+/*
+ * Notify the STUN transaction about the arrival of STUN response.
+ *
+ * The caller has already matched the transaction ID. Cancels the
+ * retransmit timer, maps any ERROR-CODE attribute to a pj_status_t,
+ * and reports completion once (the 'complete' flag guards against a
+ * second report). on_complete() may destroy the transaction.
+ */
+PJ_DEF(pj_status_t) pj_stun_client_tsx_on_rx_msg(pj_stun_client_tsx *tsx,
+						 const pj_stun_msg *msg,
+						 const pj_sockaddr_t *src_addr,
+						 unsigned src_addr_len)
+{
+    pj_stun_errcode_attr *err_attr;
+    pj_status_t status;
+
+    /* Must be STUN response message */
+    if (!PJ_STUN_IS_SUCCESS_RESPONSE(msg->hdr.type) &&
+	!PJ_STUN_IS_ERROR_RESPONSE(msg->hdr.type))
+    {
+	PJ_LOG(4,(tsx->obj_name,
+		  "STUN rx_msg() error: not response message"));
+	return PJNATH_EINSTUNMSGTYPE;
+    }
+
+
+    /* We have a response with matching transaction ID.
+     * We can cancel retransmit timer now.
+     */
+    if (tsx->retransmit_timer.id) {
+	pj_timer_heap_cancel(tsx->timer_heap, &tsx->retransmit_timer);
+	tsx->retransmit_timer.id = 0;
+    }
+
+    /* Find STUN error code attribute */
+    err_attr = (pj_stun_errcode_attr*)
+	       pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_ERROR_CODE, 0);
+
+    /* NOTE(review): the comment below cites the 100-299 range but the
+     * code tests err_code <= 200 -- confirm against the referenced
+     * draft whether 201-299 should also be treated as provisional.
+     */
+    if (err_attr && err_attr->err_code <= 200) {
+	/* draft-ietf-behave-rfc3489bis-05.txt Section 8.3.2:
+	 * Any response between 100 and 299 MUST result in the cessation
+	 * of request retransmissions, but otherwise is discarded.
+	 */
+	PJ_LOG(4,(tsx->obj_name,
+		  "STUN rx_msg() error: received provisional %d code (%.*s)",
+		  err_attr->err_code,
+		  (int)err_attr->reason.slen,
+		  err_attr->reason.ptr));
+	return PJ_SUCCESS;
+    }
+
+    if (err_attr == NULL) {
+	status = PJ_SUCCESS;
+    } else {
+	status = PJ_STATUS_FROM_STUN_CODE(err_attr->err_code);
+    }
+
+    /* Call callback */
+    if (!tsx->complete) {
+	tsx->complete = PJ_TRUE;
+	if (tsx->cb.on_complete) {
+	    tsx->cb.on_complete(tsx, status, msg, src_addr, src_addr_len);
+	}
+	/* We might have been destroyed, don't try to access the object */
+    }
+
+    return PJ_SUCCESS;
+
+}
+
diff --git a/pjnath/src/pjnath/turn_session.c b/pjnath/src/pjnath/turn_session.c
new file mode 100644
index 0000000..cbe8f5c
--- /dev/null
+++ b/pjnath/src/pjnath/turn_session.c
@@ -0,0 +1,2040 @@
+/* $Id: turn_session.c 3844 2011-10-24 15:03:43Z bennylp $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <pjnath/turn_session.h>
+#include <pjnath/errno.h>
+#include <pjlib-util/srv_resolver.h>
+#include <pj/addr_resolv.h>
+#include <pj/assert.h>
+#include <pj/errno.h>
+#include <pj/hash.h>
+#include <pj/lock.h>
+#include <pj/log.h>
+#include <pj/os.h>
+#include <pj/pool.h>
+#include <pj/rand.h>
+#include <pj/sock.h>
+
+#define PJ_TURN_CHANNEL_MIN 0x4000
+#define PJ_TURN_CHANNEL_MAX 0x7FFF /* inclusive */
+#define PJ_TURN_CHANNEL_HTABLE_SIZE 8
+#define PJ_TURN_PERM_HTABLE_SIZE 8
+
+/* Printable names for pj_turn_state_t; the array order MUST match the
+ * enum declaration (indexed directly in pj_turn_state_name()).
+ */
+static const char *state_names[] =
+{
+    "Null",
+    "Resolving",
+    "Resolved",
+    "Allocating",
+    "Ready",
+    "Deallocating",
+    "Deallocated",
+    "Destroying"
+};
+
+/* Identifiers for the session's single multiplexed timer entry.
+ * TIMER_NONE means the timer is not scheduled.
+ */
+enum timer_id_t
+{
+    TIMER_NONE,
+    TIMER_KEEP_ALIVE,
+    TIMER_DESTROY
+};
+
+/* This structure describes a channel binding. A channel binding is indexed
+ * by the channel number or IP address and port number of the peer.
+ * Entries live in pj_turn_session.ch_table.
+ */
+struct ch_t
+{
+    /* The channel number */
+    pj_uint16_t	    num;
+
+    /* PJ_TRUE if we've received successful response to ChannelBind request
+     * for this channel.
+     */
+    pj_bool_t	    bound;
+
+    /* The peer IP address and port */
+    pj_sockaddr	    addr;
+
+    /* The channel binding expiration */
+    pj_time_val	    expiry;
+};
+
+
+/* This structure describes a permission. A permission is identified by the
+ * IP address only (port is ignored). Entries live in
+ * pj_turn_session.perm_table, keyed by the address.
+ */
+struct perm_t
+{
+    /* Cache of hash value to speed-up lookup */
+    pj_uint32_t	    hval;
+
+    /* The permission IP address. The port number MUST be zero */
+    pj_sockaddr	    addr;
+
+    /* Number of peers that uses this permission. */
+    unsigned	    peer_cnt;
+
+    /* Automatically renew this permission once it expires? */
+    pj_bool_t	    renew;
+
+    /* The permission expiration */
+    pj_time_val	    expiry;
+
+    /* Arbitrary/random pointer value (token) to map this perm with the
+     * request to create it. It is used to invalidate this perm when the
+     * request fails.
+     */
+    void	   *req_token;
+};
+
+
+/* The TURN client session structure. All mutable state is protected by
+ * 'lock'; the embedded STUN session uses a null mutex because access
+ * to it is already serialized by this lock.
+ */
+struct pj_turn_session
+{
+    pj_pool_t		*pool;		/* Owns this struct and sub-objects */
+    const char		*obj_name;	/* Log identifier (from pool) */
+    pj_turn_session_cb	 cb;		/* Application callbacks */
+    void		*user_data;
+    pj_stun_config	 stun_cfg;	/* Copied STUN config */
+
+    pj_lock_t		*lock;		/* Recursive session mutex */
+    int			 busy;
+
+    pj_turn_state_t	 state;		/* See state_names[] */
+    pj_status_t		 last_status;	/* First error that caused shutdown */
+    pj_bool_t		 pending_destroy;  /* Destroy once alloc completes */
+    pj_bool_t		 destroy_notified;
+
+    pj_stun_session	*stun;		/* Underlying STUN session */
+
+    unsigned		 lifetime;	/* Allocation lifetime (seconds) */
+    int			 ka_interval;	/* Keep-alive interval (seconds) */
+    pj_time_val		 expiry;	/* Allocation expiration time */
+
+    pj_timer_heap_t	*timer_heap;
+    pj_timer_entry	 timer;		/* Single timer, id is timer_id_t */
+
+    pj_dns_srv_async_query *dns_async;	/* Pending SRV query, if any */
+    pj_uint16_t		 default_port;	/* Fallback port for DNS A */
+
+    pj_uint16_t		 af;		/* Address family (pj_AF_*) */
+    pj_turn_tp_type	 conn_type;	/* UDP/TCP/TLS transport to server */
+    pj_uint16_t		 srv_addr_cnt;	/* Resolved candidate count */
+    pj_sockaddr		*srv_addr_list;	/* Resolved server candidates */
+    pj_sockaddr		*srv_addr;	/* Currently selected server */
+
+    pj_bool_t		 pending_alloc;	/* Alloc requested before resolve */
+    pj_turn_alloc_param	 alloc_param;
+
+    pj_sockaddr		 mapped_addr;	/* Our server-reflexive address */
+    pj_sockaddr		 relay_addr;	/* Allocated relay address */
+
+    pj_hash_table_t	*ch_table;	/* struct ch_t entries */
+    pj_hash_table_t	*perm_table;	/* struct perm_t entries */
+
+    pj_uint32_t		 send_ind_tsx_id[3];
+    /* tx_pkt must be 16bit aligned */
+    pj_uint8_t		 tx_pkt[PJ_TURN_MAX_PKT_LEN];
+
+    pj_uint16_t		 next_ch;	/* Next channel number to assign */
+};
+
+
+/*
+ * Prototypes.
+ */
+static void sess_shutdown(pj_turn_session *sess,
+ pj_status_t status);
+static void do_destroy(pj_turn_session *sess);
+static void send_refresh(pj_turn_session *sess, int lifetime);
+static pj_status_t stun_on_send_msg(pj_stun_session *sess,
+ void *token,
+ const void *pkt,
+ pj_size_t pkt_size,
+ const pj_sockaddr_t *dst_addr,
+ unsigned addr_len);
+static void stun_on_request_complete(pj_stun_session *sess,
+ pj_status_t status,
+ void *token,
+ pj_stun_tx_data *tdata,
+ const pj_stun_msg *response,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len);
+static pj_status_t stun_on_rx_indication(pj_stun_session *sess,
+ const pj_uint8_t *pkt,
+ unsigned pkt_len,
+ const pj_stun_msg *msg,
+ void *token,
+ const pj_sockaddr_t *src_addr,
+ unsigned src_addr_len);
+static void dns_srv_resolver_cb(void *user_data,
+ pj_status_t status,
+ const pj_dns_srv_record *rec);
+static struct ch_t *lookup_ch_by_addr(pj_turn_session *sess,
+ const pj_sockaddr_t *addr,
+ unsigned addr_len,
+ pj_bool_t update,
+ pj_bool_t bind_channel);
+static struct ch_t *lookup_ch_by_chnum(pj_turn_session *sess,
+ pj_uint16_t chnum);
+static struct perm_t *lookup_perm(pj_turn_session *sess,
+ const pj_sockaddr_t *addr,
+ unsigned addr_len,
+ pj_bool_t update);
+static void invalidate_perm(pj_turn_session *sess,
+ struct perm_t *perm);
+static void on_timer_event(pj_timer_heap_t *th, pj_timer_entry *e);
+
+
+/*
+ * Create default pj_turn_alloc_param: all fields zeroed, meaning no
+ * BANDWIDTH/LIFETIME attributes will be requested.
+ */
+PJ_DEF(void) pj_turn_alloc_param_default(pj_turn_alloc_param *prm)
+{
+    pj_bzero(prm, sizeof(*prm));
+}
+
+/*
+ * Duplicate pj_turn_alloc_param. Currently a plain shallow copy; the
+ * pool parameter is reserved for future fields that need allocation.
+ */
+PJ_DEF(void) pj_turn_alloc_param_copy( pj_pool_t *pool,
+				       pj_turn_alloc_param *dst,
+				       const pj_turn_alloc_param *src)
+{
+    PJ_UNUSED_ARG(pool);
+    pj_memcpy(dst, src, sizeof(*dst));
+}
+
+/*
+ * Get TURN state name. 'state' must be a valid pj_turn_state_t value;
+ * it is used as a direct index into state_names[].
+ */
+PJ_DEF(const char*) pj_turn_state_name(pj_turn_state_t state)
+{
+    return state_names[state];
+}
+
+/*
+ * Create TURN client session.
+ *
+ * Allocates a dedicated pool, creates the hash tables, recursive lock,
+ * timer entry and the embedded STUN session. On any failure the partial
+ * state is torn down via do_destroy() (which tolerates NULL members).
+ * on_send_pkt callback is mandatory. 'options' is currently unused.
+ */
+PJ_DEF(pj_status_t) pj_turn_session_create( const pj_stun_config *cfg,
+					    const char *name,
+					    int af,
+					    pj_turn_tp_type conn_type,
+					    const pj_turn_session_cb *cb,
+					    unsigned options,
+					    void *user_data,
+					    pj_turn_session **p_sess)
+{
+    pj_pool_t *pool;
+    pj_turn_session *sess;
+    pj_stun_session_cb stun_cb;
+    pj_lock_t *null_lock;
+    pj_status_t status;
+
+    PJ_ASSERT_RETURN(cfg && cfg->pf && cb && p_sess, PJ_EINVAL);
+    PJ_ASSERT_RETURN(cb->on_send_pkt, PJ_EINVAL);
+
+    PJ_UNUSED_ARG(options);
+
+    if (name == NULL)
+	name = "turn%p";
+
+    /* Allocate and create TURN session */
+    pool = pj_pool_create(cfg->pf, name, PJNATH_POOL_LEN_TURN_SESS,
+			  PJNATH_POOL_INC_TURN_SESS, NULL);
+    sess = PJ_POOL_ZALLOC_T(pool, pj_turn_session);
+    sess->pool = pool;
+    sess->obj_name = pool->obj_name;
+    sess->timer_heap = cfg->timer_heap;
+    sess->af = (pj_uint16_t)af;
+    sess->conn_type = conn_type;
+    sess->ka_interval = PJ_TURN_KEEP_ALIVE_SEC;
+    sess->user_data = user_data;
+    sess->next_ch = PJ_TURN_CHANNEL_MIN;
+
+    /* Copy STUN session */
+    pj_memcpy(&sess->stun_cfg, cfg, sizeof(pj_stun_config));
+
+    /* Copy callback */
+    pj_memcpy(&sess->cb, cb, sizeof(*cb));
+
+    /* Peer hash table */
+    sess->ch_table = pj_hash_create(pool, PJ_TURN_CHANNEL_HTABLE_SIZE);
+
+    /* Permission hash table */
+    sess->perm_table = pj_hash_create(pool, PJ_TURN_PERM_HTABLE_SIZE);
+
+    /* Session lock */
+    status = pj_lock_create_recursive_mutex(pool, sess->obj_name,
+					    &sess->lock);
+    if (status != PJ_SUCCESS) {
+	do_destroy(sess);
+	return status;
+    }
+
+    /* Timer */
+    pj_timer_entry_init(&sess->timer, TIMER_NONE, sess, &on_timer_event);
+
+    /* Create STUN session */
+    pj_bzero(&stun_cb, sizeof(stun_cb));
+    stun_cb.on_send_msg = &stun_on_send_msg;
+    stun_cb.on_request_complete = &stun_on_request_complete;
+    stun_cb.on_rx_indication = &stun_on_rx_indication;
+    status = pj_stun_session_create(&sess->stun_cfg, sess->obj_name, &stun_cb,
+				    PJ_FALSE, &sess->stun);
+    if (status != PJ_SUCCESS) {
+	do_destroy(sess);
+	return status;
+    }
+
+    /* Attach ourself to STUN session */
+    pj_stun_session_set_user_data(sess->stun, sess);
+
+    /* Replace mutex in STUN session with a NULL mutex, since access to
+     * STUN session is serialized.
+     */
+    status = pj_lock_create_null_mutex(pool, name, &null_lock);
+    if (status != PJ_SUCCESS) {
+	do_destroy(sess);
+	return status;
+    }
+    pj_stun_session_set_lock(sess->stun, null_lock, PJ_TRUE);
+
+    /* Done */
+
+    PJ_LOG(4,(sess->obj_name, "TURN client session created"));
+
+    *p_sess = sess;
+    return PJ_SUCCESS;
+}
+
+
+/* Destroy the session unconditionally, releasing timer, STUN session,
+ * lock and finally the pool (which owns the session struct itself).
+ * Safe to call on a partially-constructed session: every member is
+ * NULL-checked and cleared so the function is idempotent.
+ */
+static void do_destroy(pj_turn_session *sess)
+{
+    /* Lock session */
+    if (sess->lock) {
+	pj_lock_acquire(sess->lock);
+    }
+
+    /* Cancel pending timer, if any */
+    if (sess->timer.id != TIMER_NONE) {
+	pj_timer_heap_cancel(sess->timer_heap, &sess->timer);
+	sess->timer.id = TIMER_NONE;
+    }
+
+    /* Destroy STUN session */
+    if (sess->stun) {
+	pj_stun_session_destroy(sess->stun);
+	sess->stun = NULL;
+    }
+
+    /* Destroy lock */
+    if (sess->lock) {
+	pj_lock_release(sess->lock);
+	pj_lock_destroy(sess->lock);
+	sess->lock = NULL;
+    }
+
+    /* Destroy pool last: it backs 'sess' itself, so no member may be
+     * touched after the release.
+     */
+    if (sess->pool) {
+	pj_pool_t *pool = sess->pool;
+
+	PJ_LOG(4,(sess->obj_name, "TURN client session destroyed"));
+
+	sess->pool = NULL;
+	pj_pool_release(pool);
+    }
+}
+
+
+/* Set session state and notify the application via on_state().
+ * No-op when the state is unchanged. Note: the callback may destroy
+ * the session, so callers should not assume 'sess' is valid afterwards
+ * unless they hold their own guarantee.
+ */
+static void set_state(pj_turn_session *sess, enum pj_turn_state_t state)
+{
+    pj_turn_state_t old_state = sess->state;
+
+    if (state==sess->state)
+	return;
+
+    PJ_LOG(4,(sess->obj_name, "State changed %s --> %s",
+	      state_names[old_state], state_names[state]));
+    sess->state = state;
+
+    if (sess->cb.on_state) {
+	(*sess->cb.on_state)(sess, old_state, state);
+    }
+}
+
+/*
+ * Notify application and shutdown the TURN session.
+ *
+ * Behavior depends on the current state: a pending DNS query is
+ * cancelled; an in-progress allocation defers destruction via
+ * pending_destroy; a live allocation is released with REFRESH
+ * (LIFETIME=0). Only when nothing is outstanding is destruction
+ * scheduled (asynchronously, via TIMER_DESTROY with zero delay).
+ * 'status' records the first failure cause in last_status.
+ */
+static void sess_shutdown(pj_turn_session *sess,
+			  pj_status_t status)
+{
+    pj_bool_t can_destroy = PJ_TRUE;
+
+    PJ_LOG(4,(sess->obj_name, "Request to shutdown in state %s, cause:%d",
+	      state_names[sess->state], status));
+
+    /* Keep only the first error as the shutdown cause */
+    if (sess->last_status == PJ_SUCCESS && status != PJ_SUCCESS)
+	sess->last_status = status;
+
+    switch (sess->state) {
+    case PJ_TURN_STATE_NULL:
+	break;
+    case PJ_TURN_STATE_RESOLVING:
+	if (sess->dns_async != NULL) {
+	    pj_dns_srv_cancel_query(sess->dns_async, PJ_FALSE);
+	    sess->dns_async = NULL;
+	}
+	break;
+    case PJ_TURN_STATE_RESOLVED:
+	break;
+    case PJ_TURN_STATE_ALLOCATING:
+	/* We need to wait until allocation complete */
+	sess->pending_destroy = PJ_TRUE;
+	can_destroy = PJ_FALSE;
+	break;
+    case PJ_TURN_STATE_READY:
+	/* Send REFRESH with LIFETIME=0 */
+	can_destroy = PJ_FALSE;
+	send_refresh(sess, 0);
+	break;
+    case PJ_TURN_STATE_DEALLOCATING:
+	can_destroy = PJ_FALSE;
+	/* This may recursively call this function again with
+	 * state==PJ_TURN_STATE_DEALLOCATED.
+	 */
+	send_refresh(sess, 0);
+	break;
+    case PJ_TURN_STATE_DEALLOCATED:
+    case PJ_TURN_STATE_DESTROYING:
+	break;
+    }
+
+    if (can_destroy) {
+	/* Schedule destroy */
+	pj_time_val delay = {0, 0};
+
+	set_state(sess, PJ_TURN_STATE_DESTROYING);
+
+	/* Re-purpose the single session timer for destruction */
+	if (sess->timer.id != TIMER_NONE) {
+	    pj_timer_heap_cancel(sess->timer_heap, &sess->timer);
+	    sess->timer.id = TIMER_NONE;
+	}
+
+	sess->timer.id = TIMER_DESTROY;
+	pj_timer_heap_schedule(sess->timer_heap, &sess->timer, &delay);
+    }
+}
+
+
+/*
+ * Public API to destroy TURN client session gracefully. Thin locked
+ * wrapper around sess_shutdown(); actual destruction may complete
+ * asynchronously (after deallocation / timer).
+ */
+PJ_DEF(pj_status_t) pj_turn_session_shutdown(pj_turn_session *sess)
+{
+    PJ_ASSERT_RETURN(sess, PJ_EINVAL);
+
+    pj_lock_acquire(sess->lock);
+
+    sess_shutdown(sess, PJ_SUCCESS);
+
+    pj_lock_release(sess->lock);
+
+    return PJ_SUCCESS;
+}
+
+
+/**
+ * Forcefully destroy the TURN session. Skips the graceful REFRESH
+ * (LIFETIME=0) by jumping straight to DEALLOCATED before invoking the
+ * shutdown path. 'last_err' (if any) is recorded as the cause unless
+ * an earlier error was already stored.
+ *
+ * NOTE(review): no lock is taken here, unlike pj_turn_session_shutdown()
+ * -- presumably callers invoke this from a serialized context; confirm.
+ */
+PJ_DEF(pj_status_t) pj_turn_session_destroy( pj_turn_session *sess,
+					     pj_status_t last_err)
+{
+    PJ_ASSERT_RETURN(sess, PJ_EINVAL);
+
+    if (last_err != PJ_SUCCESS && sess->last_status == PJ_SUCCESS)
+	sess->last_status = last_err;
+    set_state(sess, PJ_TURN_STATE_DEALLOCATED);
+    sess_shutdown(sess, PJ_SUCCESS);
+    return PJ_SUCCESS;
+}
+
+
+/*
+ * Get TURN session info: state, transport type, remaining allocation
+ * lifetime (expiry minus now, in seconds), last error, and copies of
+ * the server, mapped, and relay addresses. The server address is
+ * zeroed if resolution has not completed yet.
+ */
+PJ_DEF(pj_status_t) pj_turn_session_get_info( pj_turn_session *sess,
+					      pj_turn_session_info *info)
+{
+    pj_time_val now;
+
+    PJ_ASSERT_RETURN(sess && info, PJ_EINVAL);
+
+    pj_gettimeofday(&now);
+
+    info->state = sess->state;
+    info->conn_type = sess->conn_type;
+    info->lifetime = sess->expiry.sec - now.sec;
+    info->last_status = sess->last_status;
+
+    if (sess->srv_addr)
+	pj_memcpy(&info->server, sess->srv_addr, sizeof(info->server));
+    else
+	pj_bzero(&info->server, sizeof(info->server));
+
+    pj_memcpy(&info->mapped_addr, &sess->mapped_addr,
+	      sizeof(sess->mapped_addr));
+    pj_memcpy(&info->relay_addr, &sess->relay_addr,
+	      sizeof(sess->relay_addr));
+
+    return PJ_SUCCESS;
+}
+
+
+/*
+ * Re-assign user data. The pointer is stored verbatim and never
+ * dereferenced by the session.
+ */
+PJ_DEF(pj_status_t) pj_turn_session_set_user_data( pj_turn_session *sess,
+						   void *user_data)
+{
+    sess->user_data = user_data;
+    return PJ_SUCCESS;
+}
+
+
+/**
+ * Retrieve user data previously set with pj_turn_session_set_user_data()
+ * or at creation time.
+ */
+PJ_DEF(void*) pj_turn_session_get_user_data(pj_turn_session *sess)
+{
+    return sess->user_data;
+}
+
+
+/*
+ * Configure message logging. By default all flags are enabled.
+ * Delegates to the embedded STUN session.
+ *
+ * @param sess		The TURN client session.
+ * @param flags		Bitmask combination of #pj_stun_sess_msg_log_flag
+ */
+PJ_DEF(void) pj_turn_session_set_log( pj_turn_session *sess,
+				      unsigned flags)
+{
+    pj_stun_session_set_log(sess->stun, flags);
+}
+
+
+/*
+ * Set SOFTWARE attribute value to include in outgoing messages.
+ * Locked wrapper around the STUN session setter.
+ */
+PJ_DEF(pj_status_t) pj_turn_session_set_software_name( pj_turn_session *sess,
+						       const pj_str_t *sw)
+{
+    pj_status_t status;
+
+    pj_lock_acquire(sess->lock);
+    status = pj_stun_session_set_software_name(sess->stun, sw);
+    pj_lock_release(sess->lock);
+
+    return status;
+}
+
+
+/**
+ * Set the server or domain name of the server.
+ *
+ * Resolution strategy:
+ *  - If 'domain' parses as a literal IP address, it is NOT handled here
+ *    (see NOTE below) -- otherwise:
+ *  - With a resolver: asynchronous DNS SRV lookup (_turn._udp /
+ *    _turn._tcp / _turns._tcp depending on conn_type), falling back to
+ *    DNS A when a valid default_port is given; completion arrives in
+ *    dns_srv_resolver_cb().
+ *  - Without a resolver: synchronous pj_getaddrinfo(); default_port is
+ *    then mandatory and is applied to every resulting address.
+ *
+ * Must be called in PJ_TURN_STATE_NULL. The state-changed callback from
+ * set_state() may destroy the session, hence the re-check of 'state'
+ * after each set_state(..., RESOLVING).
+ *
+ * NOTE(review): 'is_ip_addr' is computed but only used as a guard for
+ * the SRV branch; a literal IP with no resolver still goes through
+ * pj_getaddrinfo() -- confirm this is the intended handling.
+ */
+PJ_DEF(pj_status_t) pj_turn_session_set_server( pj_turn_session *sess,
+					        const pj_str_t *domain,
+						int default_port,
+						pj_dns_resolver *resolver)
+{
+    pj_sockaddr tmp_addr;
+    pj_bool_t is_ip_addr;
+    pj_status_t status;
+
+    PJ_ASSERT_RETURN(sess && domain, PJ_EINVAL);
+    PJ_ASSERT_RETURN(sess->state == PJ_TURN_STATE_NULL, PJ_EINVALIDOP);
+
+    pj_lock_acquire(sess->lock);
+
+    /* See if "domain" contains just IP address */
+    tmp_addr.addr.sa_family = sess->af;
+    status = pj_inet_pton(sess->af, domain,
+			  pj_sockaddr_get_addr(&tmp_addr));
+    is_ip_addr = (status == PJ_SUCCESS);
+
+    if (!is_ip_addr && resolver) {
+	/* Resolve with DNS SRV resolution, and fallback to DNS A resolution
+	 * if default_port is specified.
+	 */
+	unsigned opt = 0;
+	pj_str_t res_name;
+
+	switch (sess->conn_type) {
+	case PJ_TURN_TP_UDP:
+	    res_name = pj_str("_turn._udp.");
+	    break;
+	case PJ_TURN_TP_TCP:
+	    res_name = pj_str("_turn._tcp.");
+	    break;
+	case PJ_TURN_TP_TLS:
+	    res_name = pj_str("_turns._tcp.");
+	    break;
+	default:
+	    status = PJNATH_ETURNINTP;
+	    goto on_return;
+	}
+
+	/* Fallback to DNS A only if default port is specified */
+	if (default_port>0 && default_port<65536) {
+	    opt = PJ_DNS_SRV_FALLBACK_A;
+	    sess->default_port = (pj_uint16_t)default_port;
+	}
+
+	PJ_LOG(5,(sess->obj_name, "Resolving %.*s%.*s with DNS SRV",
+		  (int)res_name.slen, res_name.ptr,
+		  (int)domain->slen, domain->ptr));
+	set_state(sess, PJ_TURN_STATE_RESOLVING);
+
+	/* User may have destroyed us in the callback */
+	if (sess->state != PJ_TURN_STATE_RESOLVING) {
+	    status = PJ_ECANCELLED;
+	    goto on_return;
+	}
+
+	status = pj_dns_srv_resolve(domain, &res_name, default_port,
+				    sess->pool, resolver, opt, sess,
+				    &dns_srv_resolver_cb, &sess->dns_async);
+	if (status != PJ_SUCCESS) {
+	    set_state(sess, PJ_TURN_STATE_NULL);
+	    goto on_return;
+	}
+
+    } else {
+	/* Resolver is not specified, resolve with standard gethostbyname().
+	 * The default_port MUST be specified in this case.
+	 */
+	pj_addrinfo *ai;
+	unsigned i, cnt;
+
+	/* Default port must be specified */
+	PJ_ASSERT_RETURN(default_port>0 && default_port<65536, PJ_EINVAL);
+	sess->default_port = (pj_uint16_t)default_port;
+
+	cnt = PJ_TURN_MAX_DNS_SRV_CNT;
+	ai = (pj_addrinfo*)
+	     pj_pool_calloc(sess->pool, cnt, sizeof(pj_addrinfo));
+
+	PJ_LOG(5,(sess->obj_name, "Resolving %.*s with DNS A",
+		  (int)domain->slen, domain->ptr));
+	set_state(sess, PJ_TURN_STATE_RESOLVING);
+
+	/* User may have destroyed us in the callback */
+	if (sess->state != PJ_TURN_STATE_RESOLVING) {
+	    status = PJ_ECANCELLED;
+	    goto on_return;
+	}
+
+	status = pj_getaddrinfo(sess->af, domain, &cnt, ai);
+	if (status != PJ_SUCCESS)
+	    goto on_return;
+
+	sess->srv_addr_cnt = (pj_uint16_t)cnt;
+	sess->srv_addr_list = (pj_sockaddr*)
+		              pj_pool_calloc(sess->pool, cnt,
+					     sizeof(pj_sockaddr));
+	for (i=0; i<cnt; ++i) {
+	    pj_sockaddr *addr = &sess->srv_addr_list[i];
+	    pj_memcpy(addr, &ai[i].ai_addr, sizeof(pj_sockaddr));
+	    addr->addr.sa_family = sess->af;
+	    addr->ipv4.sin_port = pj_htons(sess->default_port);
+	}
+
+	sess->srv_addr = &sess->srv_addr_list[0];
+	set_state(sess, PJ_TURN_STATE_RESOLVED);
+    }
+
+on_return:
+    pj_lock_release(sess->lock);
+    return status;
+}
+
+
+/**
+ * Set credential to be used by the session. TURN always uses long-term
+ * authentication, hence PJ_STUN_AUTH_LONG_TERM is passed to the STUN
+ * session. Must be called after the STUN session exists (i.e. after
+ * successful create).
+ */
+PJ_DEF(pj_status_t) pj_turn_session_set_credential(pj_turn_session *sess,
+					     const pj_stun_auth_cred *cred)
+{
+    PJ_ASSERT_RETURN(sess && cred, PJ_EINVAL);
+    PJ_ASSERT_RETURN(sess->stun, PJ_EINVALIDOP);
+
+    pj_lock_acquire(sess->lock);
+
+    pj_stun_session_set_credential(sess->stun, PJ_STUN_AUTH_LONG_TERM, cred);
+
+    pj_lock_release(sess->lock);
+
+    return PJ_SUCCESS;
+}
+
+
+/**
+ * Create TURN allocation by sending an Allocate request.
+ *
+ * If name resolution is still in progress the request is merely marked
+ * pending (pending_alloc) and re-issued when resolution completes.
+ * Otherwise an ALLOCATE request is built with REQUESTED-TRANSPORT
+ * (always UDP relay) plus optional BANDWIDTH/LIFETIME from 'param',
+ * and the session moves to ALLOCATING; completion is handled in
+ * stun_on_request_complete(). Retransmission is enabled only for the
+ * UDP transport.
+ *
+ * @param param		Optional allocation parameters; copied into the
+ *			session when different from the stored copy.
+ */
+PJ_DEF(pj_status_t) pj_turn_session_alloc(pj_turn_session *sess,
+					  const pj_turn_alloc_param *param)
+{
+    pj_stun_tx_data *tdata;
+    pj_bool_t retransmit;
+    pj_status_t status;
+
+    PJ_ASSERT_RETURN(sess, PJ_EINVAL);
+    PJ_ASSERT_RETURN(sess->state>PJ_TURN_STATE_NULL &&
+		     sess->state<=PJ_TURN_STATE_RESOLVED,
+		     PJ_EINVALIDOP);
+
+    pj_lock_acquire(sess->lock);
+
+    if (param && param != &sess->alloc_param)
+	pj_turn_alloc_param_copy(sess->pool, &sess->alloc_param, param);
+
+    if (sess->state < PJ_TURN_STATE_RESOLVED) {
+	/* Not resolved yet: defer until dns_srv_resolver_cb() fires */
+	sess->pending_alloc = PJ_TRUE;
+
+	PJ_LOG(4,(sess->obj_name, "Pending ALLOCATE in state %s",
+		  state_names[sess->state]));
+
+	pj_lock_release(sess->lock);
+	return PJ_SUCCESS;
+
+    }
+
+    /* Ready to allocate */
+    pj_assert(sess->state == PJ_TURN_STATE_RESOLVED);
+
+    /* Create a bare request */
+    status = pj_stun_session_create_req(sess->stun, PJ_STUN_ALLOCATE_REQUEST,
+					PJ_STUN_MAGIC, NULL, &tdata);
+    if (status != PJ_SUCCESS) {
+	pj_lock_release(sess->lock);
+	return status;
+    }
+
+    /* MUST include REQUESTED-TRANSPORT attribute */
+    pj_stun_msg_add_uint_attr(tdata->pool, tdata->msg,
+			      PJ_STUN_ATTR_REQ_TRANSPORT,
+			      PJ_STUN_SET_RT_PROTO(PJ_TURN_TP_UDP));
+
+    /* Include BANDWIDTH if requested */
+    if (sess->alloc_param.bandwidth > 0) {
+	pj_stun_msg_add_uint_attr(tdata->pool, tdata->msg,
+				  PJ_STUN_ATTR_BANDWIDTH,
+				  sess->alloc_param.bandwidth);
+    }
+
+    /* Include LIFETIME if requested */
+    if (sess->alloc_param.lifetime > 0) {
+	pj_stun_msg_add_uint_attr(tdata->pool, tdata->msg,
+				  PJ_STUN_ATTR_LIFETIME,
+				  sess->alloc_param.lifetime);
+    }
+
+    /* Server address must be set */
+    pj_assert(sess->srv_addr != NULL);
+
+    /* Send request */
+    set_state(sess, PJ_TURN_STATE_ALLOCATING);
+    retransmit = (sess->conn_type == PJ_TURN_TP_UDP);
+    status = pj_stun_session_send_msg(sess->stun, NULL, PJ_FALSE,
+				      retransmit, sess->srv_addr,
+				      pj_sockaddr_get_len(sess->srv_addr),
+				      tdata);
+    if (status != PJ_SUCCESS) {
+	/* Set state back to RESOLVED. We don't want to destroy session now,
+	 * let the application do it if it wants to.
+	 */
+	set_state(sess, PJ_TURN_STATE_RESOLVED);
+    }
+
+    pj_lock_release(sess->lock);
+    return status;
+}
+
+
+/*
+ * Install or renew permissions for the given peer addresses.
+ *
+ * Builds one CreatePermission request carrying an XOR-PEER-ADDRESS
+ * attribute per distinct address. Each affected perm_t is tagged with a
+ * random 'req_token' so that, if the request fails (here on the error
+ * path, or later in stun_on_request_complete()), exactly the perms
+ * created/renewed by THIS request can be invalidated.
+ *
+ * @param addr_cnt	Number of addresses (must be > 0).
+ * @param addr		Peer addresses; only the IP part identifies a perm.
+ * @param options	Bit 0: auto-renew the permission on expiry.
+ */
+PJ_DEF(pj_status_t) pj_turn_session_set_perm( pj_turn_session *sess,
+					      unsigned addr_cnt,
+					      const pj_sockaddr addr[],
+					      unsigned options)
+{
+    pj_stun_tx_data *tdata;
+    pj_hash_iterator_t it_buf, *it;
+    void *req_token;
+    unsigned i, attr_added=0;
+    pj_status_t status;
+
+    PJ_ASSERT_RETURN(sess && addr_cnt && addr, PJ_EINVAL);
+
+    pj_lock_acquire(sess->lock);
+
+    /* Create a bare CreatePermission request */
+    status = pj_stun_session_create_req(sess->stun,
+					PJ_STUN_CREATE_PERM_REQUEST,
+					PJ_STUN_MAGIC, NULL, &tdata);
+    if (status != PJ_SUCCESS) {
+	pj_lock_release(sess->lock);
+	return status;
+    }
+
+    /* Create request token to map the request to the perm structures
+     * which the request belongs.
+     */
+    req_token = (void*)(long)pj_rand();
+
+    /* Process the addresses */
+    for (i=0; i<addr_cnt; ++i) {
+	struct perm_t *perm;
+
+	/* Lookup the perm structure and create if it doesn't exist */
+	perm = lookup_perm(sess, &addr[i], pj_sockaddr_get_len(&addr[i]),
+			   PJ_TRUE);
+	perm->renew = (options & 0x01);
+
+	/* Only add to the request if the request doesn't contain this
+	 * address yet.
+	 */
+	if (perm->req_token != req_token) {
+	    perm->req_token = req_token;
+
+	    /* Add XOR-PEER-ADDRESS */
+	    status = pj_stun_msg_add_sockaddr_attr(tdata->pool, tdata->msg,
+						   PJ_STUN_ATTR_XOR_PEER_ADDR,
+						   PJ_TRUE,
+						   &addr[i],
+						   sizeof(addr[i]));
+	    if (status != PJ_SUCCESS)
+		goto on_error;
+
+	    ++attr_added;
+	}
+    }
+
+    pj_assert(attr_added != 0);
+
+    /* Send the request */
+    status = pj_stun_session_send_msg(sess->stun, req_token, PJ_FALSE,
+				      (sess->conn_type==PJ_TURN_TP_UDP),
+				      sess->srv_addr,
+				      pj_sockaddr_get_len(sess->srv_addr),
+				      tdata);
+    if (status != PJ_SUCCESS) {
+	/* tdata is already destroyed */
+	tdata = NULL;
+	goto on_error;
+    }
+
+    pj_lock_release(sess->lock);
+    return PJ_SUCCESS;
+
+on_error:
+    /* destroy tdata */
+    if (tdata) {
+	pj_stun_msg_destroy_tdata(sess->stun, tdata);
+    }
+    /* invalidate perm structures associated with this request */
+    it = pj_hash_first(sess->perm_table, &it_buf);
+    while (it) {
+	struct perm_t *perm = (struct perm_t*)
+			      pj_hash_this(sess->perm_table, it);
+	it = pj_hash_next(sess->perm_table, it);
+	if (perm->req_token == req_token)
+	    invalidate_perm(sess, perm);
+    }
+    pj_lock_release(sess->lock);
+    return status;
+}
+
+/*
+ * Send REFRESH request to the TURN server.
+ *
+ * lifetime >= 0 is sent in a LIFETIME attribute; lifetime == 0
+ * requests deallocation (state moves to DEALLOCATING before sending);
+ * a negative lifetime omits the attribute so the server default
+ * applies.
+ */
+static void send_refresh(pj_turn_session *sess, int lifetime)
+{
+    pj_stun_tx_data *tdata;
+    pj_status_t status;
+
+    PJ_ASSERT_ON_FAIL(sess->state==PJ_TURN_STATE_READY, return);
+
+    /* Create a bare REFRESH request */
+    status = pj_stun_session_create_req(sess->stun, PJ_STUN_REFRESH_REQUEST,
+					PJ_STUN_MAGIC, NULL, &tdata);
+    if (status != PJ_SUCCESS)
+	goto on_error;
+
+    /* Add LIFETIME */
+    if (lifetime >= 0) {
+	pj_stun_msg_add_uint_attr(tdata->pool, tdata->msg,
+				  PJ_STUN_ATTR_LIFETIME, lifetime);
+    }
+
+    /* Send request */
+    if (lifetime == 0) {
+	set_state(sess, PJ_TURN_STATE_DEALLOCATING);
+    }
+
+    status = pj_stun_session_send_msg(sess->stun, NULL, PJ_FALSE,
+				      (sess->conn_type==PJ_TURN_TP_UDP),
+				      sess->srv_addr,
+				      pj_sockaddr_get_len(sess->srv_addr),
+				      tdata);
+    if (status != PJ_SUCCESS)
+	goto on_error;
+
+    return;
+
+on_error:
+    /* Failure is only fatal when we were deallocating; a failed
+     * periodic refresh is left to be retried by the keep-alive timer.
+     */
+    if (lifetime == 0) {
+	set_state(sess, PJ_TURN_STATE_DEALLOCATED);
+	sess_shutdown(sess, status);
+    }
+}
+
+
+/**
+ * Relay data to the specified peer through the session.
+ *
+ * If the peer is bound to a channel, the payload is framed as a
+ * ChannelData message (4-byte header); otherwise it is wrapped in a
+ * Send Indication with XOR-PEER-ADDRESS and DATA attributes. Either
+ * way the encoded packet is emitted via the on_send_pkt() callback.
+ * A missing permission for the peer triggers a CreatePermission
+ * request first, but the data is still sent immediately (the server
+ * may drop it until the permission is installed).
+ */
+PJ_DEF(pj_status_t) pj_turn_session_sendto( pj_turn_session *sess,
+					    const pj_uint8_t *pkt,
+					    unsigned pkt_len,
+					    const pj_sockaddr_t *addr,
+					    unsigned addr_len)
+{
+    struct ch_t *ch;
+    struct perm_t *perm;
+    pj_status_t status;
+
+    PJ_ASSERT_RETURN(sess && pkt && pkt_len && addr && addr_len,
+		     PJ_EIGNORED);
+
+    /* Return error if we're not ready */
+    /* NOTE(review): state is read before the lock is taken — looks
+     * like a benign race with the worker thread; confirm intended.
+     */
+    if (sess->state != PJ_TURN_STATE_READY) {
+	return PJ_EIGNORED;
+    }
+
+    /* Lock session now */
+    pj_lock_acquire(sess->lock);
+
+    /* Lookup permission first */
+    perm = lookup_perm(sess, addr, pj_sockaddr_get_len(addr), PJ_FALSE);
+    if (perm == NULL) {
+	/* Permission doesn't exist, install it first */
+	char ipstr[PJ_INET6_ADDRSTRLEN+2];
+
+	PJ_LOG(4,(sess->obj_name,
+		  "sendto(): IP %s has no permission, requesting it first..",
+		  pj_sockaddr_print(addr, ipstr, sizeof(ipstr), 2)));
+
+	status = pj_turn_session_set_perm(sess, 1, (const pj_sockaddr*)addr,
+					  0);
+	if (status != PJ_SUCCESS) {
+	    pj_lock_release(sess->lock);
+	    return status;
+	}
+    }
+
+    /* See if the peer is bound to a channel number */
+    ch = lookup_ch_by_addr(sess, addr, pj_sockaddr_get_len(addr),
+			   PJ_FALSE, PJ_FALSE);
+    if (ch && ch->num != PJ_TURN_INVALID_CHANNEL && ch->bound) {
+	unsigned total_len;
+
+	/* Peer is assigned a channel number, we can use ChannelData */
+	pj_turn_channel_data *cd = (pj_turn_channel_data*)sess->tx_pkt;
+
+	/* The header must be exactly 4 bytes on the wire */
+	pj_assert(sizeof(*cd)==4);
+
+	/* Calculate total length, including paddings (round up to
+	 * a 4-byte boundary)
+	 */
+	total_len = (pkt_len + sizeof(*cd) + 3) & (~3);
+	if (total_len > sizeof(sess->tx_pkt)) {
+	    status = PJ_ETOOBIG;
+	    goto on_return;
+	}
+
+	cd->ch_number = pj_htons((pj_uint16_t)ch->num);
+	cd->length = pj_htons((pj_uint16_t)pkt_len);
+	pj_memcpy(cd+1, pkt, pkt_len);
+
+	pj_assert(sess->srv_addr != NULL);
+
+	status = sess->cb.on_send_pkt(sess, sess->tx_pkt, total_len,
+				      sess->srv_addr,
+				      pj_sockaddr_get_len(sess->srv_addr));
+
+    } else {
+	/* Use Send Indication. */
+	pj_stun_sockaddr_attr peer_attr;
+	pj_stun_binary_attr data_attr;
+	pj_stun_msg send_ind;
+	pj_size_t send_ind_len;
+
+	/* Increment counter so each indication gets a fresh tsx id */
+	++sess->send_ind_tsx_id[2];
+
+	/* Create blank SEND-INDICATION */
+	status = pj_stun_msg_init(&send_ind, PJ_STUN_SEND_INDICATION,
+				  PJ_STUN_MAGIC,
+				  (const pj_uint8_t*)sess->send_ind_tsx_id);
+	if (status != PJ_SUCCESS)
+	    goto on_return;
+
+	/* Add XOR-PEER-ADDRESS */
+	pj_stun_sockaddr_attr_init(&peer_attr, PJ_STUN_ATTR_XOR_PEER_ADDR,
+				   PJ_TRUE, addr, addr_len);
+	pj_stun_msg_add_attr(&send_ind, (pj_stun_attr_hdr*)&peer_attr);
+
+	/* Add DATA attribute, referencing the caller's buffer directly
+	 * (no copy; the buffer must stay valid until encoding below).
+	 */
+	pj_stun_binary_attr_init(&data_attr, NULL, PJ_STUN_ATTR_DATA, NULL, 0);
+	data_attr.data = (pj_uint8_t*)pkt;
+	data_attr.length = pkt_len;
+	pj_stun_msg_add_attr(&send_ind, (pj_stun_attr_hdr*)&data_attr);
+
+	/* Encode the message */
+	status = pj_stun_msg_encode(&send_ind, sess->tx_pkt,
+				    sizeof(sess->tx_pkt), 0,
+				    NULL, &send_ind_len);
+	if (status != PJ_SUCCESS)
+	    goto on_return;
+
+	/* Send the Send Indication */
+	status = sess->cb.on_send_pkt(sess, sess->tx_pkt, send_ind_len,
+				      sess->srv_addr,
+				      pj_sockaddr_get_len(sess->srv_addr));
+    }
+
+on_return:
+    pj_lock_release(sess->lock);
+    return status;
+}
+
+
+/**
+ * Bind a peer address to a channel number.
+ *
+ * Sends a ChannelBind request for the peer. If the peer already has a
+ * channel number this is a binding refresh; otherwise the next free
+ * number from sess->next_ch is assigned. The ch_t pointer is attached
+ * to the request as its token so stun_on_request_complete() can mark
+ * the channel bound when the response arrives.
+ */
+PJ_DEF(pj_status_t) pj_turn_session_bind_channel(pj_turn_session *sess,
+						 const pj_sockaddr_t *peer_adr,
+						 unsigned addr_len)
+{
+    struct ch_t *ch;
+    pj_stun_tx_data *tdata;
+    pj_uint16_t ch_num;
+    pj_status_t status;
+
+    PJ_ASSERT_RETURN(sess && peer_adr && addr_len, PJ_EINVAL);
+    PJ_ASSERT_RETURN(sess->state == PJ_TURN_STATE_READY, PJ_EINVALIDOP);
+
+    pj_lock_acquire(sess->lock);
+
+    /* Create blank ChannelBind request */
+    status = pj_stun_session_create_req(sess->stun,
+					PJ_STUN_CHANNEL_BIND_REQUEST,
+					PJ_STUN_MAGIC, NULL, &tdata);
+    if (status != PJ_SUCCESS)
+	goto on_return;
+
+    /* Lookup if this peer has already been assigned a number */
+    ch = lookup_ch_by_addr(sess, peer_adr, pj_sockaddr_get_len(peer_adr),
+			   PJ_TRUE, PJ_FALSE);
+    /* lookup with update=PJ_TRUE creates the entry, so it can't be NULL */
+    pj_assert(ch);
+
+    if (ch->num != PJ_TURN_INVALID_CHANNEL) {
+	/* Channel is already bound. This is a refresh request. */
+	ch_num = ch->num;
+    } else {
+	/* NOTE(review): on this failure path tdata is not destroyed;
+	 * it remains owned by the STUN session pool — confirm whether
+	 * pj_stun_msg_destroy_tdata() should be called here.
+	 */
+	PJ_ASSERT_ON_FAIL(sess->next_ch <= PJ_TURN_CHANNEL_MAX,
+			  {status=PJ_ETOOMANY; goto on_return;});
+	ch->num = ch_num = sess->next_ch++;
+    }
+
+    /* Add CHANNEL-NUMBER attribute */
+    pj_stun_msg_add_uint_attr(tdata->pool, tdata->msg,
+			      PJ_STUN_ATTR_CHANNEL_NUMBER,
+			      PJ_STUN_SET_CH_NB(ch_num));
+
+    /* Add XOR-PEER-ADDRESS attribute */
+    pj_stun_msg_add_sockaddr_attr(tdata->pool, tdata->msg,
+				  PJ_STUN_ATTR_XOR_PEER_ADDR, PJ_TRUE,
+				  peer_adr, addr_len);
+
+    /* Send the request, associate peer data structure with tdata
+     * for future reference when we receive the ChannelBind response.
+     */
+    status = pj_stun_session_send_msg(sess->stun, ch, PJ_FALSE,
+				      (sess->conn_type==PJ_TURN_TP_UDP),
+				      sess->srv_addr,
+				      pj_sockaddr_get_len(sess->srv_addr),
+				      tdata);
+
+on_return:
+    pj_lock_release(sess->lock);
+    return status;
+}
+
+
+/**
+ * Notify TURN client session upon receiving a packet from server.
+ * The packet may be a STUN packet or a ChannelData packet.
+ *
+ * STUN messages (top two bits of the first byte zero) are handed to
+ * the STUN session; anything else is parsed as a ChannelData frame
+ * and, when the channel is known and bound, delivered to the
+ * application via the on_rx_data() callback.
+ *
+ * On return, *parsed_len (when non-NULL) is set to the number of
+ * bytes consumed so stream (TCP) transports can keep the unconsumed
+ * remainder of the buffer.
+ */
+PJ_DEF(pj_status_t) pj_turn_session_on_rx_pkt(pj_turn_session *sess,
+					      void *pkt,
+					      pj_size_t pkt_len,
+					      pj_size_t *parsed_len)
+{
+    pj_bool_t is_stun;
+    pj_status_t status;
+    pj_bool_t is_datagram;
+
+    /* Packet could be ChannelData or STUN message (response or
+     * indication).
+     */
+
+    /* Start locking the session */
+    pj_lock_acquire(sess->lock);
+
+    is_datagram = (sess->conn_type==PJ_TURN_TP_UDP);
+
+    /* Quickly check if this is STUN message */
+    is_stun = ((((pj_uint8_t*)pkt)[0] & 0xC0) == 0);
+
+    if (is_stun) {
+	/* This looks like STUN, give it to the STUN session */
+	unsigned options;
+
+	options = PJ_STUN_CHECK_PACKET | PJ_STUN_NO_FINGERPRINT_CHECK;
+	if (is_datagram)
+	    options |= PJ_STUN_IS_DATAGRAM;
+	status=pj_stun_session_on_rx_pkt(sess->stun, pkt, pkt_len,
+					 options, NULL, parsed_len,
+					 sess->srv_addr,
+					 pj_sockaddr_get_len(sess->srv_addr));
+
+    } else {
+	/* This must be ChannelData. */
+	pj_turn_channel_data cd;
+	struct ch_t *ch;
+
+	if (pkt_len < 4) {
+	    if (parsed_len) *parsed_len = 0;
+	    /* Bug fix: the original code returned here directly,
+	     * leaving sess->lock held forever. Exit through the
+	     * common path so the lock is released.
+	     */
+	    status = PJ_ETOOSMALL;
+	    goto on_return;
+	}
+
+	/* Decode ChannelData packet */
+	pj_memcpy(&cd, pkt, sizeof(pj_turn_channel_data));
+	cd.ch_number = pj_ntohs(cd.ch_number);
+	cd.length = pj_ntohs(cd.length);
+
+	/* Check that size is sane */
+	if (pkt_len < cd.length+sizeof(cd)) {
+	    if (parsed_len) {
+		if (is_datagram) {
+		    /* Discard the datagram */
+		    *parsed_len = pkt_len;
+		} else {
+		    /* Insufficient fragment; wait for more data */
+		    *parsed_len = 0;
+		}
+	    }
+	    status = PJ_ETOOSMALL;
+	    goto on_return;
+	} else {
+	    if (parsed_len) {
+		/* Apply padding too (payload is padded to 4 bytes) */
+		*parsed_len = ((cd.length + 3) & (~3)) + sizeof(cd);
+	    }
+	}
+
+	/* Lookup channel */
+	ch = lookup_ch_by_chnum(sess, cd.ch_number);
+	if (!ch || !ch->bound) {
+	    status = PJ_ENOTFOUND;
+	    goto on_return;
+	}
+
+	/* Notify application */
+	if (sess->cb.on_rx_data) {
+	    (*sess->cb.on_rx_data)(sess, ((pj_uint8_t*)pkt)+sizeof(cd),
+				   cd.length, &ch->addr,
+				   pj_sockaddr_get_len(&ch->addr));
+	}
+
+	status = PJ_SUCCESS;
+    }
+
+on_return:
+    pj_lock_release(sess->lock);
+    return status;
+}
+
+
+/*
+ * This is a callback from STUN session to send outgoing packet.
+ * It simply forwards the encoded packet to the application's
+ * on_send_pkt() callback, which owns the actual transport.
+ */
+static pj_status_t stun_on_send_msg(pj_stun_session *stun,
+				    void *token,
+				    const void *pkt,
+				    pj_size_t pkt_size,
+				    const pj_sockaddr_t *dst_addr,
+				    unsigned addr_len)
+{
+    pj_turn_session *sess;
+
+    PJ_UNUSED_ARG(token);
+
+    /* The TURN session was registered as the STUN session user data */
+    sess = (pj_turn_session*) pj_stun_session_get_user_data(stun);
+    return (*sess->cb.on_send_pkt)(sess, (const pj_uint8_t*)pkt, pkt_size,
+				   dst_addr, addr_len);
+}
+
+
+/*
+ * Handle failed ALLOCATE or REFRESH request. This may switch to alternate
+ * server if we have one.
+ *
+ * For a failed ALLOCATE with remaining entries in srv_addr_list the
+ * session advances to the next server and goes back to RESOLVED so the
+ * allocation can be retried; in every other case the session is moved
+ * to DEALLOCATED and shut down.
+ */
+static void on_session_fail( pj_turn_session *sess,
+			     enum pj_stun_method_e method,
+			     pj_status_t status,
+			     const pj_str_t *reason)
+{
+    sess->last_status = status;
+
+    /* do..while(0) gives reason1 a scope that covers every use of
+     * "reason" below; the loop body never repeats.
+     */
+    do {
+	pj_str_t reason1;
+	char err_msg[PJ_ERR_MSG_SIZE];
+
+	if (reason == NULL) {
+	    /* Synthesize a reason text from the status code */
+	    pj_strerror(status, err_msg, sizeof(err_msg));
+	    reason1 = pj_str(err_msg);
+	    reason = &reason1;
+	}
+
+	PJ_LOG(4,(sess->obj_name, "%s error: %.*s",
+		  pj_stun_get_method_name(method),
+		  (int)reason->slen, reason->ptr));
+
+	/* If this is ALLOCATE response and we don't have more server
+	 * addresses to try, notify application and destroy the TURN
+	 * session.
+	 */
+	if (method==PJ_STUN_ALLOCATE_METHOD &&
+	    sess->srv_addr == &sess->srv_addr_list[sess->srv_addr_cnt-1])
+	{
+
+	    set_state(sess, PJ_TURN_STATE_DEALLOCATED);
+	    sess_shutdown(sess, status);
+	    return;
+	}
+
+	/* Otherwise if this is not ALLOCATE response, notify application
+	 * that session has been TERMINATED.
+	 */
+	if (method!=PJ_STUN_ALLOCATE_METHOD) {
+	    set_state(sess, PJ_TURN_STATE_DEALLOCATED);
+	    sess_shutdown(sess, status);
+	    return;
+	}
+
+	/* Try next server */
+	++sess->srv_addr;
+	reason = NULL;
+
+	PJ_LOG(4,(sess->obj_name, "Trying next server"));
+	set_state(sess, PJ_TURN_STATE_RESOLVED);
+
+    } while (0);
+}
+
+
+/*
+ * Handle successful response to ALLOCATE or REFRESH request.
+ *
+ * Validates the LIFETIME and (for ALLOCATE) XOR-RELAYED-ADDRESS
+ * attributes, updates the allocation expiry and keep-alive interval,
+ * records the relayed and mapped addresses, (re)schedules the
+ * keep-alive timer and moves the session to READY. Any validation
+ * failure is routed to on_session_fail().
+ */
+static void on_allocate_success(pj_turn_session *sess,
+				enum pj_stun_method_e method,
+				const pj_stun_msg *msg)
+{
+    const pj_stun_lifetime_attr *lf_attr;
+    const pj_stun_xor_relayed_addr_attr *raddr_attr;
+    const pj_stun_sockaddr_attr *mapped_attr;
+    pj_str_t s;
+    pj_time_val timeout;
+
+    /* Must have LIFETIME attribute */
+    lf_attr = (const pj_stun_lifetime_attr*)
+	      pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_LIFETIME, 0);
+    if (lf_attr == NULL) {
+	on_session_fail(sess, method, PJNATH_EINSTUNMSG,
+			pj_cstr(&s, "Error: Missing LIFETIME attribute"));
+	return;
+    }
+
+    /* If LIFETIME is zero, this is a deallocation */
+    if (lf_attr->value == 0) {
+	set_state(sess, PJ_TURN_STATE_DEALLOCATED);
+	sess_shutdown(sess, PJ_SUCCESS);
+	return;
+    }
+
+    /* Update lifetime and keep-alive interval */
+    sess->lifetime = lf_attr->value;
+    pj_gettimeofday(&sess->expiry);
+
+    if (sess->lifetime < PJ_TURN_KEEP_ALIVE_SEC) {
+	/* Short lifetime: refresh must happen before it expires */
+	if (sess->lifetime <= 2) {
+	    on_session_fail(sess, method, PJ_ETOOSMALL,
+			     pj_cstr(&s, "Error: LIFETIME too small"));
+	    return;
+	}
+	sess->ka_interval = sess->lifetime - 2;
+	sess->expiry.sec += (sess->ka_interval-1);
+    } else {
+	/* NOTE: this int "timeout" intentionally shadows the
+	 * pj_time_val "timeout" declared above; it holds seconds
+	 * until the next refresh is due.
+	 */
+	int timeout;
+
+	sess->ka_interval = PJ_TURN_KEEP_ALIVE_SEC;
+
+	timeout = sess->lifetime - PJ_TURN_REFRESH_SEC_BEFORE;
+	if (timeout < sess->ka_interval)
+	    timeout = sess->ka_interval - 1;
+
+	sess->expiry.sec += timeout;
+    }
+
+    /* Check that relayed transport address contains correct
+     * address family.
+     */
+    raddr_attr = (const pj_stun_xor_relayed_addr_attr*)
+		 pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_XOR_RELAYED_ADDR, 0);
+    if (raddr_attr == NULL && method==PJ_STUN_ALLOCATE_METHOD) {
+	on_session_fail(sess, method, PJNATH_EINSTUNMSG,
+			pj_cstr(&s, "Error: Received ALLOCATE without "
+				    "RELAY-ADDRESS attribute"));
+	return;
+    }
+    if (raddr_attr && raddr_attr->sockaddr.addr.sa_family != sess->af) {
+	on_session_fail(sess, method, PJNATH_EINSTUNMSG,
+			pj_cstr(&s, "Error: RELAY-ADDRESS with non IPv4"
+				    " address family is not supported "
+				    "for now"));
+	return;
+    }
+    if (raddr_attr && !pj_sockaddr_has_addr(&raddr_attr->sockaddr)) {
+	on_session_fail(sess, method, PJNATH_EINSTUNMSG,
+			pj_cstr(&s, "Error: Invalid IP address in "
+				    "RELAY-ADDRESS attribute"));
+	return;
+    }
+
+    /* Save relayed address */
+    if (raddr_attr) {
+	/* If we already have relay address, check if the relay address
+	 * in the response matches our relay address.
+	 */
+	if (pj_sockaddr_has_addr(&sess->relay_addr)) {
+	    if (pj_sockaddr_cmp(&sess->relay_addr, &raddr_attr->sockaddr)) {
+		on_session_fail(sess, method, PJNATH_EINSTUNMSG,
+				pj_cstr(&s, "Error: different RELAY-ADDRESS is"
+					    "returned by server"));
+		return;
+	    }
+	} else {
+	    /* Otherwise save the relayed address */
+	    pj_memcpy(&sess->relay_addr, &raddr_attr->sockaddr,
+		      sizeof(pj_sockaddr));
+	}
+    }
+
+    /* Get mapped address (our reflexive address as seen by server) */
+    mapped_attr = (const pj_stun_sockaddr_attr*)
+		  pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_XOR_MAPPED_ADDR, 0);
+    if (mapped_attr) {
+	pj_memcpy(&sess->mapped_addr, &mapped_attr->sockaddr,
+		  sizeof(mapped_attr->sockaddr));
+    }
+
+    /* Success */
+
+    /* Cancel existing keep-alive timer, if any */
+    pj_assert(sess->timer.id != TIMER_DESTROY);
+
+    if (sess->timer.id != TIMER_NONE) {
+	pj_timer_heap_cancel(sess->timer_heap, &sess->timer);
+	sess->timer.id = TIMER_NONE;
+    }
+
+    /* Start keep-alive timer once allocation succeeds */
+    timeout.sec = sess->ka_interval;
+    timeout.msec = 0;
+
+    sess->timer.id = TIMER_KEEP_ALIVE;
+    pj_timer_heap_schedule(sess->timer_heap, &sess->timer, &timeout);
+
+    set_state(sess, PJ_TURN_STATE_READY);
+}
+
+/*
+ * Notification from STUN session on request completion.
+ *
+ * Dispatches on the request's method:
+ *  - ALLOCATE / REFRESH: success goes to on_allocate_success();
+ *    failure (transport error or STUN error response) goes to
+ *    on_session_fail(), which may retry the next server.
+ *  - CHANNEL-BIND: on success marks the ch_t (passed as token) bound
+ *    and registers it by channel number; on 437 Allocation Mismatch
+ *    the whole session is failed.
+ *  - CREATE-PERMISSION: on failure invalidates every perm_t carrying
+ *    the request token; 437 also fails the session.
+ */
+static void stun_on_request_complete(pj_stun_session *stun,
+				     pj_status_t status,
+				     void *token,
+				     pj_stun_tx_data *tdata,
+				     const pj_stun_msg *response,
+				     const pj_sockaddr_t *src_addr,
+				     unsigned src_addr_len)
+{
+    pj_turn_session *sess;
+    enum pj_stun_method_e method = (enum pj_stun_method_e)
+			           PJ_STUN_GET_METHOD(tdata->msg->hdr.type);
+
+    PJ_UNUSED_ARG(src_addr);
+    PJ_UNUSED_ARG(src_addr_len);
+
+    sess = (pj_turn_session*)pj_stun_session_get_user_data(stun);
+
+    if (method == PJ_STUN_ALLOCATE_METHOD) {
+
+	/* Destroy if we have pending destroy request */
+	if (sess->pending_destroy) {
+	    if (status == PJ_SUCCESS)
+		sess->state = PJ_TURN_STATE_READY;
+	    else
+		sess->state = PJ_TURN_STATE_DEALLOCATED;
+	    sess_shutdown(sess, PJ_SUCCESS);
+	    return;
+	}
+
+	/* Handle ALLOCATE response */
+	if (status==PJ_SUCCESS &&
+	    PJ_STUN_IS_SUCCESS_RESPONSE(response->hdr.type))
+	{
+
+	    /* Successful Allocate response */
+	    on_allocate_success(sess, method, response);
+
+	} else {
+	    /* Failed Allocate request */
+	    const pj_str_t *err_msg = NULL;
+
+	    if (status == PJ_SUCCESS) {
+		/* Transport OK but server replied with an error;
+		 * translate the STUN error code to a pj_status_t.
+		 */
+		const pj_stun_errcode_attr *err_attr;
+		err_attr = (const pj_stun_errcode_attr*)
+			   pj_stun_msg_find_attr(response,
+						 PJ_STUN_ATTR_ERROR_CODE, 0);
+		if (err_attr) {
+		    status = PJ_STATUS_FROM_STUN_CODE(err_attr->err_code);
+		    err_msg = &err_attr->reason;
+		} else {
+		    status = PJNATH_EINSTUNMSG;
+		}
+	    }
+
+	    on_session_fail(sess, method, status, err_msg);
+	}
+
+    } else if (method == PJ_STUN_REFRESH_METHOD) {
+	/* Handle Refresh response */
+	if (status==PJ_SUCCESS &&
+	    PJ_STUN_IS_SUCCESS_RESPONSE(response->hdr.type))
+	{
+	    /* Success, schedule next refresh. */
+	    on_allocate_success(sess, method, response);
+
+	} else {
+	    /* Failed Refresh request */
+	    const pj_str_t *err_msg = NULL;
+
+	    pj_assert(status != PJ_SUCCESS);
+
+	    if (response) {
+		const pj_stun_errcode_attr *err_attr;
+		err_attr = (const pj_stun_errcode_attr*)
+			   pj_stun_msg_find_attr(response,
+						 PJ_STUN_ATTR_ERROR_CODE, 0);
+		if (err_attr) {
+		    status = PJ_STATUS_FROM_STUN_CODE(err_attr->err_code);
+		    err_msg = &err_attr->reason;
+		}
+	    }
+
+	    /* Notify and destroy */
+	    on_session_fail(sess, method, status, err_msg);
+	}
+
+    } else if (method == PJ_STUN_CHANNEL_BIND_METHOD) {
+	/* Handle ChannelBind response */
+	if (status==PJ_SUCCESS &&
+	    PJ_STUN_IS_SUCCESS_RESPONSE(response->hdr.type))
+	{
+	    /* Successful ChannelBind response */
+	    struct ch_t *ch = (struct ch_t*)token;
+
+	    pj_assert(ch->num != PJ_TURN_INVALID_CHANNEL);
+	    ch->bound = PJ_TRUE;
+
+	    /* Update hash table: register the entry under its channel
+	     * number as well (bind_channel=PJ_TRUE).
+	     */
+	    lookup_ch_by_addr(sess, &ch->addr,
+			      pj_sockaddr_get_len(&ch->addr),
+			      PJ_TRUE, PJ_TRUE);
+
+	} else {
+	    /* Failed ChannelBind response */
+	    pj_str_t reason = {"", 0};
+	    int err_code = 0;
+	    char errbuf[PJ_ERR_MSG_SIZE];
+
+	    pj_assert(status != PJ_SUCCESS);
+
+	    if (response) {
+		const pj_stun_errcode_attr *err_attr;
+		err_attr = (const pj_stun_errcode_attr*)
+			   pj_stun_msg_find_attr(response,
+						 PJ_STUN_ATTR_ERROR_CODE, 0);
+		if (err_attr) {
+		    err_code = err_attr->err_code;
+		    status = PJ_STATUS_FROM_STUN_CODE(err_attr->err_code);
+		    reason = err_attr->reason;
+		}
+	    } else {
+		err_code = status;
+		reason = pj_strerror(status, errbuf, sizeof(errbuf));
+	    }
+
+	    PJ_LOG(1,(sess->obj_name, "ChannelBind failed: %d/%.*s",
+		      err_code, (int)reason.slen, reason.ptr));
+
+	    if (err_code == PJ_STUN_SC_ALLOCATION_MISMATCH) {
+		/* Allocation mismatch means allocation no longer exists */
+		on_session_fail(sess, PJ_STUN_CHANNEL_BIND_METHOD,
+				status, &reason);
+		return;
+	    }
+	}
+
+    } else if (method == PJ_STUN_CREATE_PERM_METHOD) {
+	/* Handle CreatePermission response */
+	if (status==PJ_SUCCESS &&
+	    PJ_STUN_IS_SUCCESS_RESPONSE(response->hdr.type))
+	{
+	    /* No special handling when the request is successful. */
+	} else {
+	    /* Iterate the permission table and invalidate all permissions
+	     * that are related to this request.
+	     */
+	    pj_hash_iterator_t it_buf, *it;
+	    char ipstr[PJ_INET6_ADDRSTRLEN+10];
+	    int err_code;
+	    char errbuf[PJ_ERR_MSG_SIZE];
+	    pj_str_t reason;
+
+	    pj_assert(status != PJ_SUCCESS);
+
+	    if (response) {
+		const pj_stun_errcode_attr *eattr;
+
+		eattr = (const pj_stun_errcode_attr*)
+			pj_stun_msg_find_attr(response,
+					      PJ_STUN_ATTR_ERROR_CODE, 0);
+		if (eattr) {
+		    err_code = eattr->err_code;
+		    reason = eattr->reason;
+		} else {
+		    err_code = -1;
+		    reason = pj_str("?");
+		}
+	    } else {
+		err_code = status;
+		reason = pj_strerror(status, errbuf, sizeof(errbuf));
+	    }
+
+	    /* Advance the iterator before invalidate_perm() removes
+	     * the current entry from the table.
+	     */
+	    it = pj_hash_first(sess->perm_table, &it_buf);
+	    while (it) {
+		struct perm_t *perm = (struct perm_t*)
+				      pj_hash_this(sess->perm_table, it);
+		it = pj_hash_next(sess->perm_table, it);
+
+		if (perm->req_token == token) {
+		    PJ_LOG(1,(sess->obj_name,
+			      "CreatePermission failed for IP %s: %d/%.*s",
+			      pj_sockaddr_print(&perm->addr, ipstr,
+						sizeof(ipstr), 2),
+			      err_code, (int)reason.slen, reason.ptr));
+
+		    invalidate_perm(sess, perm);
+		}
+	    }
+
+	    if (err_code == PJ_STUN_SC_ALLOCATION_MISMATCH) {
+		/* Allocation mismatch means allocation no longer exists */
+		on_session_fail(sess, PJ_STUN_CREATE_PERM_METHOD,
+				status, &reason);
+		return;
+	    }
+	}
+
+    } else {
+	PJ_LOG(4,(sess->obj_name, "Unexpected STUN %s response",
+		  pj_stun_get_method_name(response->hdr.type)));
+    }
+}
+
+
+/*
+ * Notification from STUN session on incoming STUN Indication
+ * message.
+ *
+ * Only Data indications are accepted; the payload of the DATA
+ * attribute is passed to the application's on_rx_data() callback
+ * together with the peer address from XOR-PEER-ADDRESS. Indications
+ * carrying an ICMP attribute (forwarded ICMP errors) are ignored.
+ */
+static pj_status_t stun_on_rx_indication(pj_stun_session *stun,
+					 const pj_uint8_t *pkt,
+					 unsigned pkt_len,
+					 const pj_stun_msg *msg,
+					 void *token,
+					 const pj_sockaddr_t *src_addr,
+					 unsigned src_addr_len)
+{
+    pj_turn_session *sess;
+    pj_stun_xor_peer_addr_attr *peer_attr;
+    pj_stun_icmp_attr *icmp;
+    pj_stun_data_attr *data_attr;
+
+    PJ_UNUSED_ARG(token);
+    PJ_UNUSED_ARG(pkt);
+    PJ_UNUSED_ARG(pkt_len);
+    PJ_UNUSED_ARG(src_addr);
+    PJ_UNUSED_ARG(src_addr_len);
+
+    sess = (pj_turn_session*)pj_stun_session_get_user_data(stun);
+
+    /* Expecting Data Indication only */
+    if (msg->hdr.type != PJ_STUN_DATA_INDICATION) {
+	PJ_LOG(4,(sess->obj_name, "Unexpected STUN %s indication",
+		  pj_stun_get_method_name(msg->hdr.type)));
+	return PJ_EINVALIDOP;
+    }
+
+    /* Check if there is ICMP attribute in the message */
+    icmp = (pj_stun_icmp_attr*)
+	   pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_ICMP, 0);
+    if (icmp != NULL) {
+	/* This is a forwarded ICMP packet. Ignore it for now */
+	return PJ_SUCCESS;
+    }
+
+    /* Get XOR-PEER-ADDRESS attribute */
+    peer_attr = (pj_stun_xor_peer_addr_attr*)
+		pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_XOR_PEER_ADDR, 0);
+
+    /* Get DATA attribute */
+    data_attr = (pj_stun_data_attr*)
+		pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_DATA, 0);
+
+    /* Must have both XOR-PEER-ADDRESS and DATA attributes */
+    if (!peer_attr || !data_attr) {
+	PJ_LOG(4,(sess->obj_name,
+		  "Received Data indication with missing attributes"));
+	return PJ_EINVALIDOP;
+    }
+
+    /* Notify application */
+    if (sess->cb.on_rx_data) {
+	(*sess->cb.on_rx_data)(sess, data_attr->data, data_attr->length,
+			       &peer_attr->sockaddr,
+			       pj_sockaddr_get_len(&peer_attr->sockaddr));
+    }
+
+    return PJ_SUCCESS;
+}
+
+
+/*
+ * Notification on completion of DNS SRV resolution.
+ *
+ * Flattens the SRV records into sess->srv_addr_list (capped at
+ * PJ_TURN_MAX_DNS_SRV_CNT entries), points srv_addr at the first
+ * entry, moves the session to RESOLVED, and kicks off a pending
+ * allocation if pj_turn_session_alloc() was called before resolution
+ * completed. Any resolver error shuts the session down.
+ */
+static void dns_srv_resolver_cb(void *user_data,
+				pj_status_t status,
+				const pj_dns_srv_record *rec)
+{
+    pj_turn_session *sess = (pj_turn_session*) user_data;
+    unsigned i, cnt, tot_cnt;
+
+    /* Clear async resolver */
+    sess->dns_async = NULL;
+
+    /* Check failure */
+    if (status != PJ_SUCCESS) {
+	sess_shutdown(sess, status);
+	return;
+    }
+
+    /* Calculate total number of server entries in the response */
+    tot_cnt = 0;
+    for (i=0; i<rec->count; ++i) {
+	tot_cnt += rec->entry[i].server.addr_count;
+    }
+
+    if (tot_cnt > PJ_TURN_MAX_DNS_SRV_CNT)
+	tot_cnt = PJ_TURN_MAX_DNS_SRV_CNT;
+
+    /* Allocate server entries */
+    sess->srv_addr_list = (pj_sockaddr*)
+		          pj_pool_calloc(sess->pool, tot_cnt,
+					 sizeof(pj_sockaddr));
+
+    /* Copy results to server entries.
+     * NOTE(review): only the IPv4 part (s_addr) is copied; presumably
+     * the resolver only yields A records here — confirm for IPv6.
+     */
+    for (i=0, cnt=0; i<rec->count && cnt<PJ_TURN_MAX_DNS_SRV_CNT; ++i) {
+	unsigned j;
+
+	for (j=0; j<rec->entry[i].server.addr_count &&
+		  cnt<PJ_TURN_MAX_DNS_SRV_CNT; ++j)
+	{
+	    pj_sockaddr_in *addr = &sess->srv_addr_list[cnt].ipv4;
+
+	    addr->sin_family = sess->af;
+	    addr->sin_port = pj_htons(rec->entry[i].port);
+	    addr->sin_addr.s_addr = rec->entry[i].server.addr[j].s_addr;
+
+	    ++cnt;
+	}
+    }
+    sess->srv_addr_cnt = (pj_uint16_t)cnt;
+
+    /* Set current server */
+    sess->srv_addr = &sess->srv_addr_list[0];
+
+    /* Set state to PJ_TURN_STATE_RESOLVED */
+    set_state(sess, PJ_TURN_STATE_RESOLVED);
+
+    /* Run pending allocation */
+    if (sess->pending_alloc) {
+	pj_turn_session_alloc(sess, NULL);
+    }
+}
+
+
+/*
+ * Lookup peer descriptor from its address.
+ *
+ * With update=PJ_TRUE a missing entry is created (channel number
+ * PJ_TURN_INVALID_CHANNEL) and the expiry is refreshed; with
+ * bind_channel=PJ_TRUE the entry is additionally registered in the
+ * same hash table keyed by its channel number, so the table holds
+ * both address-keyed and number-keyed entries. Updating also
+ * creates/refreshes the permission for the peer.
+ */
+static struct ch_t *lookup_ch_by_addr(pj_turn_session *sess,
+				      const pj_sockaddr_t *addr,
+				      unsigned addr_len,
+				      pj_bool_t update,
+				      pj_bool_t bind_channel)
+{
+    pj_uint32_t hval = 0;
+    struct ch_t *ch;
+
+    ch = (struct ch_t*)
+	 pj_hash_get(sess->ch_table, addr, addr_len, &hval);
+    if (ch == NULL && update) {
+	ch = PJ_POOL_ZALLOC_T(sess->pool, struct ch_t);
+	ch->num = PJ_TURN_INVALID_CHANNEL;
+	pj_memcpy(&ch->addr, addr, addr_len);
+
+	/* Register by peer address */
+	pj_hash_set(sess->pool, sess->ch_table, &ch->addr, addr_len,
+		    hval, ch);
+    }
+
+    if (ch && update) {
+	pj_gettimeofday(&ch->expiry);
+	ch->expiry.sec += PJ_TURN_PERM_TIMEOUT - sess->ka_interval - 1;
+
+	if (bind_channel) {
+	    /* Fresh hval: this lookup uses a different key (the
+	     * channel number), deliberately shadowing the outer hval.
+	     */
+	    pj_uint32_t hval = 0;
+	    /* Register by channel number */
+	    pj_assert(ch->num != PJ_TURN_INVALID_CHANNEL && ch->bound);
+
+	    if (pj_hash_get(sess->ch_table, &ch->num,
+			    sizeof(ch->num), &hval)==0) {
+		pj_hash_set(sess->pool, sess->ch_table, &ch->num,
+			    sizeof(ch->num), hval, ch);
+	    }
+	}
+    }
+
+    /* Also create/update permission for this destination. Ideally we
+     * should update this when we receive the successful response,
+     * but that would cause duplicate CreatePermission to be sent
+     * during refreshing.
+     */
+    if (ch && update) {
+	lookup_perm(sess, &ch->addr, pj_sockaddr_get_len(&ch->addr), PJ_TRUE);
+    }
+
+    return ch;
+}
+
+
+/*
+ * Lookup channel descriptor from its channel number.
+ * Returns NULL if no entry is registered under this number (entries
+ * are keyed by number only after a successful ChannelBind; see
+ * lookup_ch_by_addr() with bind_channel=PJ_TRUE).
+ */
+static struct ch_t *lookup_ch_by_chnum(pj_turn_session *sess,
+				       pj_uint16_t chnum)
+{
+    return (struct ch_t*) pj_hash_get(sess->ch_table, &chnum,
+				      sizeof(chnum), NULL);
+}
+
+
+/*
+ * Lookup permission and optionally create if it doesn't exist.
+ *
+ * Permissions are keyed by address with the port zeroed out (TURN
+ * permissions are per-IP, not per-port). With update=PJ_TRUE a
+ * missing entry is created and the expiry is refreshed.
+ */
+static struct perm_t *lookup_perm(pj_turn_session *sess,
+				  const pj_sockaddr_t *addr,
+				  unsigned addr_len,
+				  pj_bool_t update)
+{
+    pj_uint32_t hval = 0;
+    pj_sockaddr perm_addr;
+    struct perm_t *perm;
+
+    /* make sure port number is zero */
+    if (pj_sockaddr_get_port(addr) != 0) {
+	pj_memcpy(&perm_addr, addr, addr_len);
+	pj_sockaddr_set_port(&perm_addr, 0);
+	addr = &perm_addr;
+    }
+
+    /* lookup and create if it doesn't exist and wanted */
+    perm = (struct perm_t*)
+	   pj_hash_get(sess->perm_table, addr, addr_len, &hval);
+    if (perm == NULL && update) {
+	perm = PJ_POOL_ZALLOC_T(sess->pool, struct perm_t);
+	pj_memcpy(&perm->addr, addr, addr_len);
+	/* Remember the hash value so invalidate_perm() can remove the
+	 * entry without recomputing it.
+	 */
+	perm->hval = hval;
+
+	pj_hash_set(sess->pool, sess->perm_table, &perm->addr, addr_len,
+		    perm->hval, perm);
+    }
+
+    if (perm && update) {
+	pj_gettimeofday(&perm->expiry);
+	perm->expiry.sec += PJ_TURN_PERM_TIMEOUT - sess->ka_interval - 1;
+
+    }
+
+    return perm;
+}
+
+/*
+ * Delete permission: unregister the entry from the hash table.
+ * The perm_t itself is pool-allocated and is reclaimed with the pool.
+ */
+static void invalidate_perm(pj_turn_session *sess,
+			    struct perm_t *perm)
+{
+    pj_hash_set(NULL, sess->perm_table, &perm->addr,
+		pj_sockaddr_get_len(&perm->addr), perm->hval, NULL);
+}
+
+/*
+ * Scan permission's hash table to refresh the permission.
+ *
+ * Entries that are about to expire are either re-added to a single
+ * batched CreatePermission request (when renew is set) or removed
+ * from the table. Returns the number of permissions refreshed; 0 on
+ * error or when nothing needed refreshing — the caller uses a
+ * non-zero return as "a packet was sent" for keep-alive accounting.
+ */
+static unsigned refresh_permissions(pj_turn_session *sess,
+				    const pj_time_val *now)
+{
+    pj_stun_tx_data *tdata = NULL;
+    unsigned count = 0;
+    void *req_token = NULL;
+    pj_hash_iterator_t *it, itbuf;
+    pj_status_t status;
+
+    it = pj_hash_first(sess->perm_table, &itbuf);
+    while (it) {
+	struct perm_t *perm = (struct perm_t*)
+			      pj_hash_this(sess->perm_table, it);
+
+	/* Advance before possibly invalidating the current entry */
+	it = pj_hash_next(sess->perm_table, it);
+
+	if (perm->expiry.sec-1 <= now->sec) {
+	    if (perm->renew) {
+		/* Renew this permission */
+		if (tdata == NULL) {
+		    /* Create a bare CreatePermission request, lazily,
+		     * only once for the whole batch.
+		     */
+		    status = pj_stun_session_create_req(
+					sess->stun,
+					PJ_STUN_CREATE_PERM_REQUEST,
+					PJ_STUN_MAGIC, NULL, &tdata);
+		    if (status != PJ_SUCCESS) {
+			PJ_LOG(1,(sess->obj_name,
+				 "Error creating CreatePermission request: %d",
+				 status));
+			return 0;
+		    }
+
+		    /* Create request token to map the request to the perm
+		     * structures which the request belongs.
+		     */
+		    req_token = (void*)(long)pj_rand();
+		}
+
+		status = pj_stun_msg_add_sockaddr_attr(
+					tdata->pool,
+					tdata->msg,
+					PJ_STUN_ATTR_XOR_PEER_ADDR,
+					PJ_TRUE,
+					&perm->addr,
+					sizeof(perm->addr));
+		if (status != PJ_SUCCESS) {
+		    pj_stun_msg_destroy_tdata(sess->stun, tdata);
+		    return 0;
+		}
+
+		perm->expiry = *now;
+		perm->expiry.sec += PJ_TURN_PERM_TIMEOUT-sess->ka_interval-1;
+		perm->req_token = req_token;
+		++count;
+
+	    } else {
+		/* This permission has expired and app doesn't want
+		 * us to renew, so delete it from the hash table.
+		 */
+		invalidate_perm(sess, perm);
+	    }
+	}
+    }
+
+    if (tdata) {
+	status = pj_stun_session_send_msg(sess->stun, req_token, PJ_FALSE,
+					  (sess->conn_type==PJ_TURN_TP_UDP),
+					  sess->srv_addr,
+					  pj_sockaddr_get_len(sess->srv_addr),
+					  tdata);
+	if (status != PJ_SUCCESS) {
+	    PJ_LOG(1,(sess->obj_name,
+		      "Error sending CreatePermission request: %d",
+		      status));
+	    count = 0;
+	}
+
+    }
+
+    return count;
+}
+
+/*
+ * Timer event.
+ *
+ * TIMER_KEEP_ALIVE: refresh the allocation when it is near expiry,
+ * re-send ChannelBind for expiring bound channels, refresh
+ * permissions, and if nothing was sent emit an empty Send indication
+ * to keep the local NAT binding alive; then reschedule itself.
+ * TIMER_DESTROY: final session destruction.
+ */
+static void on_timer_event(pj_timer_heap_t *th, pj_timer_entry *e)
+{
+    pj_turn_session *sess = (pj_turn_session*)e->user_data;
+    enum timer_id_t eid;
+
+    PJ_UNUSED_ARG(th);
+
+    pj_lock_acquire(sess->lock);
+
+    /* Consume the timer id before doing any work */
+    eid = (enum timer_id_t) e->id;
+    e->id = TIMER_NONE;
+
+    if (eid == TIMER_KEEP_ALIVE) {
+	pj_time_val now;
+	pj_hash_iterator_t itbuf, *it;
+	pj_bool_t resched = PJ_TRUE;
+	pj_bool_t pkt_sent = PJ_FALSE;
+
+	pj_gettimeofday(&now);
+
+	/* Refresh allocation if it's time to do so */
+	if (PJ_TIME_VAL_LTE(sess->expiry, now)) {
+	    int lifetime = sess->alloc_param.lifetime;
+
+	    /* lifetime 0 means "use server default": send REFRESH
+	     * without a LIFETIME attribute (see send_refresh()).
+	     */
+	    if (lifetime == 0)
+		lifetime = -1;
+
+	    send_refresh(sess, lifetime);
+	    /* on_allocate_success() will reschedule the timer */
+	    resched = PJ_FALSE;
+	    pkt_sent = PJ_TRUE;
+	}
+
+	/* Scan hash table to refresh bound channels */
+	it = pj_hash_first(sess->ch_table, &itbuf);
+	while (it) {
+	    struct ch_t *ch = (struct ch_t*)
+			      pj_hash_this(sess->ch_table, it);
+	    if (ch->bound && PJ_TIME_VAL_LTE(ch->expiry, now)) {
+
+		/* Send ChannelBind to refresh channel binding and
+		 * permission.
+		 */
+		pj_turn_session_bind_channel(sess, &ch->addr,
+					     pj_sockaddr_get_len(&ch->addr));
+		pkt_sent = PJ_TRUE;
+	    }
+
+	    it = pj_hash_next(sess->ch_table, it);
+	}
+
+	/* Scan permission table to refresh permissions */
+	if (refresh_permissions(sess, &now))
+	    pkt_sent = PJ_TRUE;
+
+	/* If no packet is sent, send a blank Send indication to
+	 * refresh local NAT.
+	 */
+	if (!pkt_sent && sess->alloc_param.ka_interval > 0) {
+	    pj_stun_tx_data *tdata;
+	    pj_status_t rc;
+
+	    /* Create blank SEND-INDICATION */
+	    rc = pj_stun_session_create_ind(sess->stun,
+					    PJ_STUN_SEND_INDICATION, &tdata);
+	    if (rc == PJ_SUCCESS) {
+		/* Add DATA attribute with zero length */
+		pj_stun_msg_add_binary_attr(tdata->pool, tdata->msg,
+					    PJ_STUN_ATTR_DATA, NULL, 0);
+
+		/* Send the indication */
+		pj_stun_session_send_msg(sess->stun, NULL, PJ_FALSE,
+					 PJ_FALSE, sess->srv_addr,
+					 pj_sockaddr_get_len(sess->srv_addr),
+					 tdata);
+	    }
+	}
+
+	/* Reschedule timer */
+	if (resched) {
+	    pj_time_val delay;
+
+	    delay.sec = sess->ka_interval;
+	    delay.msec = 0;
+
+	    sess->timer.id = TIMER_KEEP_ALIVE;
+	    pj_timer_heap_schedule(sess->timer_heap, &sess->timer, &delay);
+	}
+
+	pj_lock_release(sess->lock);
+
+    } else if (eid == TIMER_DESTROY) {
+	/* Time to destroy */
+	pj_lock_release(sess->lock);
+	do_destroy(sess);
+    } else {
+	pj_assert(!"Unknown timer event");
+	pj_lock_release(sess->lock);
+    }
+}
+
diff --git a/pjnath/src/pjnath/turn_sock.c b/pjnath/src/pjnath/turn_sock.c
new file mode 100644
index 0000000..799b557
--- /dev/null
+++ b/pjnath/src/pjnath/turn_sock.c
@@ -0,0 +1,808 @@
+/* $Id: turn_sock.c 3841 2011-10-24 09:28:13Z ming $ */
+/*
+ * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
+ * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <pjnath/turn_sock.h>
+#include <pj/activesock.h>
+#include <pj/assert.h>
+#include <pj/errno.h>
+#include <pj/lock.h>
+#include <pj/log.h>
+#include <pj/pool.h>
+#include <pj/ioqueue.h>
+
+/* Timer IDs used with turn_sock->timer; dispatched in timer_cb(). */
+enum
+{
+    TIMER_NONE,		/* Timer entry is idle/not scheduled.	    */
+    TIMER_DESTROY	/* Deferred self-destruction is pending.    */
+};
+
+/* NOTE(review): INIT appears unused within this file -- confirm against
+ * the rest of the module before removing it.
+ */
+#define INIT 0x1FFFFFFF
+
+/*
+ * The TURN client transport: couples a pj_turn_session with an actual
+ * network socket (UDP or TCP via activesock) and the glue callbacks
+ * between the two.
+ */
+struct pj_turn_sock
+{
+    pj_pool_t *pool;			/* Owning memory pool.		    */
+    const char *obj_name;		/* Name for logging (from pool).    */
+    pj_turn_session *sess;		/* TURN session; NULL once it is
+					 * destroyed/detached.		    */
+    pj_turn_sock_cb cb;			/* Application callbacks (copied).  */
+    void *user_data;			/* Opaque application data.	    */
+
+    pj_lock_t *lock;			/* Recursive mutex guarding state.  */
+
+    pj_turn_alloc_param alloc_param;	/* Saved params for ALLOCATE.	    */
+    pj_stun_config cfg;			/* Copy of STUN config (ioqueue,
+					 * timer heap, pool factory).	    */
+    pj_turn_sock_cfg setting;		/* Copy of QoS etc. settings.	    */
+
+    pj_bool_t destroy_request;		/* Set when app requested destroy.  */
+    pj_timer_entry timer;		/* Timer for deferred destruction.  */
+
+    int af;				/* Address family (INET/INET6).	    */
+    pj_turn_tp_type conn_type;		/* Transport to server (UDP/TCP).   */
+    pj_activesock_t *active_sock;	/* Socket to the TURN server; made
+					 * only after server is resolved.   */
+    pj_ioqueue_op_key_t send_key;	/* Op key for asynchronous send.    */
+};
+
+
+/*
+ * Callback prototypes.
+ */
+
+/* Callbacks invoked by the TURN session (pj_turn_session): */
+static pj_status_t turn_on_send_pkt(pj_turn_session *sess,
+                                    const pj_uint8_t *pkt,
+                                    unsigned pkt_len,
+                                    const pj_sockaddr_t *dst_addr,
+                                    unsigned dst_addr_len);
+static void turn_on_channel_bound(pj_turn_session *sess,
+                                  const pj_sockaddr_t *peer_addr,
+                                  unsigned addr_len,
+                                  unsigned ch_num);
+static void turn_on_rx_data(pj_turn_session *sess,
+                            void *pkt,
+                            unsigned pkt_len,
+                            const pj_sockaddr_t *peer_addr,
+                            unsigned addr_len);
+static void turn_on_state(pj_turn_session *sess,
+                          pj_turn_state_t old_state,
+                          pj_turn_state_t new_state);
+
+/* Callbacks invoked by the active socket (network events): */
+static pj_bool_t on_data_read(pj_activesock_t *asock,
+                              void *data,
+                              pj_size_t size,
+                              pj_status_t status,
+                              pj_size_t *remainder);
+static pj_bool_t on_connect_complete(pj_activesock_t *asock,
+                                     pj_status_t status);
+
+
+
+/* Internal helpers: synchronous teardown and deferred-destroy timer. */
+static void destroy(pj_turn_sock *turn_sock);
+static void timer_cb(pj_timer_heap_t *th, pj_timer_entry *e);
+
+
+/*
+ * Fill a pj_turn_sock_cfg with its default values: everything zeroed,
+ * best-effort QoS, and QoS errors treated as non-fatal.
+ */
+PJ_DEF(void) pj_turn_sock_cfg_default(pj_turn_sock_cfg *cfg)
+{
+    pj_bzero(cfg, sizeof(pj_turn_sock_cfg));
+    cfg->qos_ignore_error = PJ_TRUE;
+    cfg->qos_type = PJ_QOS_TYPE_BEST_EFFORT;
+}
+
+/*
+ * Create a TURN client transport instance.
+ *
+ * Only the passive state is set up here (pool, lock, timer entry, and
+ * the TURN session). The actual socket and ioqueue registration are
+ * created later, from turn_on_state(), once the TURN server address
+ * has been resolved.
+ *
+ * cfg         STUN config supplying pool factory, ioqueue, timer heap.
+ * af          pj_AF_INET() or pj_AF_INET6().
+ * conn_type   Transport to the server (TCP only when PJ_HAS_TCP).
+ * cb          Optional application callbacks (copied).
+ * setting     Optional settings (copied); defaults used when NULL.
+ * user_data   Opaque application data.
+ * p_turn_sock Receives the created instance on PJ_SUCCESS.
+ */
+PJ_DEF(pj_status_t) pj_turn_sock_create(pj_stun_config *cfg,
+					int af,
+					pj_turn_tp_type conn_type,
+					const pj_turn_sock_cb *cb,
+					const pj_turn_sock_cfg *setting,
+					void *user_data,
+					pj_turn_sock **p_turn_sock)
+{
+    pj_turn_sock *turn_sock;
+    pj_turn_session_cb sess_cb;
+    pj_turn_sock_cfg default_setting;
+    pj_pool_t *pool;
+    const char *name_tmpl;
+    pj_status_t status;
+
+    PJ_ASSERT_RETURN(cfg && p_turn_sock, PJ_EINVAL);
+    PJ_ASSERT_RETURN(af==pj_AF_INET() || af==pj_AF_INET6(), PJ_EINVAL);
+    PJ_ASSERT_RETURN(conn_type!=PJ_TURN_TP_TCP || PJ_HAS_TCP, PJ_EINVAL);
+
+    /* Substitute defaults when the caller supplied no settings */
+    if (!setting) {
+        pj_turn_sock_cfg_default(&default_setting);
+        setting = &default_setting;
+    }
+
+    switch (conn_type) {
+    case PJ_TURN_TP_UDP:
+        name_tmpl = "udprel%p";
+        break;
+    case PJ_TURN_TP_TCP:
+        name_tmpl = "tcprel%p";
+        break;
+    default:
+        PJ_ASSERT_RETURN(!"Invalid TURN conn_type", PJ_EINVAL);
+        name_tmpl = "tcprel%p";
+        break;
+    }
+
+    /* Create and init basic data structure */
+    pool = pj_pool_create(cfg->pf, name_tmpl, PJNATH_POOL_LEN_TURN_SOCK,
+			  PJNATH_POOL_INC_TURN_SOCK, NULL);
+    /* Fix: guard against allocation failure; a pool factory policy may
+     * return NULL rather than raising an exception, and dereferencing
+     * a NULL pool below would crash.
+     */
+    if (pool == NULL)
+        return PJ_ENOMEM;
+
+    turn_sock = PJ_POOL_ZALLOC_T(pool, pj_turn_sock);
+    turn_sock->pool = pool;
+    turn_sock->obj_name = pool->obj_name;
+    turn_sock->user_data = user_data;
+    turn_sock->af = af;
+    turn_sock->conn_type = conn_type;
+
+    /* Copy STUN config (this contains ioqueue, timer heap, etc.) */
+    pj_memcpy(&turn_sock->cfg, cfg, sizeof(*cfg));
+
+    /* Copy setting (QoS parameters etc) */
+    pj_memcpy(&turn_sock->setting, setting, sizeof(*setting));
+
+    /* Set callback */
+    if (cb) {
+        pj_memcpy(&turn_sock->cb, cb, sizeof(*cb));
+    }
+
+    /* Create lock */
+    status = pj_lock_create_recursive_mutex(pool, turn_sock->obj_name,
+					    &turn_sock->lock);
+    if (status != PJ_SUCCESS) {
+        destroy(turn_sock);
+        return status;
+    }
+
+    /* Init timer */
+    pj_timer_entry_init(&turn_sock->timer, TIMER_NONE, turn_sock, &timer_cb);
+
+    /* Init TURN session */
+    pj_bzero(&sess_cb, sizeof(sess_cb));
+    sess_cb.on_send_pkt = &turn_on_send_pkt;
+    sess_cb.on_channel_bound = &turn_on_channel_bound;
+    sess_cb.on_rx_data = &turn_on_rx_data;
+    sess_cb.on_state = &turn_on_state;
+    status = pj_turn_session_create(cfg, pool->obj_name, af, conn_type,
+				    &sess_cb, 0, turn_sock, &turn_sock->sess);
+    if (status != PJ_SUCCESS) {
+        destroy(turn_sock);
+        return status;
+    }
+
+    /* Note: socket and ioqueue will be created later once the TURN server
+     * has been resolved.
+     */
+
+    *p_turn_sock = turn_sock;
+    return PJ_SUCCESS;
+}
+
+/*
+ * Destroy.
+ *
+ * Synchronous teardown, in strict order: detach+shutdown the session
+ * (so no further callbacks reference us), close the active socket,
+ * destroy the lock, and finally release the pool -- which frees this
+ * object itself, so no member may be touched afterwards.
+ */
+static void destroy(pj_turn_sock *turn_sock)
+{
+    if (turn_sock->lock) {
+        pj_lock_acquire(turn_sock->lock);
+    }
+
+    /* Clear session user data first so late callbacks see NULL. */
+    if (turn_sock->sess) {
+        pj_turn_session_set_user_data(turn_sock->sess, NULL);
+        pj_turn_session_shutdown(turn_sock->sess);
+        turn_sock->sess = NULL;
+    }
+
+    if (turn_sock->active_sock) {
+        pj_activesock_close(turn_sock->active_sock);
+        turn_sock->active_sock = NULL;
+    }
+
+    if (turn_sock->lock) {
+        pj_lock_release(turn_sock->lock);
+        pj_lock_destroy(turn_sock->lock);
+        turn_sock->lock = NULL;
+    }
+
+    /* Releasing the pool frees turn_sock itself; clear the pointer
+     * into a local first to avoid touching freed memory.
+     */
+    if (turn_sock->pool) {
+        pj_pool_t *pool = turn_sock->pool;
+        turn_sock->pool = NULL;
+        pj_pool_release(pool);
+    }
+}
+
+
+/*
+ * Request destruction of the TURN transport.
+ *
+ * While a session is still alive this only initiates graceful shutdown;
+ * the actual deletion happens later, from turn_on_state() when the
+ * session reaches DESTROYING, via a zero-delay TIMER_DESTROY. With no
+ * session left, the object is destroyed synchronously here.
+ */
+PJ_DEF(void) pj_turn_sock_destroy(pj_turn_sock *turn_sock)
+{
+    pj_lock_acquire(turn_sock->lock);
+    turn_sock->destroy_request = PJ_TRUE;
+
+    if (turn_sock->sess) {
+        pj_turn_session_shutdown(turn_sock->sess);
+        /* This will ultimately call our state callback, and when
+         * session state is DESTROYING we will schedule a timer to
+         * destroy ourselves.
+         */
+        pj_lock_release(turn_sock->lock);
+    } else {
+        pj_lock_release(turn_sock->lock);
+        destroy(turn_sock);
+    }
+
+}
+
+
+/* Timer callback: dispatches deferred work scheduled on turn_sock->timer
+ * (currently only the deferred self-destruction).
+ */
+static void timer_cb(pj_timer_heap_t *th, pj_timer_entry *e)
+{
+    pj_turn_sock *turn_sock = (pj_turn_sock*)e->user_data;
+    int timer_id = e->id;
+
+    PJ_UNUSED_ARG(th);
+
+    /* Mark the entry idle before handling it, since handling may free
+     * the entry's owner.
+     */
+    e->id = TIMER_NONE;
+
+    if (timer_id == TIMER_DESTROY) {
+        PJ_LOG(5,(turn_sock->obj_name, "Destroying TURN"));
+        destroy(turn_sock);
+    } else {
+        pj_assert(!"Invalid timer id");
+    }
+}
+
+
+/* Display error: log the description of 'status' at log level 4,
+ * prefixed with 'title'.
+ */
+static void show_err(pj_turn_sock *turn_sock, const char *title,
+                     pj_status_t status)
+{
+    PJ_PERROR(4,(turn_sock->obj_name, status, title));
+}
+
+/*
+ * Fatal-error helper: log the failure, then tear down the TURN session
+ * if one still exists. Destroying the session eventually triggers
+ * turn_on_state() with the DESTROYING state.
+ */
+static void sess_fail(pj_turn_sock *turn_sock, const char *title,
+                      pj_status_t status)
+{
+    show_err(turn_sock, title, status);
+
+    if (turn_sock->sess == NULL)
+        return;
+
+    pj_turn_session_destroy(turn_sock->sess, status);
+}
+
+/*
+ * Set user data.
+ *
+ * Replaces the opaque application data pointer. Returns PJ_EINVAL when
+ * turn_sock is NULL.
+ */
+PJ_DEF(pj_status_t) pj_turn_sock_set_user_data( pj_turn_sock *turn_sock,
+					        void *user_data)
+{
+    PJ_ASSERT_RETURN(turn_sock, PJ_EINVAL);
+    turn_sock->user_data = user_data;
+    return PJ_SUCCESS;
+}
+
+/*
+ * Get user data.
+ *
+ * Returns the pointer stored by pj_turn_sock_set_user_data(), or NULL
+ * when turn_sock is NULL.
+ */
+PJ_DEF(void*) pj_turn_sock_get_user_data(pj_turn_sock *turn_sock)
+{
+    PJ_ASSERT_RETURN(turn_sock, NULL);
+    return turn_sock->user_data;
+}
+
+/**
+ * Get info.
+ *
+ * Reports the TURN session info. When no session exists (yet, or any
+ * longer), a zeroed structure with state PJ_TURN_STATE_NULL is
+ * reported instead.
+ */
+PJ_DEF(pj_status_t) pj_turn_sock_get_info(pj_turn_sock *turn_sock,
+					  pj_turn_session_info *info)
+{
+    PJ_ASSERT_RETURN(turn_sock && info, PJ_EINVAL);
+
+    if (turn_sock->sess == NULL) {
+        pj_bzero(info, sizeof(*info));
+        info->state = PJ_TURN_STATE_NULL;
+        return PJ_SUCCESS;
+    }
+
+    return pj_turn_session_get_info(turn_sock->sess, info);
+}
+
+/**
+ * Lock the TURN socket. Application may need to call this function to
+ * synchronize access to other objects to avoid deadlock.
+ */
+PJ_DEF(pj_status_t) pj_turn_sock_lock(pj_turn_sock *turn_sock)
+{
+    /* Fix: validate the handle before dereferencing, consistent with
+     * the other public APIs in this file.
+     */
+    PJ_ASSERT_RETURN(turn_sock, PJ_EINVAL);
+
+    return pj_lock_acquire(turn_sock->lock);
+}
+
+/**
+ * Unlock the TURN socket.
+ */
+PJ_DEF(pj_status_t) pj_turn_sock_unlock(pj_turn_sock *turn_sock)
+{
+    /* Fix: validate the handle before dereferencing, consistent with
+     * the other public APIs in this file.
+     */
+    PJ_ASSERT_RETURN(turn_sock, PJ_EINVAL);
+
+    return pj_lock_release(turn_sock->lock);
+}
+
+/*
+ * Set STUN message logging for this TURN session.
+ *
+ * Silently does nothing when the session no longer exists (e.g. after
+ * it reached the DESTROYING state).
+ */
+PJ_DEF(void) pj_turn_sock_set_log( pj_turn_sock *turn_sock,
+				   unsigned flags)
+{
+    PJ_ASSERT_ON_FAIL(turn_sock != NULL, return);
+
+    /* Fix: the session may already be gone; the original code would
+     * have dereferenced a NULL session handle here.
+     */
+    if (turn_sock->sess)
+        pj_turn_session_set_log(turn_sock->sess, flags);
+}
+
+/*
+ * Set software name.
+ *
+ * Returns PJ_EINVAL for a NULL handle and PJ_EINVALIDOP when the
+ * session no longer exists.
+ */
+PJ_DEF(pj_status_t) pj_turn_sock_set_software_name( pj_turn_sock *turn_sock,
+						    const pj_str_t *sw)
+{
+    /* Fix: guard the handle and the session, consistent with e.g.
+     * pj_turn_sock_sendto(); the original dereferenced sess blindly.
+     */
+    PJ_ASSERT_RETURN(turn_sock, PJ_EINVAL);
+
+    if (turn_sock->sess == NULL)
+        return PJ_EINVALIDOP;
+
+    return pj_turn_session_set_software_name(turn_sock->sess, sw);
+}
+
+/*
+ * Initialize.
+ *
+ * Start the allocation process: save the allocation parameters, set the
+ * credential on the session (if given), and start resolution of the
+ * TURN server. The Allocate request itself is sent later, from
+ * turn_on_state(), once the session reaches the RESOLVED state.
+ */
+PJ_DEF(pj_status_t) pj_turn_sock_alloc(pj_turn_sock *turn_sock,
+				       const pj_str_t *domain,
+				       int default_port,
+				       pj_dns_resolver *resolver,
+				       const pj_stun_auth_cred *cred,
+				       const pj_turn_alloc_param *param)
+{
+    pj_status_t status;
+
+    PJ_ASSERT_RETURN(turn_sock && domain, PJ_EINVAL);
+    PJ_ASSERT_RETURN(turn_sock->sess, PJ_EINVALIDOP);
+
+    /* Copy alloc param. We will call session_alloc() only after the
+     * server address has been resolved.
+     */
+    if (param) {
+        pj_turn_alloc_param_copy(turn_sock->pool, &turn_sock->alloc_param, param);
+    } else {
+        pj_turn_alloc_param_default(&turn_sock->alloc_param);
+    }
+
+    /* Set credential */
+    if (cred) {
+        status = pj_turn_session_set_credential(turn_sock->sess, cred);
+        if (status != PJ_SUCCESS) {
+            sess_fail(turn_sock, "Error setting credential", status);
+            return status;
+        }
+    }
+
+    /* Resolve server (DNS resolver is used when given) */
+    status = pj_turn_session_set_server(turn_sock->sess, domain, default_port,
+					resolver);
+    if (status != PJ_SUCCESS) {
+        sess_fail(turn_sock, "Error setting TURN server", status);
+        return status;
+    }
+
+    /* Done for now. The next work will be done when session state moved
+     * to RESOLVED state.
+     */
+
+    return PJ_SUCCESS;
+}
+
+/*
+ * Install permission.
+ *
+ * Forwards the peer addresses to the session so CreatePermission can
+ * be issued. Returns PJ_EINVALIDOP when the session no longer exists.
+ */
+PJ_DEF(pj_status_t) pj_turn_sock_set_perm( pj_turn_sock *turn_sock,
+					   unsigned addr_cnt,
+					   const pj_sockaddr addr[],
+					   unsigned options)
+{
+    /* Fix: validate the handle before dereferencing, consistent with
+     * pj_turn_sock_sendto() and the other public APIs.
+     */
+    PJ_ASSERT_RETURN(turn_sock, PJ_EINVAL);
+
+    if (turn_sock->sess == NULL)
+        return PJ_EINVALIDOP;
+
+    return pj_turn_session_set_perm(turn_sock->sess, addr_cnt, addr, options);
+}
+
+/*
+ * Send packet.
+ *
+ * Relays pkt/pkt_len to the given peer address through the TURN
+ * session (see pj_turn_session_sendto() for the framing decision).
+ * Returns PJ_EINVALIDOP when the session no longer exists.
+ */
+PJ_DEF(pj_status_t) pj_turn_sock_sendto( pj_turn_sock *turn_sock,
+					const pj_uint8_t *pkt,
+					unsigned pkt_len,
+					const pj_sockaddr_t *addr,
+					unsigned addr_len)
+{
+    PJ_ASSERT_RETURN(turn_sock && addr && addr_len, PJ_EINVAL);
+
+    if (turn_sock->sess == NULL)
+        return PJ_EINVALIDOP;
+
+    return pj_turn_session_sendto(turn_sock->sess, pkt, pkt_len,
+				  addr, addr_len);
+}
+
+/*
+ * Bind a peer address to a channel number.
+ *
+ * Forwards to the TURN session; a session is required (asserts
+ * PJ_EINVALIDOP otherwise).
+ */
+PJ_DEF(pj_status_t) pj_turn_sock_bind_channel( pj_turn_sock *turn_sock,
+					      const pj_sockaddr_t *peer,
+					      unsigned addr_len)
+{
+    PJ_ASSERT_RETURN(turn_sock && peer && addr_len, PJ_EINVAL);
+    PJ_ASSERT_RETURN(turn_sock->sess != NULL, PJ_EINVALIDOP);
+
+    return pj_turn_session_bind_channel(turn_sock->sess, peer, addr_len);
+}
+
+
+/*
+ * Notification when outgoing TCP socket has been connected.
+ *
+ * Also called directly with PJ_SUCCESS for the UDP transport, where
+ * there is no connect handshake. Starts the read loop and sends the
+ * initial ALLOCATE request. Returns PJ_FALSE to tell activesock to
+ * stop when the session has been failed.
+ */
+static pj_bool_t on_connect_complete(pj_activesock_t *asock,
+				     pj_status_t status)
+{
+    pj_turn_sock *turn_sock;
+
+    turn_sock = (pj_turn_sock*) pj_activesock_get_user_data(asock);
+
+    if (status != PJ_SUCCESS) {
+        sess_fail(turn_sock, "TCP connect() error", status);
+        return PJ_FALSE;
+    }
+
+    if (turn_sock->conn_type != PJ_TURN_TP_UDP) {
+        PJ_LOG(5,(turn_sock->obj_name, "TCP connected"));
+    }
+
+    /* Kick start pending read operation */
+    status = pj_activesock_start_read(asock, turn_sock->pool,
+				      PJ_TURN_MAX_PKT_LEN, 0);
+    /* Fix: the result was previously ignored. Without a working read
+     * loop the allocation can never complete, so fail the session
+     * explicitly instead of hanging.
+     */
+    if (status != PJ_SUCCESS) {
+        sess_fail(turn_sock, "Error in start_read()", status);
+        return PJ_FALSE;
+    }
+
+    /* Init send_key */
+    pj_ioqueue_op_key_init(&turn_sock->send_key, sizeof(turn_sock->send_key));
+
+    /* Send Allocate request */
+    status = pj_turn_session_alloc(turn_sock->sess, &turn_sock->alloc_param);
+    if (status != PJ_SUCCESS) {
+        sess_fail(turn_sock, "Error sending ALLOCATE", status);
+        return PJ_FALSE;
+    }
+
+    return PJ_TRUE;
+}
+
+/* Read a 16-bit big-endian value from buf at offset pos, returning it
+ * in host order.
+ */
+static pj_uint16_t GETVAL16H(const pj_uint8_t *buf, unsigned pos)
+{
+    unsigned hi_byte = buf[pos];
+    unsigned lo_byte = buf[pos + 1];
+
+    return (pj_uint16_t)((hi_byte << 8) | lo_byte);
+}
+
+/* Quick check to determine if there is enough packet to process in the
+ * incoming buffer. Return the packet length, or zero if there's no packet.
+ *
+ * For UDP the whole datagram is always one packet. For TCP the buffer
+ * may hold a partial message or several concatenated messages, so the
+ * framing must be worked out: either a STUN message (length field at
+ * offset 2, excluding the 20-byte header) or a ChannelData frame
+ * (4-byte header plus payload, padded up to a 4-byte boundary).
+ *
+ * NOTE(review): for TCP with bufsize < 4 the is_stun probe reads
+ * buf[0..3] past the valid data (still inside the receive buffer's
+ * capacity) -- confirm the buffer is always PJ_TURN_MAX_PKT_LEN.
+ */
+static unsigned has_packet(pj_turn_sock *turn_sock, const void *buf, pj_size_t bufsize)
+{
+    pj_bool_t is_stun;
+
+    if (turn_sock->conn_type == PJ_TURN_TP_UDP)
+        return bufsize;
+
+    /* Quickly check if this is STUN message, by checking the first two bits and
+     * size field which must be multiple of 4 bytes
+     */
+    is_stun = ((((pj_uint8_t*)buf)[0] & 0xC0) == 0) &&
+	      ((GETVAL16H((const pj_uint8_t*)buf, 2) & 0x03)==0);
+
+    if (is_stun) {
+        /* STUN length field excludes the 20-byte message header. */
+        pj_size_t msg_len = GETVAL16H((const pj_uint8_t*)buf, 2);
+        return (msg_len+20 <= bufsize) ? msg_len+20 : 0;
+    } else {
+        /* This must be ChannelData. */
+        pj_turn_channel_data cd;
+
+        /* Need at least the 4-byte ChannelData header. */
+        if (bufsize < 4)
+            return 0;
+
+        /* Decode ChannelData packet */
+        pj_memcpy(&cd, buf, sizeof(pj_turn_channel_data));
+        cd.length = pj_ntohs(cd.length);
+
+        /* Frame length rounded up to a 4-byte boundary (ChannelData is
+         * padded on stream transports).
+         */
+        if (bufsize >= cd.length+sizeof(cd))
+            return (cd.length+sizeof(cd)+3) & (~3);
+        else
+            return 0;
+    }
+}
+
+/*
+ * Notification from ioqueue when incoming packet is received. Despite
+ * the name of the original note, this path serves both UDP and TCP
+ * (stream-oriented) transports.
+ *
+ * Returns PJ_TRUE to keep the read loop going, PJ_FALSE to stop it
+ * (after a stream read error has failed the session).
+ */
+static pj_bool_t on_data_read(pj_activesock_t *asock,
+			      void *data,
+			      pj_size_t size,
+			      pj_status_t status,
+			      pj_size_t *remainder)
+{
+    pj_turn_sock *turn_sock;
+    pj_bool_t ret = PJ_TRUE;
+
+    turn_sock = (pj_turn_sock*) pj_activesock_get_user_data(asock);
+    pj_lock_acquire(turn_sock->lock);
+
+    if (status == PJ_SUCCESS && turn_sock->sess) {
+        /* Report incoming packet to TURN session, repeat while we have
+         * "packet" in the buffer (required for stream-oriented transports)
+         */
+        unsigned pkt_len;
+
+        //PJ_LOG(5,(turn_sock->pool->obj_name,
+        //          "Incoming data, %lu bytes total buffer", size));
+
+        while ((pkt_len=has_packet(turn_sock, data, size)) != 0) {
+            pj_size_t parsed_len;
+            //const pj_uint8_t *pkt = (const pj_uint8_t*)data;
+
+            //PJ_LOG(5,(turn_sock->pool->obj_name,
+            //          "Packet start: %02X %02X %02X %02X",
+            //          pkt[0], pkt[1], pkt[2], pkt[3]));
+
+            //PJ_LOG(5,(turn_sock->pool->obj_name,
+            //          "Processing %lu bytes packet of %lu bytes total buffer",
+            //          pkt_len, size));
+
+            /* The whole remaining buffer (size), not just pkt_len, is
+             * handed to the session; it reports what it consumed back
+             * through parsed_len.
+             */
+            parsed_len = (unsigned)size;
+            pj_turn_session_on_rx_pkt(turn_sock->sess, data, size, &parsed_len);
+
+            /* parsed_len may be zero if we have parsing error, so use our
+             * previous calculation to exhaust the bad packet.
+             */
+            if (parsed_len == 0)
+                parsed_len = pkt_len;
+
+            /* Shift unconsumed bytes to the front; activesock appends
+             * the next read after *remainder bytes.
+             */
+            if (parsed_len < (unsigned)size) {
+                *remainder = size - parsed_len;
+                pj_memmove(data, ((char*)data)+parsed_len, *remainder);
+            } else {
+                *remainder = 0;
+            }
+            size = *remainder;
+
+            //PJ_LOG(5,(turn_sock->pool->obj_name,
+            //          "Buffer size now %lu bytes", size));
+        }
+    } else if (status != PJ_SUCCESS &&
+	       turn_sock->conn_type != PJ_TURN_TP_UDP)
+    {
+        /* Read error on a stream transport means the TCP connection is
+         * gone: fail the session and stop the read loop.
+         */
+        sess_fail(turn_sock, "TCP connection closed", status);
+        ret = PJ_FALSE;
+        goto on_return;
+    }
+
+on_return:
+    pj_lock_release(turn_sock->lock);
+
+    return ret;
+}
+
+
+/*
+ * Callback from TURN session to send outgoing packet.
+ *
+ * The destination address arguments are ignored: all traffic goes to
+ * the TURN server through the single active socket.
+ */
+static pj_status_t turn_on_send_pkt(pj_turn_session *sess,
+				    const pj_uint8_t *pkt,
+				    unsigned pkt_len,
+				    const pj_sockaddr_t *dst_addr,
+				    unsigned dst_addr_len)
+{
+    pj_turn_sock *turn_sock = (pj_turn_sock*)
+			      pj_turn_session_get_user_data(sess);
+    pj_ssize_t len = pkt_len;
+    pj_status_t status;
+
+    if (turn_sock == NULL) {
+        /* We've been destroyed */
+        // https://trac.pjsip.org/repos/ticket/1316
+        //pj_assert(!"We should shutdown gracefully");
+        return PJ_EINVALIDOP;
+    }
+
+    PJ_UNUSED_ARG(dst_addr);
+    PJ_UNUSED_ARG(dst_addr_len);
+
+    /* PJ_EPENDING (send completing asynchronously) is not an error. */
+    status = pj_activesock_send(turn_sock->active_sock, &turn_sock->send_key,
+				pkt, &len, 0);
+    if (status != PJ_SUCCESS && status != PJ_EPENDING) {
+        show_err(turn_sock, "socket send()", status);
+    }
+
+    return status;
+}
+
+
+/*
+ * Callback from TURN session when a channel is successfully bound.
+ * Nothing needs to happen at this layer, so every argument is
+ * intentionally ignored.
+ */
+static void turn_on_channel_bound(pj_turn_session *sess,
+				  const pj_sockaddr_t *peer_addr,
+				  unsigned addr_len,
+				  unsigned ch_num)
+{
+    PJ_UNUSED_ARG(ch_num);
+    PJ_UNUSED_ARG(addr_len);
+    PJ_UNUSED_ARG(peer_addr);
+    PJ_UNUSED_ARG(sess);
+}
+
+
+/*
+ * Callback from TURN session upon incoming data: simply forward the
+ * payload to the application's on_rx_data callback, if it is set and
+ * we have not been destroyed.
+ */
+static void turn_on_rx_data(pj_turn_session *sess,
+			    void *pkt,
+			    unsigned pkt_len,
+			    const pj_sockaddr_t *peer_addr,
+			    unsigned addr_len)
+{
+    pj_turn_sock *turn_sock;
+
+    turn_sock = (pj_turn_sock*) pj_turn_session_get_user_data(sess);
+
+    /* Drop silently when destroyed or when the app is not interested. */
+    if (turn_sock == NULL || turn_sock->cb.on_rx_data == NULL)
+        return;
+
+    (*turn_sock->cb.on_rx_data)(turn_sock, pkt, pkt_len,
+				peer_addr, addr_len);
+}
+
+
+/*
+ * Callback from TURN session when state has changed.
+ *
+ * Responsibilities:
+ *  - notify the application callback first;
+ *  - when the server becomes RESOLVED, create the socket/activesock
+ *    and initiate the (possibly asynchronous) connect to the server;
+ *  - when the session reaches DESTROYING, detach from it and schedule
+ *    our own deferred destruction via TIMER_DESTROY.
+ */
+static void turn_on_state(pj_turn_session *sess,
+			  pj_turn_state_t old_state,
+			  pj_turn_state_t new_state)
+{
+    pj_turn_sock *turn_sock = (pj_turn_sock*)
+			   pj_turn_session_get_user_data(sess);
+    pj_status_t status;
+
+    if (turn_sock == NULL) {
+        /* We've been destroyed */
+        return;
+    }
+
+    /* Notify app first */
+    if (turn_sock->cb.on_state) {
+        (*turn_sock->cb.on_state)(turn_sock, old_state, new_state);
+    }
+
+    /* Make sure user hasn't destroyed us in the callback */
+    if (turn_sock->sess && new_state == PJ_TURN_STATE_RESOLVED) {
+        pj_turn_session_info info;
+        pj_turn_session_get_info(turn_sock->sess, &info);
+        new_state = info.state;
+    }
+
+    if (turn_sock->sess && new_state == PJ_TURN_STATE_RESOLVED) {
+        /*
+         * Once server has been resolved, initiate outgoing TCP
+         * connection to the server.
+         */
+        pj_turn_session_info info;
+        char addrtxt[PJ_INET6_ADDRSTRLEN+8];
+        int sock_type;
+        pj_sock_t sock;
+        pj_activesock_cb asock_cb;
+
+        /* Close existing connection, if any. This happens when
+         * we're switching to alternate TURN server when either TCP
+         * connection or ALLOCATE request failed.
+         */
+        if (turn_sock->active_sock) {
+            pj_activesock_close(turn_sock->active_sock);
+            turn_sock->active_sock = NULL;
+        }
+
+        /* Get server address from session info */
+        pj_turn_session_get_info(sess, &info);
+
+        if (turn_sock->conn_type == PJ_TURN_TP_UDP)
+            sock_type = pj_SOCK_DGRAM();
+        else
+            sock_type = pj_SOCK_STREAM();
+
+        /* Init socket */
+        status = pj_sock_socket(turn_sock->af, sock_type, 0, &sock);
+        if (status != PJ_SUCCESS) {
+            pj_turn_sock_destroy(turn_sock);
+            return;
+        }
+
+        /* Apply QoS, if specified */
+        status = pj_sock_apply_qos2(sock, turn_sock->setting.qos_type,
+				    &turn_sock->setting.qos_params,
+				    (turn_sock->setting.qos_ignore_error?2:1),
+				    turn_sock->pool->obj_name, NULL);
+        if (status != PJ_SUCCESS && !turn_sock->setting.qos_ignore_error) {
+            /* Fix: close the raw socket before bailing out. It has not
+             * been handed to an activesock yet, so the destroy path
+             * would never close it, leaking the fd.
+             */
+            pj_sock_close(sock);
+            pj_turn_sock_destroy(turn_sock);
+            return;
+        }
+
+        /* Create active socket */
+        pj_bzero(&asock_cb, sizeof(asock_cb));
+        asock_cb.on_data_read = &on_data_read;
+        asock_cb.on_connect_complete = &on_connect_complete;
+        status = pj_activesock_create(turn_sock->pool, sock,
+				      sock_type, NULL,
+				      turn_sock->cfg.ioqueue, &asock_cb,
+				      turn_sock,
+				      &turn_sock->active_sock);
+        if (status != PJ_SUCCESS) {
+            /* NOTE(review): 'sock' likely leaks here as well -- confirm
+             * whether pj_activesock_create() closes the socket on
+             * failure before adding a pj_sock_close() call.
+             */
+            pj_turn_sock_destroy(turn_sock);
+            return;
+        }
+
+        PJ_LOG(5,(turn_sock->pool->obj_name,
+                  "Connecting to %s",
+                  pj_sockaddr_print(&info.server, addrtxt,
+                                    sizeof(addrtxt), 3)));
+
+        /* Initiate non-blocking connect */
+#if PJ_HAS_TCP
+        status=pj_activesock_start_connect(turn_sock->active_sock,
+                                           turn_sock->pool,
+                                           &info.server,
+                                           pj_sockaddr_get_len(&info.server));
+        if (status == PJ_SUCCESS) {
+            on_connect_complete(turn_sock->active_sock, PJ_SUCCESS);
+        } else if (status != PJ_EPENDING) {
+            pj_turn_sock_destroy(turn_sock);
+            return;
+        }
+#else
+        /* UDP (or no TCP support): no handshake, proceed immediately. */
+        on_connect_complete(turn_sock->active_sock, PJ_SUCCESS);
+#endif
+
+        /* Done for now. Subsequent work will be done in
+         * on_connect_complete() callback.
+         */
+    }
+
+    if (new_state >= PJ_TURN_STATE_DESTROYING && turn_sock->sess) {
+        pj_time_val delay = {0, 0};
+
+        /* Detach from the dying session so later callbacks see NULL. */
+        turn_sock->sess = NULL;
+        pj_turn_session_set_user_data(sess, NULL);
+
+        /* Cancel any pending timer before rescheduling the entry. */
+        if (turn_sock->timer.id) {
+            pj_timer_heap_cancel(turn_sock->cfg.timer_heap, &turn_sock->timer);
+            turn_sock->timer.id = 0;
+        }
+
+        /* Destroy ourselves from the timer heap rather than from within
+         * the session's callback stack.
+         */
+        turn_sock->timer.id = TIMER_DESTROY;
+        pj_timer_heap_schedule(turn_sock->cfg.timer_heap, &turn_sock->timer,
+			       &delay);
+    }
+}
+
+