author     Jason Parker <jparker@digium.com>    2013-03-11 15:09:56 -0500
committer  Jason Parker <jparker@digium.com>    2013-03-11 15:09:56 -0500
commit     483805f79570115ab95c69698792d238c1719b1b (patch)
tree       6b53ab2fd2b2478f864ccc8bd1b0bfaedc4d2050 /pjlib
parent     f3ab456a17af1c89a6e3be4d20c5944853df1cb0 (diff)
Import pjproject-2.1
Diffstat (limited to 'pjlib')
-rw-r--r--  pjlib/build/Makefile | 2
-rw-r--r--  pjlib/include/pj/activesock.h | 9
-rw-r--r--  pjlib/include/pj/addr_resolv.h | 27
-rw-r--r--  pjlib/include/pj/config.h | 22
-rw-r--r--  pjlib/include/pj/config_site_sample.h | 6
-rw-r--r--  pjlib/include/pj/errno.h | 7
-rw-r--r--  pjlib/include/pj/guid.h | 21
-rw-r--r--  pjlib/include/pj/hash.h | 40
-rw-r--r--  pjlib/include/pj/ioqueue.h | 15
-rw-r--r--  pjlib/include/pj/lock.h | 254
-rw-r--r--  pjlib/include/pj/pool.h | 4
-rw-r--r--  pjlib/include/pj/pool_i.h | 4
-rw-r--r--  pjlib/include/pj/sock.h | 20
-rw-r--r--  pjlib/include/pj/timer.h | 68
-rw-r--r--  pjlib/include/pj/types.h | 5
-rw-r--r--  pjlib/src/pj/activesock.c | 58
-rw-r--r--  pjlib/src/pj/errno.c | 5
-rw-r--r--  pjlib/src/pj/guid.c | 23
-rw-r--r--  pjlib/src/pj/hash.c | 95
-rw-r--r--  pjlib/src/pj/ioqueue_common_abs.c | 116
-rw-r--r--  pjlib/src/pj/ioqueue_common_abs.h | 3
-rw-r--r--  pjlib/src/pj/ioqueue_epoll.c | 20
-rw-r--r--  pjlib/src/pj/ioqueue_select.c | 83
-rw-r--r--  pjlib/src/pj/ioqueue_symbian.cpp | 15
-rw-r--r--  pjlib/src/pj/ip_helper_generic.c | 20
-rw-r--r--  pjlib/src/pj/lock.c | 519
-rw-r--r--  pjlib/src/pj/os_core_unix.c | 123
-rw-r--r--  pjlib/src/pj/os_info.c | 5
-rw-r--r--  pjlib/src/pj/pool.c | 6
-rw-r--r--  pjlib/src/pj/pool_caching.c | 10
-rw-r--r--  pjlib/src/pj/sock_bsd.c | 7
-rw-r--r--  pjlib/src/pj/sock_common.c | 111
-rw-r--r--  pjlib/src/pj/ssl_sock_ossl.c | 436
-rw-r--r--  pjlib/src/pj/timer.c | 119
-rw-r--r--  pjlib/src/pj/timer_symbian.cpp | 31
-rw-r--r--  pjlib/src/pjlib-test/activesock.c | 3
-rw-r--r--  pjlib/src/pjlib-test/ioq_tcp.c | 14
-rw-r--r--  pjlib/src/pjlib-test/ioq_udp.c | 7
-rw-r--r--  pjlib/src/pjlib-test/ssl_sock.c | 25
39 files changed, 1996 insertions, 362 deletions
diff --git a/pjlib/build/Makefile b/pjlib/build/Makefile
index 9183644..a36f5f4 100644
--- a/pjlib/build/Makefile
+++ b/pjlib/build/Makefile
@@ -87,7 +87,7 @@ pjlib: ../include/pj/config_site.h
../include/pj/config_site.h:
touch ../include/pj/config_site.h
-pjlib-test:
+pjlib-test: pjlib
$(MAKE) -f $(RULES_MAK) APP=TEST app=pjlib-test $(TEST_EXE)
.PHONY: ../lib/pjlib.ko
diff --git a/pjlib/include/pj/activesock.h b/pjlib/include/pj/activesock.h
index 0c30c01..6d3b0d0 100644
--- a/pjlib/include/pj/activesock.h
+++ b/pjlib/include/pj/activesock.h
@@ -1,4 +1,4 @@
-/* $Id: activesock.h 3553 2011-05-05 06:14:19Z nanang $ */
+/* $Id: activesock.h 4359 2013-02-21 11:18:36Z bennylp $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -174,6 +174,11 @@ typedef struct pj_activesock_cb
typedef struct pj_activesock_cfg
{
/**
+ * Optional group lock to be assigned to the ioqueue key.
+ */
+ pj_grp_lock_t *grp_lock;
+
+ /**
* Number of concurrent asynchronous operations that is to be supported
* by the active socket. This value only affects socket receive and
* accept operations -- the active socket will issue one or more
@@ -290,7 +295,6 @@ PJ_DECL(pj_status_t) pj_activesock_create_udp(pj_pool_t *pool,
pj_activesock_t **p_asock,
pj_sockaddr *bound_addr);
-
/**
* Close the active socket. This will unregister the socket from the
* ioqueue and ultimately close the socket.
@@ -548,6 +552,7 @@ PJ_DECL(pj_status_t) pj_activesock_start_connect(pj_activesock_t *asock,
const pj_sockaddr_t *remaddr,
int addr_len);
+
#endif /* PJ_HAS_TCP */
/**
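The new grp_lock field in pj_activesock_cfg lets the active socket's ioqueue key join a group lock. A minimal sketch of passing one in, assuming the pool, ioqueue, socket and callback set already exist (names are illustrative):

#include <pjlib.h>

/* Sketch: create a group lock and hand it to the active socket; the
 * ioqueue key keeps a reference on it while the socket is registered.
 */
static pj_status_t create_asock_with_grp_lock(pj_pool_t *pool,
                                              pj_ioqueue_t *ioqueue,
                                              pj_sock_t sock,
                                              const pj_activesock_cb *cb,
                                              pj_activesock_t **p_asock)
{
    pj_grp_lock_t *grp_lock;
    pj_activesock_cfg cfg;
    pj_status_t status;

    status = pj_grp_lock_create(pool, NULL, &grp_lock);
    if (status != PJ_SUCCESS)
        return status;

    pj_activesock_cfg_default(&cfg);
    cfg.grp_lock = grp_lock;

    return pj_activesock_create(pool, sock, pj_SOCK_STREAM(), &cfg,
                                ioqueue, cb, NULL, p_asock);
}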
diff --git a/pjlib/include/pj/addr_resolv.h b/pjlib/include/pj/addr_resolv.h
index ae10337..41eacc8 100644
--- a/pjlib/include/pj/addr_resolv.h
+++ b/pjlib/include/pj/addr_resolv.h
@@ -1,4 +1,4 @@
-/* $Id: addr_resolv.h 3553 2011-05-05 06:14:19Z nanang $ */
+/* $Id: addr_resolv.h 4218 2012-08-07 02:18:15Z bennylp $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -117,6 +117,31 @@ PJ_DECL(pj_status_t) pj_gethostip(int af, pj_sockaddr *addr);
/**
+ * Get the interface IP address to send data to the specified destination.
+ *
+ * @param af The desired address family to query. Valid values
+ * are pj_AF_INET() or pj_AF_INET6().
+ * @param dst The destination host.
+ * @param itf_addr On successful resolution, the address family and address
+ * part of this socket address will be filled up with the host
+ * IP address, in network byte order. Other parts of the socket
+ * address should be ignored.
+ * @param allow_resolve If \a dst may contain hostname (instead of IP
+ * address), specify whether hostname resolution should
+ * be performed. If not, default interface address will
+ * be returned.
+ * @param p_dst_addr If not NULL, it will be filled with the IP address of
+ * the destination host.
+ *
+ * @return PJ_SUCCESS on success, or the appropriate error code.
+ */
+PJ_DECL(pj_status_t) pj_getipinterface(int af,
+ const pj_str_t *dst,
+ pj_sockaddr *itf_addr,
+ pj_bool_t allow_resolve,
+ pj_sockaddr *p_dst_addr);
+
+/**
* Get the IP address of the default interface. Default interface is the
* interface of the default route.
*
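A small sketch of the new pj_getipinterface(): find which local address would be used to reach a destination. The hostname is a placeholder, and allow_resolve is enabled so it may be resolved first:

#include <pjlib.h>

static void show_itf_for_host(void)
{
    pj_str_t dst = pj_str("example.com");
    pj_sockaddr itf_addr;
    char buf[PJ_INET6_ADDRSTRLEN];

    if (pj_getipinterface(pj_AF_INET(), &dst, &itf_addr,
                          PJ_TRUE, NULL) == PJ_SUCCESS)
    {
        PJ_LOG(4, ("app", "outgoing interface: %s",
                   pj_sockaddr_print(&itf_addr, buf, sizeof(buf), 0)));
    }
}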
diff --git a/pjlib/include/pj/config.h b/pjlib/include/pj/config.h
index 629fa44..120e6ce 100644
--- a/pjlib/include/pj/config.h
+++ b/pjlib/include/pj/config.h
@@ -1,4 +1,4 @@
-/* $Id: config.h 4189 2012-07-03 03:11:24Z ming $ */
+/* $Id: config.h 4415 2013-03-05 08:34:45Z ming $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -488,6 +488,14 @@
/**
+ * Set this to 1 to enable debugging on the group lock. Default: 0
+ */
+#ifndef PJ_GRP_LOCK_DEBUG
+# define PJ_GRP_LOCK_DEBUG 0
+#endif
+
+
+/**
* Specify this as \a stack_size argument in #pj_thread_create() to specify
* that thread should use default stack size for the current platform.
*
@@ -1122,6 +1130,14 @@
#endif
/**
+ * Simulate race condition by sleeping the thread in strategic locations.
+ * Default: no!
+ */
+#ifndef PJ_RACE_ME
+# define PJ_RACE_ME(x)
+#endif
+
+/**
* Function attributes to inform that the function may throw exception.
*
* @param x The exception list, enclosed in parenthesis.
@@ -1167,10 +1183,10 @@ PJ_BEGIN_DECL
#define PJ_VERSION_NUM_MAJOR 2
/** PJLIB version minor number. */
-#define PJ_VERSION_NUM_MINOR 0
+#define PJ_VERSION_NUM_MINOR 1
/** PJLIB version revision number. */
-#define PJ_VERSION_NUM_REV 1
+#define PJ_VERSION_NUM_REV 0
/**
* Extra suffix for the version (e.g. "-trunk"), or empty for
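PJ_RACE_ME expands to nothing by default. A build that wants to widen race windows for testing could override it from config_site.h; the sleep-based definition below is only an assumption (not the project's official definition) and relies on pj_thread_sleep() being declared wherever the macro expands:

/* Hypothetical config_site.h override: sleep x milliseconds at each
 * instrumented point so competing threads get a chance to interleave. */
#define PJ_RACE_ME(x)  pj_thread_sleep(x)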
diff --git a/pjlib/include/pj/config_site_sample.h b/pjlib/include/pj/config_site_sample.h
index 0c7832f..845d1bb 100644
--- a/pjlib/include/pj/config_site_sample.h
+++ b/pjlib/include/pj/config_site_sample.h
@@ -302,8 +302,10 @@
* PJLIB settings.
*/
- /* Disable floating point support */
- #define PJ_HAS_FLOATING_POINT 0
+ /* Both armv6 and armv7 have FP hardware support.
+ * See https://trac.pjsip.org/repos/ticket/1589 for more info
+ */
+ #define PJ_HAS_FLOATING_POINT 1
/*
* PJMEDIA settings
diff --git a/pjlib/include/pj/errno.h b/pjlib/include/pj/errno.h
index 98a735a..75aadbe 100644
--- a/pjlib/include/pj/errno.h
+++ b/pjlib/include/pj/errno.h
@@ -1,4 +1,4 @@
-/* $Id: errno.h 3664 2011-07-19 03:42:28Z nanang $ */
+/* $Id: errno.h 4359 2013-02-21 11:18:36Z bennylp $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -422,6 +422,11 @@ PJ_DECL(pj_status_t) pj_register_strerror(pj_status_t start_code,
* Unsupported address family
*/
#define PJ_EAFNOTSUP (PJ_ERRNO_START_STATUS + 22)/* 70022 */
+/**
+ * @hideinitializer
+ * Object no longer exists
+ */
+#define PJ_EGONE (PJ_ERRNO_START_STATUS + 23)/* 70023 */
/** @} */ /* pj_errnum */
diff --git a/pjlib/include/pj/guid.h b/pjlib/include/pj/guid.h
index 991ce53..2a76af6 100644
--- a/pjlib/include/pj/guid.h
+++ b/pjlib/include/pj/guid.h
@@ -1,4 +1,4 @@
-/* $Id: guid.h 3553 2011-05-05 06:14:19Z nanang $ */
+/* $Id: guid.h 4208 2012-07-18 07:52:33Z ming $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -83,6 +83,17 @@ PJ_DECL(unsigned) pj_GUID_STRING_LENGTH(void);
PJ_DECL(pj_str_t*) pj_generate_unique_string(pj_str_t *str);
/**
+ * Create a globally unique string in lowercase, whose length is
+ * PJ_GUID_STRING_LENGTH characters. Caller is responsible for preallocating
+ * the storage used in the string.
+ *
+ * @param str The string to store the result.
+ *
+ * @return The string.
+ */
+PJ_DECL(pj_str_t*) pj_generate_unique_string_lower(pj_str_t *str);
+
+/**
* Generate a unique string.
*
* @param pool Pool to allocate memory from.
@@ -90,6 +101,14 @@ PJ_DECL(pj_str_t*) pj_generate_unique_string(pj_str_t *str);
*/
PJ_DECL(void) pj_create_unique_string(pj_pool_t *pool, pj_str_t *str);
+/**
+ * Generate a unique string in lowercase.
+ *
+ * @param pool Pool to allocate memory from.
+ * @param str The string.
+ */
+PJ_DECL(void) pj_create_unique_string_lower(pj_pool_t *pool, pj_str_t *str);
+
/**
* @}
diff --git a/pjlib/include/pj/hash.h b/pjlib/include/pj/hash.h
index 75b46c1..5d9a2d9 100644
--- a/pjlib/include/pj/hash.h
+++ b/pjlib/include/pj/hash.h
@@ -1,4 +1,4 @@
-/* $Id: hash.h 3841 2011-10-24 09:28:13Z ming $ */
+/* $Id: hash.h 4208 2012-07-18 07:52:33Z ming $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -75,8 +75,8 @@ PJ_DECL(pj_uint32_t) pj_hash_calc(pj_uint32_t hval,
* string is stored in \c result.
*
* @param hval The initial hash value, normally zero.
- * @param result Buffer to store the result, which must be enough to hold
- * the string.
+ * @param result Optional. Buffer to store the result, which must be enough
+ * to hold the string.
* @param key The input key to be converted and calculated.
*
* @return The hash value.
@@ -116,6 +116,17 @@ PJ_DECL(void *) pj_hash_get( pj_hash_table_t *ht,
/**
+ * Variant of #pj_hash_get() with the key being converted to lowercase when
+ * calculating the hash value.
+ *
+ * @see pj_hash_get()
+ */
+PJ_DECL(void *) pj_hash_get_lower( pj_hash_table_t *ht,
+ const void *key, unsigned keylen,
+ pj_uint32_t *hval );
+
+
+/**
* Associate/disassociate a value with the specified key. If value is not
* NULL and entry already exists, the entry's value will be overwritten.
* If value is not NULL and entry does not exist, a new one will be created
@@ -142,6 +153,17 @@ PJ_DECL(void) pj_hash_set( pj_pool_t *pool, pj_hash_table_t *ht,
/**
+ * Variant of #pj_hash_set() with the key being converted to lowercase when
+ * calculating the hash value.
+ *
+ * @see pj_hash_set()
+ */
+PJ_DECL(void) pj_hash_set_lower( pj_pool_t *pool, pj_hash_table_t *ht,
+ const void *key, unsigned keylen,
+ pj_uint32_t hval, void *value );
+
+
+/**
* Associate/disassociate a value with the specified key. This function works
* like #pj_hash_set(), except that it doesn't use pool (hence the np -- no
* pool suffix). If new entry needs to be allocated, it will use the entry_buf.
@@ -165,6 +187,18 @@ PJ_DECL(void) pj_hash_set_np(pj_hash_table_t *ht,
void *value);
/**
+ * Variant of #pj_hash_set_np() with the key being converted to lowercase
+ * when calculating the hash value.
+ *
+ * @see pj_hash_set_np()
+ */
+PJ_DECL(void) pj_hash_set_np_lower(pj_hash_table_t *ht,
+ const void *key, unsigned keylen,
+ pj_uint32_t hval,
+ pj_hash_entry_buf entry_buf,
+ void *value);
+
+/**
* Get the total number of entries in the hash table.
*
* @param ht the hash table.
diff --git a/pjlib/include/pj/ioqueue.h b/pjlib/include/pj/ioqueue.h
index 068e7ba..853bf48 100644
--- a/pjlib/include/pj/ioqueue.h
+++ b/pjlib/include/pj/ioqueue.h
@@ -1,4 +1,4 @@
-/* $Id: ioqueue.h 3553 2011-05-05 06:14:19Z nanang $
+/* $Id: ioqueue.h 4359 2013-02-21 11:18:36Z bennylp $
*/
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
@@ -405,6 +405,19 @@ PJ_DECL(pj_status_t) pj_ioqueue_register_sock( pj_pool_t *pool,
pj_ioqueue_key_t **key );
/**
+ * Variant of pj_ioqueue_register_sock() with additional group lock parameter.
+ * If a group lock is set for the key, the key will increment the group
+ * lock's reference counter when the socket is registered and decrement it
+ * when the key is destroyed.
+ */
+PJ_DECL(pj_status_t) pj_ioqueue_register_sock2(pj_pool_t *pool,
+ pj_ioqueue_t *ioque,
+ pj_sock_t sock,
+ pj_grp_lock_t *grp_lock,
+ void *user_data,
+ const pj_ioqueue_callback *cb,
+ pj_ioqueue_key_t **key );
+
+/**
* Unregister from the I/O Queue framework. Caller must make sure that
* the key doesn't have any pending operations before calling this function,
* by calling #pj_ioqueue_is_pending() for all previously submitted
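A minimal sketch of pj_ioqueue_register_sock2(), assuming the socket, pool, ioqueue, group lock and callback set are created elsewhere:

#include <pjlib.h>

/* Sketch: register an existing socket with a key that shares the
 * caller's group lock. The key adds a reference on grp_lock here and
 * drops it again when the key is eventually destroyed.
 */
static pj_status_t register_with_grp_lock(pj_pool_t *pool,
                                          pj_ioqueue_t *ioqueue,
                                          pj_sock_t sock,
                                          pj_grp_lock_t *grp_lock,
                                          const pj_ioqueue_callback *cb,
                                          pj_ioqueue_key_t **p_key)
{
    return pj_ioqueue_register_sock2(pool, ioqueue, sock, grp_lock,
                                     NULL /* user_data */, cb, p_key);
}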
diff --git a/pjlib/include/pj/lock.h b/pjlib/include/pj/lock.h
index f0d3bc5..e9b26c7 100644
--- a/pjlib/include/pj/lock.h
+++ b/pjlib/include/pj/lock.h
@@ -1,4 +1,4 @@
-/* $Id: lock.h 3553 2011-05-05 06:14:19Z nanang $ */
+/* $Id: lock.h 4359 2013-02-21 11:18:36Z bennylp $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -147,6 +147,258 @@ PJ_DECL(pj_status_t) pj_lock_destroy( pj_lock_t *lock );
/** @} */
+
+/**
+ * @defgroup PJ_GRP_LOCK Group Lock
+ * @ingroup PJ_LOCK
+ * @{
+ *
+ * Group lock is a synchronization object to manage concurrency among members
+ * within the same logical group. Example of such groups are:
+ *
+ * - dialog, which has members such as the dialog itself, an invite session,
+ * and several transactions
+ * - ICE, which has members such as ICE stream transport, ICE session, STUN
+ * socket, TURN socket, and down to ioqueue key
+ *
+ * Group lock has three functions:
+ *
+ * - mutual exclusion: to protect resources from being accessed by more than
+ * one threads at the same time
+ * - session management: to make sure that the resource is not destroyed
+ * while others are still using or about to use it.
+ * - lock coordinator: to provide uniform lock ordering among more than one
+ * lock objects, which is necessary to avoid deadlock.
+ *
+ * The requirements of the group lock are:
+ *
+ * - must satisfy all the functions above
+ * - must allow members to join or leave the group (for example,
+ * transaction may be added or removed from a dialog)
+ * - must be able to synchronize with external lock (for example, a dialog
+ * lock must be able to sync itself with PJSUA lock)
+ *
+ * Please see https://trac.pjsip.org/repos/wiki/Group_Lock for more info.
+ */
+
+/**
+ * Settings for creating the group lock.
+ */
+typedef struct pj_grp_lock_config
+{
+ /**
+ * Creation flags, currently must be zero.
+ */
+ unsigned flags;
+
+} pj_grp_lock_config;
+
+
+/**
+ * Initialize the config with the default values.
+ *
+ * @param cfg The config to be initialized.
+ */
+PJ_DECL(void) pj_grp_lock_config_default(pj_grp_lock_config *cfg);
+
+/**
+ * Create a group lock object. Initially the group lock will have reference
+ * counter of one.
+ *
+ * @param pool The group lock only uses the pool parameter to get
+ * the pool factory, from which it will create its own
+ * pool.
+ * @param cfg Optional configuration.
+ * @param p_grp_lock Pointer to receive the newly created group lock.
+ *
+ * @return PJ_SUCCESS or the appropriate error code.
+ */
+PJ_DECL(pj_status_t) pj_grp_lock_create(pj_pool_t *pool,
+ const pj_grp_lock_config *cfg,
+ pj_grp_lock_t **p_grp_lock);
+
+/**
+ * Forcibly destroy the group lock, ignoring the reference counter value.
+ *
+ * @param grp_lock The group lock.
+ *
+ * @return PJ_SUCCESS or the appropriate error code.
+ */
+PJ_DECL(pj_status_t) pj_grp_lock_destroy( pj_grp_lock_t *grp_lock);
+
+/**
+ * Move the contents of the old lock to the new lock and destroy the
+ * old lock.
+ *
+ * @param old_lock The old group lock to be destroyed.
+ * @param new_lock The new group lock.
+ *
+ * @return PJ_SUCCESS or the appropriate error code.
+ */
+PJ_DECL(pj_status_t) pj_grp_lock_replace(pj_grp_lock_t *old_lock,
+ pj_grp_lock_t *new_lock);
+
+/**
+ * Acquire lock on the specified group lock.
+ *
+ * @param grp_lock The group lock.
+ *
+ * @return PJ_SUCCESS or the appropriate error code.
+ */
+PJ_DECL(pj_status_t) pj_grp_lock_acquire( pj_grp_lock_t *grp_lock);
+
+/**
+ * Acquire lock on the specified group lock if it is available, otherwise
+ * return immediately without waiting.
+ *
+ * @param grp_lock The group lock.
+ *
+ * @return PJ_SUCCESS or the appropriate error code.
+ */
+PJ_DECL(pj_status_t) pj_grp_lock_tryacquire( pj_grp_lock_t *grp_lock);
+
+/**
+ * Release the previously held lock. This may cause the group lock
+ * to be destroyed if it is the last one to hold the reference counter.
+ * In that case, the function will return PJ_EGONE.
+ *
+ * @param grp_lock The group lock.
+ *
+ * @return PJ_SUCCESS or the appropriate error code.
+ */
+PJ_DECL(pj_status_t) pj_grp_lock_release( pj_grp_lock_t *grp_lock);
+
+/**
+ * Add a destructor handler, to be called by the group lock when it is
+ * about to be destroyed.
+ *
+ * @param grp_lock The group lock.
+ * @param pool Pool to allocate memory for the handler.
+ * @param member A pointer to be passed to the handler.
+ * @param handler The destroy handler.
+ *
+ * @return PJ_SUCCESS or the appropriate error code.
+ */
+PJ_DECL(pj_status_t) pj_grp_lock_add_handler(pj_grp_lock_t *grp_lock,
+ pj_pool_t *pool,
+ void *member,
+ void (*handler)(void *member));
+
+/**
+ * Remove previously registered handler. All parameters must be the same
+ * as when the handler was added.
+ *
+ * @param grp_lock The group lock.
+ * @param member A pointer to be passed to the handler.
+ * @param handler The destroy handler.
+ *
+ * @return PJ_SUCCESS or the appropriate error code.
+ */
+PJ_DECL(pj_status_t) pj_grp_lock_del_handler(pj_grp_lock_t *grp_lock,
+ void *member,
+ void (*handler)(void *member));
+
+/**
+ * Increment reference counter to prevent the group lock from being destroyed.
+ *
+ * @param grp_lock The group lock.
+ *
+ * @return PJ_SUCCESS or the appropriate error code.
+ */
+#if !PJ_GRP_LOCK_DEBUG
+PJ_DECL(pj_status_t) pj_grp_lock_add_ref(pj_grp_lock_t *grp_lock);
+
+#define pj_grp_lock_add_ref_dbg(grp_lock, x, y) pj_grp_lock_add_ref(grp_lock)
+
+#else
+
+#define pj_grp_lock_add_ref(g) pj_grp_lock_add_ref_dbg(g, __FILE__, __LINE__)
+
+PJ_DECL(pj_status_t) pj_grp_lock_add_ref_dbg(pj_grp_lock_t *grp_lock,
+ const char *file,
+ int line);
+#endif
+
+/**
+ * Decrement the reference counter. When the counter value reaches zero, the
+ * group lock will be destroyed and all destructor handlers will be called.
+ *
+ * @param grp_lock The group lock.
+ *
+ * @return PJ_SUCCESS or the appropriate error code.
+ */
+#if !PJ_GRP_LOCK_DEBUG
+PJ_DECL(pj_status_t) pj_grp_lock_dec_ref(pj_grp_lock_t *grp_lock);
+
+#define pj_grp_lock_dec_ref_dbg(grp_lock, x, y) pj_grp_lock_dec_ref(grp_lock)
+#else
+
+#define pj_grp_lock_dec_ref(g) pj_grp_lock_dec_ref_dbg(g, __FILE__, __LINE__)
+
+PJ_DECL(pj_status_t) pj_grp_lock_dec_ref_dbg(pj_grp_lock_t *grp_lock,
+ const char *file,
+ int line);
+
+#endif
+
+/**
+ * Get current reference count value. This normally is only used for
+ * debugging purpose.
+ *
+ * @param grp_lock The group lock.
+ *
+ * @return The reference count value.
+ */
+PJ_DECL(int) pj_grp_lock_get_ref(pj_grp_lock_t *grp_lock);
+
+
+/**
+ * Dump group lock info for debugging purposes. If group lock debugging is
+ * enabled (via the PJ_GRP_LOCK_DEBUG macro), this will print the group lock
+ * reference counter value along with the source file and line. If
+ * debugging is disabled, this will only print the reference counter.
+ *
+ * @param grp_lock The group lock.
+ */
+PJ_DECL(void) pj_grp_lock_dump(pj_grp_lock_t *grp_lock);
+
+
+/**
+ * Synchronize an external lock with the group lock, by adding it to the
+ * list of locks to be acquired by the group lock when the group lock is
+ * acquired.
+ *
+ * The ''pos'' argument specifies the lock order and also the relative
+ * position with regard to lock ordering against the group lock. Locks with
+ * lower ''pos'' value will be locked first, and those with negative value
+ * will be locked before the group lock (the group lock's ''pos'' value is
+ * zero).
+ *
+ * @param grp_lock The group lock.
+ * @param ext_lock The external lock
+ * @param pos The position.
+ *
+ * @return PJ_SUCCESS or the appropriate error code.
+ */
+PJ_DECL(pj_status_t) pj_grp_lock_chain_lock(pj_grp_lock_t *grp_lock,
+ pj_lock_t *ext_lock,
+ int pos);
+
+/**
+ * Remove an external lock from group lock's list of synchronized locks.
+ *
+ * @param grp_lock The group lock.
+ * @param ext_lock The external lock
+ *
+ * @return PJ_SUCCESS or the appropriate error code.
+ */
+PJ_DECL(pj_status_t) pj_grp_lock_unchain_lock(pj_grp_lock_t *grp_lock,
+ pj_lock_t *ext_lock);
+
+
+/** @} */
+
+
PJ_END_DECL
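Tying the new API together, a typical member object creates the lock, registers a destructor handler, and drops its reference when it shuts down. A minimal sketch under the documented semantics above (the my_obj type is purely illustrative; per pj_grp_lock_create() the lock starts with one reference, which the sketch treats as the object's own):

#include <pjlib.h>

struct my_obj
{
    pj_pool_t     *pool;
    pj_grp_lock_t *grp_lock;
};

/* Called by the group lock when the last reference is dropped. */
static void my_obj_on_destroy(void *member)
{
    struct my_obj *obj = (struct my_obj*)member;
    pj_pool_release(obj->pool);
}

static pj_status_t my_obj_init(pj_pool_t *pool, struct my_obj *obj)
{
    pj_status_t status;

    status = pj_grp_lock_create(pool, NULL, &obj->grp_lock);
    if (status != PJ_SUCCESS)
        return status;

    obj->pool = pool;
    return pj_grp_lock_add_handler(obj->grp_lock, pool, obj,
                                   &my_obj_on_destroy);
}

static void my_obj_shutdown(struct my_obj *obj)
{
    /* Other members (ioqueue keys, timer entries) keep their own
     * references; my_obj_on_destroy() runs once they are all gone. */
    pj_grp_lock_dec_ref(obj->grp_lock);
}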
diff --git a/pjlib/include/pj/pool.h b/pjlib/include/pj/pool.h
index 5738d4f..73c8473 100644
--- a/pjlib/include/pj/pool.h
+++ b/pjlib/include/pj/pool.h
@@ -1,4 +1,4 @@
-/* $Id: pool.h 3553 2011-05-05 06:14:19Z nanang $ */
+/* $Id: pool.h 4298 2012-11-22 05:00:01Z nanang $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -509,7 +509,7 @@ PJ_INLINE(void*) pj_pool_zalloc(pj_pool_t *pool, pj_size_t size)
* Internal functions
*/
PJ_IDECL(void*) pj_pool_alloc_from_block(pj_pool_block *block, pj_size_t size);
-PJ_DECL(void*) pj_pool_allocate_find(pj_pool_t *pool, unsigned size);
+PJ_DECL(void*) pj_pool_allocate_find(pj_pool_t *pool, pj_size_t size);
diff --git a/pjlib/include/pj/pool_i.h b/pjlib/include/pj/pool_i.h
index ea4fa2d..376fd6a 100644
--- a/pjlib/include/pj/pool_i.h
+++ b/pjlib/include/pj/pool_i.h
@@ -1,4 +1,4 @@
-/* $Id: pool_i.h 3553 2011-05-05 06:14:19Z nanang $ */
+/* $Id: pool_i.h 4298 2012-11-22 05:00:01Z nanang $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -47,7 +47,7 @@ PJ_IDEF(void*) pj_pool_alloc_from_block( pj_pool_block *block, pj_size_t size )
if (size & (PJ_POOL_ALIGNMENT-1)) {
size = (size + PJ_POOL_ALIGNMENT) & ~(PJ_POOL_ALIGNMENT-1);
}
- if ((unsigned)(block->end - block->cur) >= size) {
+ if ((pj_size_t)(block->end - block->cur) >= size) {
void *ptr = block->cur;
block->cur += size;
return ptr;
diff --git a/pjlib/include/pj/sock.h b/pjlib/include/pj/sock.h
index 2fcd896..5692d61 100644
--- a/pjlib/include/pj/sock.h
+++ b/pjlib/include/pj/sock.h
@@ -1,4 +1,4 @@
-/* $Id: sock.h 3841 2011-10-24 09:28:13Z ming $ */
+/* $Id: sock.h 4343 2013-02-07 09:35:34Z nanang $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -1166,6 +1166,24 @@ PJ_DECL(pj_status_t) pj_sock_bind_in( pj_sock_t sockfd,
pj_uint32_t addr,
pj_uint16_t port);
+/**
+ * Bind the IP socket sockfd to the given address and a random port in the
+ * specified range.
+ *
+ * @param sockfd The socket descriptor.
+ * @param addr The local address and port to bind the socket to.
+ * @param port_range The port range, relative to the start port number
+ * specified in port field in #addr. Note that if the
+ * port is zero, this param will be ignored.
+ * @param max_try Maximum retries.
+ *
+ * @return Zero on success.
+ */
+PJ_DECL(pj_status_t) pj_sock_bind_random( pj_sock_t sockfd,
+ const pj_sockaddr_t *addr,
+ pj_uint16_t port_range,
+ pj_uint16_t max_try);
+
#if PJ_HAS_TCP
/**
* Listen for incoming connection. This function only applies to connection
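A short sketch of pj_sock_bind_random(), binding a UDP socket to any IPv4 address and a random port (the 5000/100/10 values are illustrative):

#include <pjlib.h>

static pj_status_t make_udp_sock(pj_sock_t *p_sock)
{
    pj_sockaddr addr;
    pj_status_t status;

    status = pj_sock_socket(pj_AF_INET(), pj_SOCK_DGRAM(), 0, p_sock);
    if (status != PJ_SUCCESS)
        return status;

    /* Bind to 0.0.0.0 and a random port in [5000, 5100), retrying up
     * to ten times on collision. */
    pj_sockaddr_init(pj_AF_INET(), &addr, NULL, 5000);
    return pj_sock_bind_random(*p_sock, &addr, 100, 10);
}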
diff --git a/pjlib/include/pj/timer.h b/pjlib/include/pj/timer.h
index 1cc11cf..a89c30b 100644
--- a/pjlib/include/pj/timer.h
+++ b/pjlib/include/pj/timer.h
@@ -1,4 +1,4 @@
-/* $Id: timer.h 4154 2012-06-05 10:41:17Z bennylp $ */
+/* $Id: timer.h 4359 2013-02-21 11:18:36Z bennylp $ */
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -24,6 +24,7 @@
*/
#include <pj/types.h>
+#include <pj/lock.h>
PJ_BEGIN_DECL
@@ -118,6 +119,12 @@ typedef struct pj_timer_entry
*/
pj_time_val _timer_value;
+ /**
+ * Internal: the group lock used by this entry, set when
+ * pj_timer_heap_schedule_w_lock() is used.
+ */
+ pj_grp_lock_t *_grp_lock;
+
#if PJ_TIMER_DEBUG
const char *src_file;
int src_line;
@@ -229,7 +236,46 @@ PJ_DECL(pj_status_t) pj_timer_heap_schedule( pj_timer_heap_t *ht,
#endif /* PJ_TIMER_DEBUG */
/**
- * Cancel a previously registered timer.
+ * Schedule a timer entry which will expire AFTER the specified delay, and
+ * increment the reference counter of the group lock while the timer entry
+ * is active. The group lock reference counter will automatically be released
+ * after the timer callback is called or when the timer is cancelled.
+ *
+ * @param ht The timer heap.
+ * @param entry The entry to be registered.
+ * @param id_val The value to be set to the "id" field of the timer entry
+ * once the timer is scheduled.
+ * @param delay The interval to expire.
+ * @param grp_lock The group lock.
+ *
+ * @return PJ_SUCCESS, or the appropriate error code.
+ */
+#if PJ_TIMER_DEBUG
+# define pj_timer_heap_schedule_w_grp_lock(ht,e,d,id,g) \
+ pj_timer_heap_schedule_w_grp_lock_dbg(ht,e,d,id,g,__FILE__,__LINE__)
+
+ PJ_DECL(pj_status_t) pj_timer_heap_schedule_w_grp_lock_dbg(
+ pj_timer_heap_t *ht,
+ pj_timer_entry *entry,
+ const pj_time_val *delay,
+ int id_val,
+ pj_grp_lock_t *grp_lock,
+ const char *src_file,
+ int src_line);
+#else
+PJ_DECL(pj_status_t) pj_timer_heap_schedule_w_grp_lock(
+ pj_timer_heap_t *ht,
+ pj_timer_entry *entry,
+ const pj_time_val *delay,
+ int id_val,
+ pj_grp_lock_t *grp_lock);
+#endif /* PJ_TIMER_DEBUG */
+
+
+/**
+ * Cancel a previously registered timer. This will also decrement the
+ * reference counter of the group lock associated with the timer entry,
+ * if the entry was scheduled with one.
*
* @param ht The timer heap.
* @param entry The entry to be cancelled.
@@ -241,6 +287,24 @@ PJ_DECL(int) pj_timer_heap_cancel( pj_timer_heap_t *ht,
pj_timer_entry *entry);
/**
+ * Cancel only if the previously registered timer is active. This will
+ * also decrement the reference counter of the group lock associated
+ * with the timer entry, if the entry was scheduled with one. In any
+ * case, set the "id" to the specified value.
+ *
+ * @param ht The timer heap.
+ * @param entry The entry to be cancelled.
+ * @param id_val Value to be set to "id"
+ *
+ * @return The number of timers cancelled, which should be one if the
+ * entry has really been registered, or zero if no timer was
+ * cancelled.
+ */
+PJ_DECL(int) pj_timer_heap_cancel_if_active(pj_timer_heap_t *ht,
+ pj_timer_entry *entry,
+ int id_val);
+
+/**
* Get the number of timer entries.
*
* @param ht The timer heap.
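A minimal sketch of the group-lock-aware timer calls declared above; the timer heap and group lock are assumed to exist, and the one-second delay and id values are illustrative:

#include <pjlib.h>

static void on_timeout(pj_timer_heap_t *ht, pj_timer_entry *entry)
{
    PJ_UNUSED_ARG(ht);
    PJ_UNUSED_ARG(entry);
    /* ... handle the timeout ... */
}

static pj_status_t start_my_timer(pj_timer_heap_t *timer_heap,
                                  pj_grp_lock_t *grp_lock,
                                  pj_timer_entry *entry)
{
    pj_time_val delay = { 1, 0 };   /* one second */

    pj_timer_entry_init(entry, 0, NULL, &on_timeout);
    /* The entry holds a reference on grp_lock while it is active. */
    return pj_timer_heap_schedule_w_grp_lock(timer_heap, entry, &delay,
                                             1 /* id while active */,
                                             grp_lock);
}

static void stop_my_timer(pj_timer_heap_t *timer_heap,
                          pj_timer_entry *entry)
{
    /* Cancels only if still pending, resets the id to 0, and releases
     * the group lock reference held by the entry. */
    pj_timer_heap_cancel_if_active(timer_heap, entry, 0);
}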
diff --git a/pjlib/include/pj/types.h b/pjlib/include/pj/types.h
index 99c54db..a615585 100644
--- a/pjlib/include/pj/types.h
+++ b/pjlib/include/pj/types.h
@@ -1,4 +1,4 @@
-/* $Id: types.h 4154 2012-06-05 10:41:17Z bennylp $ */
+/* $Id: types.h 4359 2013-02-21 11:18:36Z bennylp $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -231,6 +231,9 @@ typedef struct pj_thread_t pj_thread_t;
/** Lock object. */
typedef struct pj_lock_t pj_lock_t;
+/** Group lock */
+typedef struct pj_grp_lock_t pj_grp_lock_t;
+
/** Mutex handle. */
typedef struct pj_mutex_t pj_mutex_t;
diff --git a/pjlib/src/pj/activesock.c b/pjlib/src/pj/activesock.c
index 5c91383..3eaf027 100644
--- a/pjlib/src/pj/activesock.c
+++ b/pjlib/src/pj/activesock.c
@@ -1,4 +1,4 @@
-/* $Id: activesock.c 3553 2011-05-05 06:14:19Z nanang $ */
+/* $Id: activesock.c 4359 2013-02-21 11:18:36Z bennylp $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -43,6 +43,13 @@ enum read_type
TYPE_RECV_FROM
};
+enum shutdown_dir
+{
+ SHUT_NONE = 0,
+ SHUT_RX = 1,
+ SHUT_TX = 2
+};
+
struct read_op
{
pj_ioqueue_op_key_t op_key;
@@ -77,6 +84,7 @@ struct pj_activesock_t
pj_ioqueue_t *ioqueue;
void *user_data;
unsigned async_count;
+ unsigned shutdown;
unsigned max_loop;
pj_activesock_cb cb;
#if defined(PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT) && \
@@ -209,8 +217,9 @@ PJ_DEF(pj_status_t) pj_activesock_create( pj_pool_t *pool,
ioq_cb.on_accept_complete = &ioqueue_on_accept_complete;
#endif
- status = pj_ioqueue_register_sock(pool, ioqueue, sock, asock,
- &ioq_cb, &asock->key);
+ status = pj_ioqueue_register_sock2(pool, ioqueue, sock,
+ (opt? opt->grp_lock : NULL),
+ asock, &ioq_cb, &asock->key);
if (status != PJ_SUCCESS) {
pj_activesock_close(asock);
return status;
@@ -283,10 +292,10 @@ PJ_DEF(pj_status_t) pj_activesock_create_udp( pj_pool_t *pool,
return PJ_SUCCESS;
}
-
PJ_DEF(pj_status_t) pj_activesock_close(pj_activesock_t *asock)
{
PJ_ASSERT_RETURN(asock, PJ_EINVAL);
+ asock->shutdown = SHUT_RX | SHUT_TX;
if (asock->key) {
#if defined(PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT) && \
PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT!=0
@@ -448,6 +457,10 @@ static void ioqueue_on_read_complete(pj_ioqueue_key_t *key,
asock = (pj_activesock_t*) pj_ioqueue_get_user_data(key);
+ /* Ignore if we've been shutdown */
+ if (asock->shutdown & SHUT_RX)
+ return;
+
do {
unsigned flags;
@@ -569,6 +582,10 @@ static void ioqueue_on_read_complete(pj_ioqueue_key_t *key,
if (!ret)
return;
+ /* Also stop further read if we've been shutdown */
+ if (asock->shutdown & SHUT_RX)
+ return;
+
/* Only stream oriented socket may leave data in the packet */
if (asock->stream_oriented) {
r->size = remainder;
@@ -648,6 +665,9 @@ PJ_DEF(pj_status_t) pj_activesock_send( pj_activesock_t *asock,
{
PJ_ASSERT_RETURN(asock && send_key && data && size, PJ_EINVAL);
+ if (asock->shutdown & SHUT_TX)
+ return PJ_EINVALIDOP;
+
send_key->activesock_data = NULL;
if (asock->whole_data) {
@@ -698,6 +718,9 @@ PJ_DEF(pj_status_t) pj_activesock_sendto( pj_activesock_t *asock,
PJ_ASSERT_RETURN(asock && send_key && data && size && addr && addr_len,
PJ_EINVAL);
+ if (asock->shutdown & SHUT_TX)
+ return PJ_EINVALIDOP;
+
return pj_ioqueue_sendto(asock->key, send_key, data, size, flags,
addr, addr_len);
}
@@ -711,6 +734,13 @@ static void ioqueue_on_write_complete(pj_ioqueue_key_t *key,
asock = (pj_activesock_t*) pj_ioqueue_get_user_data(key);
+ /* Ignore if we've been shutdown. This may cause data to be partially
+ * sent even when 'wholedata' was requested if the OS only sent partial
+ * buffer.
+ */
+ if (asock->shutdown & SHUT_TX)
+ return;
+
if (bytes_sent > 0 && op_key->activesock_data) {
/* whole_data is requested. Make sure we send all the data */
struct send_data *sd = (struct send_data*)op_key->activesock_data;
@@ -756,6 +786,10 @@ PJ_DEF(pj_status_t) pj_activesock_start_accept(pj_activesock_t *asock,
PJ_ASSERT_RETURN(asock, PJ_EINVAL);
PJ_ASSERT_RETURN(asock->accept_op==NULL, PJ_EINVALIDOP);
+ /* Ignore if we've been shutdown */
+ if (asock->shutdown)
+ return PJ_EINVALIDOP;
+
asock->accept_op = (struct accept_op*)
pj_pool_calloc(pool, asock->async_count,
sizeof(struct accept_op));
@@ -798,6 +832,10 @@ static void ioqueue_on_accept_complete(pj_ioqueue_key_t *key,
PJ_UNUSED_ARG(new_sock);
+ /* Ignore if we've been shutdown */
+ if (asock->shutdown)
+ return;
+
do {
if (status == asock->last_err && status != PJ_SUCCESS) {
asock->err_counter++;
@@ -835,6 +873,10 @@ static void ioqueue_on_accept_complete(pj_ioqueue_key_t *key,
pj_sock_close(accept_op->new_sock);
}
+ /* Don't start another accept() if we've been shutdown */
+ if (asock->shutdown)
+ return;
+
/* Prepare next accept() */
accept_op->new_sock = PJ_INVALID_SOCKET;
accept_op->rem_addr_len = sizeof(accept_op->rem_addr);
@@ -853,6 +895,10 @@ PJ_DEF(pj_status_t) pj_activesock_start_connect( pj_activesock_t *asock,
int addr_len)
{
PJ_UNUSED_ARG(pool);
+
+ if (asock->shutdown)
+ return PJ_EINVALIDOP;
+
return pj_ioqueue_connect(asock->key, remaddr, addr_len);
}
@@ -861,6 +907,10 @@ static void ioqueue_on_connect_complete(pj_ioqueue_key_t *key,
{
pj_activesock_t *asock = (pj_activesock_t*) pj_ioqueue_get_user_data(key);
+ /* Ignore if we've been shutdown */
+ if (asock->shutdown)
+ return;
+
if (asock->cb.on_connect_complete) {
pj_bool_t ret;
diff --git a/pjlib/src/pj/errno.c b/pjlib/src/pj/errno.c
index 1cb8e72..a0f7406 100644
--- a/pjlib/src/pj/errno.c
+++ b/pjlib/src/pj/errno.c
@@ -1,4 +1,4 @@
-/* $Id: errno.c 3664 2011-07-19 03:42:28Z nanang $ */
+/* $Id: errno.c 4359 2013-02-21 11:18:36Z bennylp $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -77,7 +77,8 @@ static const struct
PJ_BUILD_ERR(PJ_ETOOSMALL, "Size is too short"),
PJ_BUILD_ERR(PJ_EIGNORED, "Ignored"),
PJ_BUILD_ERR(PJ_EIPV6NOTSUP, "IPv6 is not supported"),
- PJ_BUILD_ERR(PJ_EAFNOTSUP, "Unsupported address family")
+ PJ_BUILD_ERR(PJ_EAFNOTSUP, "Unsupported address family"),
+ PJ_BUILD_ERR(PJ_EGONE, "Object no longer exists")
};
#endif /* PJ_HAS_ERROR_STRING */
diff --git a/pjlib/src/pj/guid.c b/pjlib/src/pj/guid.c
index 8ad5f78..88b6c37 100644
--- a/pjlib/src/pj/guid.c
+++ b/pjlib/src/pj/guid.c
@@ -1,4 +1,4 @@
-/* $Id: guid.c 3553 2011-05-05 06:14:19Z nanang $ */
+/* $Id: guid.c 4208 2012-07-18 07:52:33Z ming $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -17,11 +17,32 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <pj/ctype.h>
#include <pj/guid.h>
#include <pj/pool.h>
+PJ_DEF(pj_str_t*) pj_generate_unique_string_lower(pj_str_t *str)
+{
+ int i;
+
+ pj_generate_unique_string(str);
+ for (i = 0; i < str->slen; i++)
+ str->ptr[i] = (char)pj_tolower(str->ptr[i]);
+
+ return str;
+}
+
PJ_DEF(void) pj_create_unique_string(pj_pool_t *pool, pj_str_t *str)
{
str->ptr = (char*)pj_pool_alloc(pool, PJ_GUID_STRING_LENGTH);
pj_generate_unique_string(str);
}
+
+PJ_DEF(void) pj_create_unique_string_lower(pj_pool_t *pool, pj_str_t *str)
+{
+ int i;
+
+ pj_create_unique_string(pool, str);
+ for (i = 0; i < str->slen; i++)
+ str->ptr[i] = (char)pj_tolower(str->ptr[i]);
+}
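A quick sketch of the two lowercase GUID helpers added above (the pool is assumed to exist):

#include <pjlib.h>

static void guid_demo(pj_pool_t *pool)
{
    char buf[PJ_GUID_MAX_LENGTH];
    pj_str_t s1, s2;

    /* Caller-provided storage. */
    s1.ptr = buf;
    s1.slen = 0;
    pj_generate_unique_string_lower(&s1);

    /* Pool-allocated storage. */
    pj_create_unique_string_lower(pool, &s2);
}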
diff --git a/pjlib/src/pj/hash.c b/pjlib/src/pj/hash.c
index 00f167f..b37e8ff 100644
--- a/pjlib/src/pj/hash.c
+++ b/pjlib/src/pj/hash.c
@@ -1,4 +1,4 @@
-/* $Id: hash.c 3553 2011-05-05 06:14:19Z nanang $ */
+/* $Id: hash.c 4296 2012-11-07 04:56:26Z nanang $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -79,16 +79,21 @@ PJ_DEF(pj_uint32_t) pj_hash_calc_tolower( pj_uint32_t hval,
#if defined(PJ_HASH_USE_OWN_TOLOWER) && PJ_HASH_USE_OWN_TOLOWER != 0
for (i=0; i<key->slen; ++i) {
pj_uint8_t c = key->ptr[i];
+ char lower;
if (c & 64)
- result[i] = (char)(c | 32);
+ lower = (char)(c | 32);
else
- result[i] = (char)c;
- hval = hval * PJ_HASH_MULTIPLIER + result[i];
+ lower = (char)c;
+ if (result)
+ result[i] = lower;
+ hval = hval * PJ_HASH_MULTIPLIER + lower;
}
#else
for (i=0; i<key->slen; ++i) {
- result[i] = (char)pj_tolower(key->ptr[i]);
- hval = hval * PJ_HASH_MULTIPLIER + result[i];
+ char lower = (char)pj_tolower(key->ptr[i]);
+ if (result)
+ result[i] = lower;
+ hval = hval * PJ_HASH_MULTIPLIER + lower;
}
#endif
@@ -128,7 +133,7 @@ PJ_DEF(pj_hash_table_t*) pj_hash_create(pj_pool_t *pool, unsigned size)
static pj_hash_entry **find_entry( pj_pool_t *pool, pj_hash_table_t *ht,
const void *key, unsigned keylen,
void *val, pj_uint32_t *hval,
- void *entry_buf)
+ void *entry_buf, pj_bool_t lower)
{
pj_uint32_t hash;
pj_hash_entry **p_entry, *entry;
@@ -146,14 +151,20 @@ static pj_hash_entry **find_entry( pj_pool_t *pool, pj_hash_table_t *ht,
if (keylen==PJ_HASH_KEY_STRING) {
const pj_uint8_t *p = (const pj_uint8_t*)key;
for ( ; *p; ++p ) {
- hash = hash * PJ_HASH_MULTIPLIER + *p;
+ if (lower)
+ hash = hash * PJ_HASH_MULTIPLIER + pj_tolower(*p);
+ else
+ hash = hash * PJ_HASH_MULTIPLIER + *p;
}
keylen = p - (const unsigned char*)key;
} else {
const pj_uint8_t *p = (const pj_uint8_t*)key,
*end = p + keylen;
for ( ; p!=end; ++p) {
- hash = hash * PJ_HASH_MULTIPLIER + *p;
+ if (lower)
+ hash = hash * PJ_HASH_MULTIPLIER + pj_tolower(*p);
+ else
+ hash = hash * PJ_HASH_MULTIPLIER + *p;
}
}
@@ -168,9 +179,11 @@ static pj_hash_entry **find_entry( pj_pool_t *pool, pj_hash_table_t *ht,
p_entry = &entry->next, entry = *p_entry)
{
if (entry->hash==hash && entry->keylen==keylen &&
- pj_memcmp(entry->key, key, keylen)==0)
+ ((lower && pj_ansi_strnicmp((const char*)entry->key,
+ (const char*)key, keylen)==0) ||
+ (!lower && pj_memcmp(entry->key, key, keylen)==0)))
{
- break;
+ break;
}
}
@@ -214,17 +227,27 @@ PJ_DEF(void *) pj_hash_get( pj_hash_table_t *ht,
pj_uint32_t *hval)
{
pj_hash_entry *entry;
- entry = *find_entry( NULL, ht, key, keylen, NULL, hval, NULL);
+ entry = *find_entry( NULL, ht, key, keylen, NULL, hval, NULL, PJ_FALSE);
return entry ? entry->value : NULL;
}
-PJ_DEF(void) pj_hash_set( pj_pool_t *pool, pj_hash_table_t *ht,
- const void *key, unsigned keylen, pj_uint32_t hval,
- void *value )
+PJ_DEF(void *) pj_hash_get_lower( pj_hash_table_t *ht,
+ const void *key, unsigned keylen,
+ pj_uint32_t *hval)
+{
+ pj_hash_entry *entry;
+ entry = *find_entry( NULL, ht, key, keylen, NULL, hval, NULL, PJ_TRUE);
+ return entry ? entry->value : NULL;
+}
+
+static void hash_set( pj_pool_t *pool, pj_hash_table_t *ht,
+ const void *key, unsigned keylen, pj_uint32_t hval,
+ void *value, void *entry_buf, pj_bool_t lower )
{
pj_hash_entry **p_entry;
- p_entry = find_entry( pool, ht, key, keylen, value, &hval, NULL);
+ p_entry = find_entry( pool, ht, key, keylen, value, &hval, entry_buf,
+ lower);
if (*p_entry) {
if (value == NULL) {
/* delete entry */
@@ -241,29 +264,35 @@ PJ_DEF(void) pj_hash_set( pj_pool_t *pool, pj_hash_table_t *ht,
}
}
+PJ_DEF(void) pj_hash_set( pj_pool_t *pool, pj_hash_table_t *ht,
+ const void *key, unsigned keylen, pj_uint32_t hval,
+ void *value )
+{
+ hash_set(pool, ht, key, keylen, hval, value, NULL, PJ_FALSE);
+}
+
+PJ_DEF(void) pj_hash_set_lower( pj_pool_t *pool, pj_hash_table_t *ht,
+ const void *key, unsigned keylen,
+ pj_uint32_t hval, void *value )
+{
+ hash_set(pool, ht, key, keylen, hval, value, NULL, PJ_TRUE);
+}
+
PJ_DEF(void) pj_hash_set_np( pj_hash_table_t *ht,
const void *key, unsigned keylen,
pj_uint32_t hval, pj_hash_entry_buf entry_buf,
void *value)
{
- pj_hash_entry **p_entry;
+ hash_set(NULL, ht, key, keylen, hval, value, (void *)entry_buf, PJ_FALSE);
+}
- p_entry = find_entry( NULL, ht, key, keylen, value, &hval,
- (void*)entry_buf );
- if (*p_entry) {
- if (value == NULL) {
- /* delete entry */
- PJ_LOG(6, ("hashtbl", "%p: p_entry %p deleted", ht, *p_entry));
- *p_entry = (*p_entry)->next;
- --ht->count;
-
- } else {
- /* overwrite */
- (*p_entry)->value = value;
- PJ_LOG(6, ("hashtbl", "%p: p_entry %p value set to %p", ht,
- *p_entry, value));
- }
- }
+PJ_DEF(void) pj_hash_set_np_lower( pj_hash_table_t *ht,
+ const void *key, unsigned keylen,
+ pj_uint32_t hval,
+ pj_hash_entry_buf entry_buf,
+ void *value)
+{
+ hash_set(NULL, ht, key, keylen, hval, value, (void *)entry_buf, PJ_TRUE);
}
PJ_DEF(unsigned) pj_hash_count( pj_hash_table_t *ht )
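A quick sketch of the case-insensitive hash variants (table size and keys are illustrative):

#include <pjlib.h>

static void hash_demo(pj_pool_t *pool)
{
    pj_hash_table_t *ht;
    void *found;

    ht = pj_hash_create(pool, 31);
    pj_hash_set_lower(pool, ht, "Content-Type", PJ_HASH_KEY_STRING, 0,
                      "some value");

    /* The _lower variants hash and compare case-insensitively, so a
     * differently-cased key finds the same entry. */
    found = pj_hash_get_lower(ht, "content-type", PJ_HASH_KEY_STRING, NULL);
    PJ_UNUSED_ARG(found);
}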
diff --git a/pjlib/src/pj/ioqueue_common_abs.c b/pjlib/src/pj/ioqueue_common_abs.c
index ee4506d..b8036a5 100644
--- a/pjlib/src/pj/ioqueue_common_abs.c
+++ b/pjlib/src/pj/ioqueue_common_abs.c
@@ -1,4 +1,4 @@
-/* $Id: ioqueue_common_abs.c 3666 2011-07-19 08:40:20Z nanang $ */
+/* $Id: ioqueue_common_abs.c 4359 2013-02-21 11:18:36Z bennylp $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -70,6 +70,7 @@ static pj_status_t ioqueue_init_key( pj_pool_t *pool,
pj_ioqueue_t *ioqueue,
pj_ioqueue_key_t *key,
pj_sock_t sock,
+ pj_grp_lock_t *grp_lock,
void *user_data,
const pj_ioqueue_callback *cb)
{
@@ -114,10 +115,18 @@ static pj_status_t ioqueue_init_key( pj_pool_t *pool,
/* Create mutex for the key. */
#if !PJ_IOQUEUE_HAS_SAFE_UNREG
- rc = pj_mutex_create_simple(pool, NULL, &key->mutex);
+ rc = pj_lock_create_simple_mutex(pool, NULL, &key->lock);
#endif
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ /* Group lock */
+ key->grp_lock = grp_lock;
+ if (key->grp_lock) {
+ pj_grp_lock_add_ref_dbg(key->grp_lock, "ioqueue", 0);
+ }
- return rc;
+ return PJ_SUCCESS;
}
/*
@@ -189,10 +198,10 @@ PJ_INLINE(int) key_has_pending_connect(pj_ioqueue_key_t *key)
void ioqueue_dispatch_write_event(pj_ioqueue_t *ioqueue, pj_ioqueue_key_t *h)
{
/* Lock the key. */
- pj_mutex_lock(h->mutex);
+ pj_ioqueue_lock_key(h);
if (IS_CLOSING(h)) {
- pj_mutex_unlock(h->mutex);
+ pj_ioqueue_unlock_key(h);
return;
}
@@ -261,7 +270,7 @@ void ioqueue_dispatch_write_event(pj_ioqueue_t *ioqueue, pj_ioqueue_key_t *h)
* save it to a flag.
*/
has_lock = PJ_FALSE;
- pj_mutex_unlock(h->mutex);
+ pj_ioqueue_unlock_key(h);
} else {
has_lock = PJ_TRUE;
}
@@ -272,7 +281,7 @@ void ioqueue_dispatch_write_event(pj_ioqueue_t *ioqueue, pj_ioqueue_key_t *h)
/* Unlock if we still hold the lock */
if (has_lock) {
- pj_mutex_unlock(h->mutex);
+ pj_ioqueue_unlock_key(h);
}
/* Done. */
@@ -379,7 +388,8 @@ void ioqueue_dispatch_write_event(pj_ioqueue_t *ioqueue, pj_ioqueue_key_t *h)
* save it to a flag.
*/
has_lock = PJ_FALSE;
- pj_mutex_unlock(h->mutex);
+ pj_ioqueue_unlock_key(h);
+ PJ_RACE_ME(5);
} else {
has_lock = PJ_TRUE;
}
@@ -392,11 +402,11 @@ void ioqueue_dispatch_write_event(pj_ioqueue_t *ioqueue, pj_ioqueue_key_t *h)
}
if (has_lock) {
- pj_mutex_unlock(h->mutex);
+ pj_ioqueue_unlock_key(h);
}
} else {
- pj_mutex_unlock(h->mutex);
+ pj_ioqueue_unlock_key(h);
}
/* Done. */
@@ -406,7 +416,7 @@ void ioqueue_dispatch_write_event(pj_ioqueue_t *ioqueue, pj_ioqueue_key_t *h)
* are signalled for the same event, but only one thread eventually
* able to process the event.
*/
- pj_mutex_unlock(h->mutex);
+ pj_ioqueue_unlock_key(h);
}
}
@@ -415,10 +425,10 @@ void ioqueue_dispatch_read_event( pj_ioqueue_t *ioqueue, pj_ioqueue_key_t *h )
pj_status_t rc;
/* Lock the key. */
- pj_mutex_lock(h->mutex);
+ pj_ioqueue_lock_key(h);
if (IS_CLOSING(h)) {
- pj_mutex_unlock(h->mutex);
+ pj_ioqueue_unlock_key(h);
return;
}
@@ -453,7 +463,8 @@ void ioqueue_dispatch_read_event( pj_ioqueue_t *ioqueue, pj_ioqueue_key_t *h )
* save it to a flag.
*/
has_lock = PJ_FALSE;
- pj_mutex_unlock(h->mutex);
+ pj_ioqueue_unlock_key(h);
+ PJ_RACE_ME(5);
} else {
has_lock = PJ_TRUE;
}
@@ -466,7 +477,7 @@ void ioqueue_dispatch_read_event( pj_ioqueue_t *ioqueue, pj_ioqueue_key_t *h )
}
if (has_lock) {
- pj_mutex_unlock(h->mutex);
+ pj_ioqueue_unlock_key(h);
}
}
else
@@ -567,7 +578,8 @@ void ioqueue_dispatch_read_event( pj_ioqueue_t *ioqueue, pj_ioqueue_key_t *h )
* save it to a flag.
*/
has_lock = PJ_FALSE;
- pj_mutex_unlock(h->mutex);
+ pj_ioqueue_unlock_key(h);
+ PJ_RACE_ME(5);
} else {
has_lock = PJ_TRUE;
}
@@ -580,7 +592,7 @@ void ioqueue_dispatch_read_event( pj_ioqueue_t *ioqueue, pj_ioqueue_key_t *h )
}
if (has_lock) {
- pj_mutex_unlock(h->mutex);
+ pj_ioqueue_unlock_key(h);
}
} else {
@@ -589,7 +601,7 @@ void ioqueue_dispatch_read_event( pj_ioqueue_t *ioqueue, pj_ioqueue_key_t *h )
* are signalled for the same event, but only one thread eventually
* able to process the event.
*/
- pj_mutex_unlock(h->mutex);
+ pj_ioqueue_unlock_key(h);
}
}
@@ -599,19 +611,19 @@ void ioqueue_dispatch_exception_event( pj_ioqueue_t *ioqueue,
{
pj_bool_t has_lock;
- pj_mutex_lock(h->mutex);
+ pj_ioqueue_lock_key(h);
if (!h->connecting) {
/* It is possible that more than one thread was woken up, thus
* the remaining thread will see h->connecting as zero because
* it has been processed by other thread.
*/
- pj_mutex_unlock(h->mutex);
+ pj_ioqueue_unlock_key(h);
return;
}
if (IS_CLOSING(h)) {
- pj_mutex_unlock(h->mutex);
+ pj_ioqueue_unlock_key(h);
return;
}
@@ -629,7 +641,8 @@ void ioqueue_dispatch_exception_event( pj_ioqueue_t *ioqueue,
* save it to a flag.
*/
has_lock = PJ_FALSE;
- pj_mutex_unlock(h->mutex);
+ pj_ioqueue_unlock_key(h);
+ PJ_RACE_ME(5);
} else {
has_lock = PJ_TRUE;
}
@@ -651,7 +664,7 @@ void ioqueue_dispatch_exception_event( pj_ioqueue_t *ioqueue,
}
if (has_lock) {
- pj_mutex_unlock(h->mutex);
+ pj_ioqueue_unlock_key(h);
}
}
@@ -713,18 +726,18 @@ PJ_DEF(pj_status_t) pj_ioqueue_recv( pj_ioqueue_key_t *key,
read_op->size = *length;
read_op->flags = flags;
- pj_mutex_lock(key->mutex);
+ pj_ioqueue_lock_key(key);
/* Check again. Handle may have been closed after the previous check
* in multithreaded app. If we add bad handle to the set it will
* corrupt the ioqueue set. See #913
*/
if (IS_CLOSING(key)) {
- pj_mutex_unlock(key->mutex);
+ pj_ioqueue_unlock_key(key);
return PJ_ECANCELLED;
}
pj_list_insert_before(&key->read_list, read_op);
ioqueue_add_to_set(key->ioqueue, key, READABLE_EVENT);
- pj_mutex_unlock(key->mutex);
+ pj_ioqueue_unlock_key(key);
return PJ_EPENDING;
}
@@ -789,18 +802,18 @@ PJ_DEF(pj_status_t) pj_ioqueue_recvfrom( pj_ioqueue_key_t *key,
read_op->rmt_addr = addr;
read_op->rmt_addrlen = addrlen;
- pj_mutex_lock(key->mutex);
+ pj_ioqueue_lock_key(key);
/* Check again. Handle may have been closed after the previous check
* in multithreaded app. If we add bad handle to the set it will
* corrupt the ioqueue set. See #913
*/
if (IS_CLOSING(key)) {
- pj_mutex_unlock(key->mutex);
+ pj_ioqueue_unlock_key(key);
return PJ_ECANCELLED;
}
pj_list_insert_before(&key->read_list, read_op);
ioqueue_add_to_set(key->ioqueue, key, READABLE_EVENT);
- pj_mutex_unlock(key->mutex);
+ pj_ioqueue_unlock_key(key);
return PJ_EPENDING;
}
@@ -903,18 +916,18 @@ PJ_DEF(pj_status_t) pj_ioqueue_send( pj_ioqueue_key_t *key,
write_op->written = 0;
write_op->flags = flags;
- pj_mutex_lock(key->mutex);
+ pj_ioqueue_lock_key(key);
/* Check again. Handle may have been closed after the previous check
* in multithreaded app. If we add bad handle to the set it will
* corrupt the ioqueue set. See #913
*/
if (IS_CLOSING(key)) {
- pj_mutex_unlock(key->mutex);
+ pj_ioqueue_unlock_key(key);
return PJ_ECANCELLED;
}
pj_list_insert_before(&key->write_list, write_op);
ioqueue_add_to_set(key->ioqueue, key, WRITEABLE_EVENT);
- pj_mutex_unlock(key->mutex);
+ pj_ioqueue_unlock_key(key);
return PJ_EPENDING;
}
@@ -1050,18 +1063,18 @@ retry_on_restart:
pj_memcpy(&write_op->rmt_addr, addr, addrlen);
write_op->rmt_addrlen = addrlen;
- pj_mutex_lock(key->mutex);
+ pj_ioqueue_lock_key(key);
/* Check again. Handle may have been closed after the previous check
* in multithreaded app. If we add bad handle to the set it will
* corrupt the ioqueue set. See #913
*/
if (IS_CLOSING(key)) {
- pj_mutex_unlock(key->mutex);
+ pj_ioqueue_unlock_key(key);
return PJ_ECANCELLED;
}
pj_list_insert_before(&key->write_list, write_op);
ioqueue_add_to_set(key->ioqueue, key, WRITEABLE_EVENT);
- pj_mutex_unlock(key->mutex);
+ pj_ioqueue_unlock_key(key);
return PJ_EPENDING;
}
@@ -1127,18 +1140,18 @@ PJ_DEF(pj_status_t) pj_ioqueue_accept( pj_ioqueue_key_t *key,
accept_op->addrlen= addrlen;
accept_op->local_addr = local;
- pj_mutex_lock(key->mutex);
+ pj_ioqueue_lock_key(key);
/* Check again. Handle may have been closed after the previous check
* in multithreaded app. If we add bad handle to the set it will
* corrupt the ioqueue set. See #913
*/
if (IS_CLOSING(key)) {
- pj_mutex_unlock(key->mutex);
+ pj_ioqueue_unlock_key(key);
return PJ_ECANCELLED;
}
pj_list_insert_before(&key->accept_list, accept_op);
ioqueue_add_to_set(key->ioqueue, key, READABLE_EVENT);
- pj_mutex_unlock(key->mutex);
+ pj_ioqueue_unlock_key(key);
return PJ_EPENDING;
}
@@ -1171,18 +1184,18 @@ PJ_DEF(pj_status_t) pj_ioqueue_connect( pj_ioqueue_key_t *key,
} else {
if (status == PJ_STATUS_FROM_OS(PJ_BLOCKING_CONNECT_ERROR_VAL)) {
/* Pending! */
- pj_mutex_lock(key->mutex);
+ pj_ioqueue_lock_key(key);
/* Check again. Handle may have been closed after the previous
* check in multithreaded app. See #913
*/
if (IS_CLOSING(key)) {
- pj_mutex_unlock(key->mutex);
+ pj_ioqueue_unlock_key(key);
return PJ_ECANCELLED;
}
key->connecting = PJ_TRUE;
ioqueue_add_to_set(key->ioqueue, key, WRITEABLE_EVENT);
ioqueue_add_to_set(key->ioqueue, key, EXCEPTION_EVENT);
- pj_mutex_unlock(key->mutex);
+ pj_ioqueue_unlock_key(key);
return PJ_EPENDING;
} else {
/* Error! */
@@ -1228,7 +1241,7 @@ PJ_DEF(pj_status_t) pj_ioqueue_post_completion( pj_ioqueue_key_t *key,
* Find the operation key in all pending operation list to
* really make sure that it's still there; then call the callback.
*/
- pj_mutex_lock(key->mutex);
+ pj_ioqueue_lock_key(key);
/* Find the operation in the pending read list. */
op_rec = (struct generic_operation*)key->read_list.next;
@@ -1236,7 +1249,7 @@ PJ_DEF(pj_status_t) pj_ioqueue_post_completion( pj_ioqueue_key_t *key,
if (op_rec == (void*)op_key) {
pj_list_erase(op_rec);
op_rec->op = PJ_IOQUEUE_OP_NONE;
- pj_mutex_unlock(key->mutex);
+ pj_ioqueue_unlock_key(key);
(*key->cb.on_read_complete)(key, op_key, bytes_status);
return PJ_SUCCESS;
@@ -1250,7 +1263,7 @@ PJ_DEF(pj_status_t) pj_ioqueue_post_completion( pj_ioqueue_key_t *key,
if (op_rec == (void*)op_key) {
pj_list_erase(op_rec);
op_rec->op = PJ_IOQUEUE_OP_NONE;
- pj_mutex_unlock(key->mutex);
+ pj_ioqueue_unlock_key(key);
(*key->cb.on_write_complete)(key, op_key, bytes_status);
return PJ_SUCCESS;
@@ -1264,7 +1277,7 @@ PJ_DEF(pj_status_t) pj_ioqueue_post_completion( pj_ioqueue_key_t *key,
if (op_rec == (void*)op_key) {
pj_list_erase(op_rec);
op_rec->op = PJ_IOQUEUE_OP_NONE;
- pj_mutex_unlock(key->mutex);
+ pj_ioqueue_unlock_key(key);
(*key->cb.on_accept_complete)(key, op_key,
PJ_INVALID_SOCKET,
@@ -1274,7 +1287,7 @@ PJ_DEF(pj_status_t) pj_ioqueue_post_completion( pj_ioqueue_key_t *key,
op_rec = op_rec->next;
}
- pj_mutex_unlock(key->mutex);
+ pj_ioqueue_unlock_key(key);
return PJ_EINVALIDOP;
}
@@ -1304,11 +1317,18 @@ PJ_DEF(pj_status_t) pj_ioqueue_set_concurrency(pj_ioqueue_key_t *key,
PJ_DEF(pj_status_t) pj_ioqueue_lock_key(pj_ioqueue_key_t *key)
{
- return pj_mutex_lock(key->mutex);
+ if (key->grp_lock)
+ return pj_grp_lock_acquire(key->grp_lock);
+ else
+ return pj_lock_acquire(key->lock);
}
PJ_DEF(pj_status_t) pj_ioqueue_unlock_key(pj_ioqueue_key_t *key)
{
- return pj_mutex_unlock(key->mutex);
+ if (key->grp_lock)
+ return pj_grp_lock_release(key->grp_lock);
+ else
+ return pj_lock_release(key->lock);
}
+
diff --git a/pjlib/src/pj/ioqueue_common_abs.h b/pjlib/src/pj/ioqueue_common_abs.h
index 3a41051..3bdbb52 100644
--- a/pjlib/src/pj/ioqueue_common_abs.h
+++ b/pjlib/src/pj/ioqueue_common_abs.h
@@ -101,7 +101,8 @@ union operation_key
#define DECLARE_COMMON_KEY \
PJ_DECL_LIST_MEMBER(struct pj_ioqueue_key_t); \
pj_ioqueue_t *ioqueue; \
- pj_mutex_t *mutex; \
+ pj_grp_lock_t *grp_lock; \
+ pj_lock_t *lock; \
pj_bool_t inside_callback; \
pj_bool_t destroy_requested; \
pj_bool_t allow_concurrent; \
diff --git a/pjlib/src/pj/ioqueue_epoll.c b/pjlib/src/pj/ioqueue_epoll.c
index 27845f5..c2564d1 100644
--- a/pjlib/src/pj/ioqueue_epoll.c
+++ b/pjlib/src/pj/ioqueue_epoll.c
@@ -1,4 +1,4 @@
-/* $Id: ioqueue_epoll.c 3553 2011-05-05 06:14:19Z nanang $ */
+/* $Id: ioqueue_epoll.c 4359 2013-02-21 11:18:36Z bennylp $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -262,11 +262,11 @@ PJ_DEF(pj_status_t) pj_ioqueue_create( pj_pool_t *pool,
key = PJ_POOL_ALLOC_T(pool, pj_ioqueue_key_t);
key->ref_count = 0;
- rc = pj_mutex_create_recursive(pool, NULL, &key->mutex);
+ rc = pj_lock_create_recursive_mutex(pool, NULL, &key->lock);
if (rc != PJ_SUCCESS) {
key = ioqueue->free_list.next;
while (key != &ioqueue->free_list) {
- pj_mutex_destroy(key->mutex);
+ pj_lock_destroy(key->lock);
key = key->next;
}
pj_mutex_destroy(ioqueue->ref_cnt_mutex);
@@ -323,19 +323,19 @@ PJ_DEF(pj_status_t) pj_ioqueue_destroy(pj_ioqueue_t *ioqueue)
/* Destroy reference counters */
key = ioqueue->active_list.next;
while (key != &ioqueue->active_list) {
- pj_mutex_destroy(key->mutex);
+ pj_lock_destroy(key->lock);
key = key->next;
}
key = ioqueue->closing_list.next;
while (key != &ioqueue->closing_list) {
- pj_mutex_destroy(key->mutex);
+ pj_lock_destroy(key->lock);
key = key->next;
}
key = ioqueue->free_list.next;
while (key != &ioqueue->free_list) {
- pj_mutex_destroy(key->mutex);
+ pj_lock_destroy(key->lock);
key = key->next;
}
@@ -422,7 +422,7 @@ PJ_DEF(pj_status_t) pj_ioqueue_register_sock( pj_pool_t *pool,
status = os_epoll_ctl(ioqueue->epfd, EPOLL_CTL_ADD, sock, &ev);
if (status < 0) {
rc = pj_get_os_error();
- pj_mutex_destroy(key->mutex);
+ pj_lock_destroy(key->lock);
key = NULL;
TRACE_((THIS_FILE,
"pj_ioqueue_register_sock error: os_epoll_ctl rc=%d",
@@ -497,7 +497,7 @@ PJ_DEF(pj_status_t) pj_ioqueue_unregister( pj_ioqueue_key_t *key)
* the key. We need to lock the key before ioqueue here to prevent
* deadlock.
*/
- pj_mutex_lock(key->mutex);
+ pj_lock_acquire(key->lock);
/* Also lock ioqueue */
pj_lock_acquire(ioqueue->lock);
@@ -531,9 +531,9 @@ PJ_DEF(pj_status_t) pj_ioqueue_unregister( pj_ioqueue_key_t *key)
decrement_counter(key);
/* Done. */
- pj_mutex_unlock(key->mutex);
+ pj_lock_release(key->lock);
#else
- pj_mutex_destroy(key->mutex);
+ pj_lock_destroy(key->lock);
#endif
return PJ_SUCCESS;
diff --git a/pjlib/src/pj/ioqueue_select.c b/pjlib/src/pj/ioqueue_select.c
index 74b87d8..1b08d28 100644
--- a/pjlib/src/pj/ioqueue_select.c
+++ b/pjlib/src/pj/ioqueue_select.c
@@ -1,4 +1,4 @@
-/* $Id: ioqueue_select.c 3553 2011-05-05 06:14:19Z nanang $ */
+/* $Id: ioqueue_select.c 4359 2013-02-21 11:18:36Z bennylp $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -39,6 +39,7 @@
#include <pj/sock_select.h>
#include <pj/sock_qos.h>
#include <pj/errno.h>
+#include <pj/rand.h>
/* Now that we have access to OS'es <sys/select>, lets check again that
* PJ_IOQUEUE_MAX_HANDLES is not greater than FD_SETSIZE
@@ -237,11 +238,11 @@ PJ_DEF(pj_status_t) pj_ioqueue_create( pj_pool_t *pool,
key = PJ_POOL_ALLOC_T(pool, pj_ioqueue_key_t);
key->ref_count = 0;
- rc = pj_mutex_create_recursive(pool, NULL, &key->mutex);
+ rc = pj_lock_create_recursive_mutex(pool, NULL, &key->lock);
if (rc != PJ_SUCCESS) {
key = ioqueue->free_list.next;
while (key != &ioqueue->free_list) {
- pj_mutex_destroy(key->mutex);
+ pj_lock_destroy(key->lock);
key = key->next;
}
pj_mutex_destroy(ioqueue->ref_cnt_mutex);
@@ -284,19 +285,19 @@ PJ_DEF(pj_status_t) pj_ioqueue_destroy(pj_ioqueue_t *ioqueue)
/* Destroy reference counters */
key = ioqueue->active_list.next;
while (key != &ioqueue->active_list) {
- pj_mutex_destroy(key->mutex);
+ pj_lock_destroy(key->lock);
key = key->next;
}
key = ioqueue->closing_list.next;
while (key != &ioqueue->closing_list) {
- pj_mutex_destroy(key->mutex);
+ pj_lock_destroy(key->lock);
key = key->next;
}
key = ioqueue->free_list.next;
while (key != &ioqueue->free_list) {
- pj_mutex_destroy(key->mutex);
+ pj_lock_destroy(key->lock);
key = key->next;
}
@@ -312,9 +313,10 @@ PJ_DEF(pj_status_t) pj_ioqueue_destroy(pj_ioqueue_t *ioqueue)
*
* Register socket handle to ioqueue.
*/
-PJ_DEF(pj_status_t) pj_ioqueue_register_sock( pj_pool_t *pool,
+PJ_DEF(pj_status_t) pj_ioqueue_register_sock2(pj_pool_t *pool,
pj_ioqueue_t *ioqueue,
pj_sock_t sock,
+ pj_grp_lock_t *grp_lock,
void *user_data,
const pj_ioqueue_callback *cb,
pj_ioqueue_key_t **p_key)
@@ -358,7 +360,7 @@ PJ_DEF(pj_status_t) pj_ioqueue_register_sock( pj_pool_t *pool,
key = (pj_ioqueue_key_t*)pj_pool_zalloc(pool, sizeof(pj_ioqueue_key_t));
#endif
- rc = ioqueue_init_key(pool, ioqueue, key, sock, user_data, cb);
+ rc = ioqueue_init_key(pool, ioqueue, key, sock, grp_lock, user_data, cb);
if (rc != PJ_SUCCESS) {
key = NULL;
goto on_return;
@@ -386,12 +388,27 @@ PJ_DEF(pj_status_t) pj_ioqueue_register_sock( pj_pool_t *pool,
on_return:
/* On error, socket may be left in non-blocking mode. */
+ if (rc != PJ_SUCCESS) {
+ if (key->grp_lock)
+ pj_grp_lock_dec_ref_dbg(key->grp_lock, "ioqueue", 0);
+ }
*p_key = key;
pj_lock_release(ioqueue->lock);
return rc;
}
+PJ_DEF(pj_status_t) pj_ioqueue_register_sock( pj_pool_t *pool,
+ pj_ioqueue_t *ioqueue,
+ pj_sock_t sock,
+ void *user_data,
+ const pj_ioqueue_callback *cb,
+ pj_ioqueue_key_t **p_key)
+{
+ return pj_ioqueue_register_sock2(pool, ioqueue, sock, NULL, user_data,
+ cb, p_key);
+}
+
#if PJ_IOQUEUE_HAS_SAFE_UNREG
/* Increment key's reference counter */
static void increment_counter(pj_ioqueue_key_t *key)
@@ -446,7 +463,7 @@ PJ_DEF(pj_status_t) pj_ioqueue_unregister( pj_ioqueue_key_t *key)
* the key. We need to lock the key before ioqueue here to prevent
* deadlock.
*/
- pj_mutex_lock(key->mutex);
+ pj_ioqueue_lock_key(key);
/* Also lock ioqueue */
pj_lock_acquire(ioqueue->lock);
@@ -485,9 +502,34 @@ PJ_DEF(pj_status_t) pj_ioqueue_unregister( pj_ioqueue_key_t *key)
decrement_counter(key);
/* Done. */
- pj_mutex_unlock(key->mutex);
+ if (key->grp_lock) {
+        /* Just dec_ref and unlock; grp_lock is deliberately left set
+         * (see the note below) */
+ pj_grp_lock_t *grp_lock = key->grp_lock;
+ // Don't set grp_lock to NULL otherwise the other thread
+ // will crash. Just leave it as dangling pointer, but this
+ // should be safe
+ //key->grp_lock = NULL;
+ pj_grp_lock_dec_ref_dbg(grp_lock, "ioqueue", 0);
+ pj_grp_lock_release(grp_lock);
+ } else {
+ pj_ioqueue_unlock_key(key);
+ }
#else
- pj_mutex_destroy(key->mutex);
+ if (key->grp_lock) {
+        /* dec_ref and unlock; grp_lock is deliberately left set (see below) */
+ pj_grp_lock_t *grp_lock = key->grp_lock;
+ // Don't set grp_lock to NULL otherwise the other thread
+ // will crash. Just leave it as dangling pointer, but this
+ // should be safe
+ //key->grp_lock = NULL;
+ pj_grp_lock_dec_ref_dbg(grp_lock, "ioqueue", 0);
+ pj_grp_lock_release(grp_lock);
+ } else {
+ pj_ioqueue_unlock_key(key);
+ }
+
+ pj_lock_destroy(key->lock);
#endif
return PJ_SUCCESS;
@@ -620,6 +662,10 @@ static void scan_closing_keys(pj_ioqueue_t *ioqueue)
if (PJ_TIME_VAL_GTE(now, h->free_time)) {
pj_list_erase(h);
+ // Don't set grp_lock to NULL otherwise the other thread
+ // will crash. Just leave it as dangling pointer, but this
+ // should be safe
+ //h->grp_lock = NULL;
pj_list_push_back(&ioqueue->free_list, h);
}
h = next;
@@ -781,7 +827,7 @@ on_error:
PJ_DEF(int) pj_ioqueue_poll( pj_ioqueue_t *ioqueue, const pj_time_val *timeout)
{
pj_fd_set_t rfdset, wfdset, xfdset;
- int count, counter;
+ int count, i, counter;
pj_ioqueue_key_t *h;
struct event
{
@@ -892,8 +938,17 @@ PJ_DEF(int) pj_ioqueue_poll( pj_ioqueue_t *ioqueue, const pj_time_val *timeout)
#endif
}
+ for (i=0; i<counter; ++i) {
+ if (event[i].key->grp_lock)
+ pj_grp_lock_add_ref_dbg(event[i].key->grp_lock, "ioqueue", 0);
+ }
+
+ PJ_RACE_ME(5);
+
pj_lock_release(ioqueue->lock);
+ PJ_RACE_ME(5);
+
count = counter;
/* Now process all events. The dispatch functions will take care
@@ -918,6 +973,10 @@ PJ_DEF(int) pj_ioqueue_poll( pj_ioqueue_t *ioqueue, const pj_time_val *timeout)
#if PJ_IOQUEUE_HAS_SAFE_UNREG
decrement_counter(event[counter].key);
#endif
+
+ if (event[counter].key->grp_lock)
+ pj_grp_lock_dec_ref_dbg(event[counter].key->grp_lock,
+ "ioqueue", 0);
}
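
The hunks above add pj_ioqueue_register_sock2(), which accepts an optional group lock so that the ioqueue key holds a reference on it for as long as the key is registered. A minimal usage sketch, assuming the pool, ioqueue, socket and callback set are created elsewhere:

#include <pjlib.h>

/* Sketch only: register a socket together with a group lock. */
static pj_status_t register_with_grp_lock(pj_pool_t *pool,
                                          pj_ioqueue_t *ioqueue,
                                          pj_sock_t sock,
                                          const pj_ioqueue_callback *cb,
                                          pj_ioqueue_key_t **p_key)
{
    pj_grp_lock_t *grp_lock;
    pj_status_t status;

    status = pj_grp_lock_create(pool, NULL, &grp_lock);
    if (status != PJ_SUCCESS)
        return status;

    /* On success the key adds its own reference to grp_lock; the poll loop
     * above adds/releases further references around callback dispatch.
     */
    return pj_ioqueue_register_sock2(pool, ioqueue, sock, grp_lock,
                                     NULL, cb, p_key);
}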
diff --git a/pjlib/src/pj/ioqueue_symbian.cpp b/pjlib/src/pj/ioqueue_symbian.cpp
index 01f4db5..0b310d1 100644
--- a/pjlib/src/pj/ioqueue_symbian.cpp
+++ b/pjlib/src/pj/ioqueue_symbian.cpp
@@ -1,4 +1,4 @@
-/* $Id: ioqueue_symbian.cpp 3841 2011-10-24 09:28:13Z ming $ */
+/* $Id: ioqueue_symbian.cpp 4374 2013-02-27 07:15:57Z riza $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -528,6 +528,19 @@ PJ_DEF(pj_status_t) pj_ioqueue_register_sock( pj_pool_t *pool,
return PJ_SUCCESS;
}
+PJ_DEF(pj_status_t) pj_ioqueue_register_sock2(pj_pool_t *pool,
+ pj_ioqueue_t *ioqueue,
+ pj_sock_t sock,
+ pj_grp_lock_t *grp_lock,
+ void *user_data,
+ const pj_ioqueue_callback *cb,
+ pj_ioqueue_key_t **p_key)
+{
+ PJ_UNUSED_ARG(grp_lock);
+
+ return pj_ioqueue_register_sock(pool, ioqueue, sock, user_data, cb, p_key);
+}
+
/*
* Unregister from the I/O Queue framework.
*/
diff --git a/pjlib/src/pj/ip_helper_generic.c b/pjlib/src/pj/ip_helper_generic.c
index 3a43423..94d5a1f 100644
--- a/pjlib/src/pj/ip_helper_generic.c
+++ b/pjlib/src/pj/ip_helper_generic.c
@@ -1,4 +1,4 @@
-/* $Id: ip_helper_generic.c 3553 2011-05-05 06:14:19Z nanang $ */
+/* $Id: ip_helper_generic.c 4355 2013-02-19 16:27:37Z bennylp $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -165,9 +165,6 @@ static pj_status_t if_enum_by_af(int af,
return PJ_RETURN_OS_ERROR(oserr);
}
- /* Done with socket */
- pj_sock_close(sock);
-
     /* Iterate over the interfaces */
ifr = (struct ifreq*) ifc.ifc_req;
count = ifc.ifc_len / sizeof(struct ifreq);
@@ -177,6 +174,7 @@ static pj_status_t if_enum_by_af(int af,
*p_cnt = 0;
for (i=0; i<count; ++i) {
struct ifreq *itf = &ifr[i];
+ struct ifreq iff = *itf;
struct sockaddr *ad = &itf->ifr_addr;
TRACE_((THIS_FILE, " checking interface %s", itf->ifr_name));
@@ -188,13 +186,19 @@ static pj_status_t if_enum_by_af(int af,
continue;
}
- if ((itf->ifr_flags & IFF_UP)==0) {
+ if (ioctl(sock, SIOCGIFFLAGS, &iff) != 0) {
+ TRACE_((THIS_FILE, " ioctl(SIOCGIFFLAGS) failed: %s",
+ get_os_errmsg()));
+ continue; /* Failed to get flags, continue */
+ }
+
+ if ((iff.ifr_flags & IFF_UP)==0) {
TRACE_((THIS_FILE, " interface is down"));
continue; /* Skip when interface is down */
}
#if PJ_IP_HELPER_IGNORE_LOOPBACK_IF
- if (itf->ifr_flags & IFF_LOOPBACK) {
+ if (iff.ifr_flags & IFF_LOOPBACK) {
TRACE_((THIS_FILE, " loopback interface"));
continue; /* Skip loopback interface */
}
@@ -220,10 +224,14 @@ static pj_status_t if_enum_by_af(int af,
(*p_cnt)++;
}
+ /* Done with socket */
+ pj_sock_close(sock);
+
TRACE_((THIS_FILE, "done, found %d address(es)", *p_cnt));
return (*p_cnt != 0) ? PJ_SUCCESS : PJ_ENOTFOUND;
}
+
#elif defined(PJ_HAS_NET_IF_H) && PJ_HAS_NET_IF_H != 0
/* Note: this does not work with IPv6 */
static pj_status_t if_enum_by_af(int af, unsigned *p_cnt, pj_sockaddr ifs[])
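
The change above stops trusting ifr_flags as returned by SIOCGIFCONF (which only fills in the name and address) and instead queries the flags with a separate SIOCGIFFLAGS ioctl on a copy of the ifreq. A standalone POSIX sketch of the same pattern, not taken from the patch:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>

/* Sketch only: return non-zero if the interface described by 'itf' is up. */
static int iface_is_up(int sock, const struct ifreq *itf)
{
    struct ifreq iff;

    memcpy(&iff, itf, sizeof(iff));     /* keep the SIOCGIFCONF entry intact */
    if (ioctl(sock, SIOCGIFFLAGS, &iff) != 0)
        return 0;                       /* treat failure as unusable */

    return (iff.ifr_flags & IFF_UP) != 0;
}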
diff --git a/pjlib/src/pj/lock.c b/pjlib/src/pj/lock.c
index a7879af..34e2d1e 100644
--- a/pjlib/src/pj/lock.c
+++ b/pjlib/src/pj/lock.c
@@ -1,4 +1,4 @@
-/* $Id: lock.c 3553 2011-05-05 06:14:19Z nanang $ */
+/* $Id: lock.c 4412 2013-03-05 03:12:32Z riza $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -20,10 +20,12 @@
#include <pj/lock.h>
#include <pj/os.h>
#include <pj/assert.h>
+#include <pj/log.h>
#include <pj/pool.h>
#include <pj/string.h>
#include <pj/errno.h>
+#define THIS_FILE "lock.c"
typedef void LOCK_OBJ;
@@ -196,3 +198,518 @@ PJ_DEF(pj_status_t) pj_lock_destroy( pj_lock_t *lock )
return (*lock->destroy)(lock->lock_object);
}
+
+/******************************************************************************
+ * Group lock
+ */
+
+/* Individual lock in the group lock */
+typedef struct grp_lock_item
+{
+ PJ_DECL_LIST_MEMBER(struct grp_lock_item);
+ int prio;
+ pj_lock_t *lock;
+
+} grp_lock_item;
+
+/* Destroy callbacks */
+typedef struct grp_destroy_callback
+{
+ PJ_DECL_LIST_MEMBER(struct grp_destroy_callback);
+ void *comp;
+ void (*handler)(void*);
+} grp_destroy_callback;
+
+#if PJ_GRP_LOCK_DEBUG
+/* Store each add_ref caller */
+typedef struct grp_lock_ref
+{
+ PJ_DECL_LIST_MEMBER(struct grp_lock_ref);
+ const char *file;
+ int line;
+} grp_lock_ref;
+#endif
+
+/* The group lock */
+struct pj_grp_lock_t
+{
+ pj_lock_t base;
+
+ pj_pool_t *pool;
+ pj_atomic_t *ref_cnt;
+ pj_lock_t *own_lock;
+
+ pj_thread_t *owner;
+ int owner_cnt;
+
+ grp_lock_item lock_list;
+ grp_destroy_callback destroy_list;
+
+#if PJ_GRP_LOCK_DEBUG
+ grp_lock_ref ref_list;
+ grp_lock_ref ref_free_list;
+#endif
+};
+
+
+PJ_DEF(void) pj_grp_lock_config_default(pj_grp_lock_config *cfg)
+{
+ pj_bzero(cfg, sizeof(*cfg));
+}
+
+static void grp_lock_set_owner_thread(pj_grp_lock_t *glock)
+{
+ if (!glock->owner) {
+ glock->owner = pj_thread_this();
+ glock->owner_cnt = 1;
+ } else {
+ pj_assert(glock->owner == pj_thread_this());
+ glock->owner_cnt++;
+ }
+}
+
+static void grp_lock_unset_owner_thread(pj_grp_lock_t *glock)
+{
+ pj_assert(glock->owner == pj_thread_this());
+ pj_assert(glock->owner_cnt > 0);
+ if (--glock->owner_cnt <= 0) {
+ glock->owner = NULL;
+ glock->owner_cnt = 0;
+ }
+}
+
+static pj_status_t grp_lock_acquire(LOCK_OBJ *p)
+{
+ pj_grp_lock_t *glock = (pj_grp_lock_t*)p;
+ grp_lock_item *lck;
+
+ pj_assert(pj_atomic_get(glock->ref_cnt) > 0);
+
+ lck = glock->lock_list.next;
+ while (lck != &glock->lock_list) {
+ pj_lock_acquire(lck->lock);
+ lck = lck->next;
+ }
+ grp_lock_set_owner_thread(glock);
+ pj_grp_lock_add_ref(glock);
+ return PJ_SUCCESS;
+}
+
+static pj_status_t grp_lock_tryacquire(LOCK_OBJ *p)
+{
+ pj_grp_lock_t *glock = (pj_grp_lock_t*)p;
+ grp_lock_item *lck;
+
+ pj_assert(pj_atomic_get(glock->ref_cnt) > 0);
+
+ lck = glock->lock_list.next;
+ while (lck != &glock->lock_list) {
+ pj_status_t status = pj_lock_tryacquire(lck->lock);
+ if (status != PJ_SUCCESS) {
+ lck = lck->prev;
+ while (lck != &glock->lock_list) {
+ pj_lock_release(lck->lock);
+ lck = lck->prev;
+ }
+ return status;
+ }
+ lck = lck->next;
+ }
+ grp_lock_set_owner_thread(glock);
+ pj_grp_lock_add_ref(glock);
+ return PJ_SUCCESS;
+}
+
+static pj_status_t grp_lock_release(LOCK_OBJ *p)
+{
+ pj_grp_lock_t *glock = (pj_grp_lock_t*)p;
+ grp_lock_item *lck;
+
+ grp_lock_unset_owner_thread(glock);
+
+ lck = glock->lock_list.prev;
+ while (lck != &glock->lock_list) {
+ pj_lock_release(lck->lock);
+ lck = lck->prev;
+ }
+ return pj_grp_lock_dec_ref(glock);
+}
+
+static pj_status_t grp_lock_destroy(LOCK_OBJ *p)
+{
+ pj_grp_lock_t *glock = (pj_grp_lock_t*)p;
+ pj_pool_t *pool = glock->pool;
+ grp_lock_item *lck;
+ grp_destroy_callback *cb;
+
+ if (!glock->pool) {
+ /* already destroyed?! */
+ return PJ_EINVAL;
+ }
+
+ /* Release all chained locks */
+ lck = glock->lock_list.next;
+ while (lck != &glock->lock_list) {
+ if (lck->lock != glock->own_lock) {
+ int i;
+ for (i=0; i<glock->owner_cnt; ++i)
+ pj_lock_release(lck->lock);
+ }
+ lck = lck->next;
+ }
+
+ /* Call callbacks */
+ cb = glock->destroy_list.next;
+ while (cb != &glock->destroy_list) {
+ grp_destroy_callback *next = cb->next;
+ cb->handler(cb->comp);
+ cb = next;
+ }
+
+ pj_lock_destroy(glock->own_lock);
+ pj_atomic_destroy(glock->ref_cnt);
+ glock->pool = NULL;
+ pj_pool_release(pool);
+
+ return PJ_SUCCESS;
+}
+
+
+PJ_DEF(pj_status_t) pj_grp_lock_create( pj_pool_t *pool,
+ const pj_grp_lock_config *cfg,
+ pj_grp_lock_t **p_grp_lock)
+{
+ pj_grp_lock_t *glock;
+ grp_lock_item *own_lock;
+ pj_status_t status;
+
+ PJ_ASSERT_RETURN(pool && p_grp_lock, PJ_EINVAL);
+
+ PJ_UNUSED_ARG(cfg);
+
+ pool = pj_pool_create(pool->factory, "glck%p", 512, 512, NULL);
+ if (!pool)
+ return PJ_ENOMEM;
+
+ glock = PJ_POOL_ZALLOC_T(pool, pj_grp_lock_t);
+ glock->base.lock_object = glock;
+ glock->base.acquire = &grp_lock_acquire;
+ glock->base.tryacquire = &grp_lock_tryacquire;
+ glock->base.release = &grp_lock_release;
+ glock->base.destroy = &grp_lock_destroy;
+
+ glock->pool = pool;
+ pj_list_init(&glock->lock_list);
+ pj_list_init(&glock->destroy_list);
+#if PJ_GRP_LOCK_DEBUG
+ pj_list_init(&glock->ref_list);
+ pj_list_init(&glock->ref_free_list);
+#endif
+
+ status = pj_atomic_create(pool, 0, &glock->ref_cnt);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ status = pj_lock_create_recursive_mutex(pool, pool->obj_name,
+ &glock->own_lock);
+ if (status != PJ_SUCCESS)
+ goto on_error;
+
+ own_lock = PJ_POOL_ZALLOC_T(pool, grp_lock_item);
+ own_lock->lock = glock->own_lock;
+ pj_list_push_back(&glock->lock_list, own_lock);
+
+ *p_grp_lock = glock;
+ return PJ_SUCCESS;
+
+on_error:
+ grp_lock_destroy(glock);
+ return status;
+}
+
+PJ_DEF(pj_status_t) pj_grp_lock_destroy( pj_grp_lock_t *grp_lock)
+{
+ return grp_lock_destroy(grp_lock);
+}
+
+PJ_DEF(pj_status_t) pj_grp_lock_acquire( pj_grp_lock_t *grp_lock)
+{
+ return grp_lock_acquire(grp_lock);
+}
+
+PJ_DEF(pj_status_t) pj_grp_lock_tryacquire( pj_grp_lock_t *grp_lock)
+{
+ return grp_lock_tryacquire(grp_lock);
+}
+
+PJ_DEF(pj_status_t) pj_grp_lock_release( pj_grp_lock_t *grp_lock)
+{
+ return grp_lock_release(grp_lock);
+}
+
+PJ_DEF(pj_status_t) pj_grp_lock_replace( pj_grp_lock_t *old_lock,
+ pj_grp_lock_t *new_lock)
+{
+ grp_destroy_callback *ocb;
+
+ /* Move handlers from old to new */
+ ocb = old_lock->destroy_list.next;
+ while (ocb != &old_lock->destroy_list) {
+ grp_destroy_callback *ncb;
+
+ ncb = PJ_POOL_ALLOC_T(new_lock->pool, grp_destroy_callback);
+ ncb->comp = ocb->comp;
+ ncb->handler = ocb->handler;
+ pj_list_push_back(&new_lock->destroy_list, ncb);
+
+ ocb = ocb->next;
+ }
+
+ pj_list_init(&old_lock->destroy_list);
+
+ grp_lock_destroy(old_lock);
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(pj_status_t) pj_grp_lock_add_handler( pj_grp_lock_t *glock,
+ pj_pool_t *pool,
+ void *comp,
+ void (*destroy)(void *comp))
+{
+ grp_destroy_callback *cb;
+
+ grp_lock_acquire(glock);
+
+ if (pool == NULL)
+ pool = glock->pool;
+
+ cb = PJ_POOL_ZALLOC_T(pool, grp_destroy_callback);
+ cb->comp = comp;
+ cb->handler = destroy;
+ pj_list_push_back(&glock->destroy_list, cb);
+
+ grp_lock_release(glock);
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(pj_status_t) pj_grp_lock_del_handler( pj_grp_lock_t *glock,
+ void *comp,
+ void (*destroy)(void *comp))
+{
+ grp_destroy_callback *cb;
+
+ grp_lock_acquire(glock);
+
+ cb = glock->destroy_list.next;
+ while (cb != &glock->destroy_list) {
+ if (cb->comp == comp && cb->handler == destroy)
+ break;
+ cb = cb->next;
+ }
+
+ if (cb != &glock->destroy_list)
+ pj_list_erase(cb);
+
+ grp_lock_release(glock);
+ return PJ_SUCCESS;
+}
+
+static pj_status_t grp_lock_add_ref(pj_grp_lock_t *glock)
+{
+ pj_atomic_inc(glock->ref_cnt);
+ return PJ_SUCCESS;
+}
+
+static pj_status_t grp_lock_dec_ref(pj_grp_lock_t *glock)
+{
+ int cnt; /* for debugging */
+ if ((cnt=pj_atomic_dec_and_get(glock->ref_cnt)) == 0) {
+ grp_lock_destroy(glock);
+ return PJ_EGONE;
+ }
+ pj_assert(cnt > 0);
+ pj_grp_lock_dump(glock);
+ return PJ_SUCCESS;
+}
+
+#if PJ_GRP_LOCK_DEBUG
+PJ_DEF(pj_status_t) pj_grp_lock_add_ref_dbg(pj_grp_lock_t *glock,
+ const char *file,
+ int line)
+{
+ grp_lock_ref *ref;
+ pj_status_t status;
+
+ pj_enter_critical_section();
+ if (!pj_list_empty(&glock->ref_free_list)) {
+ ref = glock->ref_free_list.next;
+ pj_list_erase(ref);
+ } else {
+ ref = PJ_POOL_ALLOC_T(glock->pool, grp_lock_ref);
+ }
+
+ ref->file = file;
+ ref->line = line;
+ pj_list_push_back(&glock->ref_list, ref);
+
+ pj_leave_critical_section();
+
+ status = grp_lock_add_ref(glock);
+
+ if (status != PJ_SUCCESS) {
+ pj_enter_critical_section();
+ pj_list_erase(ref);
+ pj_list_push_back(&glock->ref_free_list, ref);
+ pj_leave_critical_section();
+ }
+
+ return status;
+}
+
+PJ_DEF(pj_status_t) pj_grp_lock_dec_ref_dbg(pj_grp_lock_t *glock,
+ const char *file,
+ int line)
+{
+ grp_lock_ref *ref;
+
+ pj_enter_critical_section();
+ /* Find the same source file */
+ ref = glock->ref_list.next;
+ while (ref != &glock->ref_list) {
+ if (strcmp(ref->file, file) == 0) {
+ pj_list_erase(ref);
+ pj_list_push_back(&glock->ref_free_list, ref);
+ break;
+ }
+ ref = ref->next;
+ }
+ pj_leave_critical_section();
+
+ if (ref == &glock->ref_list) {
+ PJ_LOG(2,(THIS_FILE, "pj_grp_lock_dec_ref_dbg() could not find "
+ "matching ref for %s", file));
+ }
+
+ return grp_lock_dec_ref(glock);
+}
+#else
+PJ_DEF(pj_status_t) pj_grp_lock_add_ref(pj_grp_lock_t *glock)
+{
+ return grp_lock_add_ref(glock);
+}
+
+PJ_DEF(pj_status_t) pj_grp_lock_dec_ref(pj_grp_lock_t *glock)
+{
+ return grp_lock_dec_ref(glock);
+}
+#endif
+
+PJ_DEF(int) pj_grp_lock_get_ref(pj_grp_lock_t *glock)
+{
+ return pj_atomic_get(glock->ref_cnt);
+}
+
+PJ_DEF(pj_status_t) pj_grp_lock_chain_lock( pj_grp_lock_t *glock,
+ pj_lock_t *lock,
+ int pos)
+{
+ grp_lock_item *lck, *new_lck;
+ int i;
+
+ grp_lock_acquire(glock);
+
+ for (i=0; i<glock->owner_cnt; ++i)
+ pj_lock_acquire(lock);
+
+ lck = glock->lock_list.next;
+ while (lck != &glock->lock_list) {
+ if (lck->prio >= pos)
+ break;
+ lck = lck->next;
+ }
+
+ new_lck = PJ_POOL_ZALLOC_T(glock->pool, grp_lock_item);
+ new_lck->prio = pos;
+ new_lck->lock = lock;
+ pj_list_insert_before(lck, new_lck);
+
+ /* this will also release the new lock */
+ grp_lock_release(glock);
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(pj_status_t) pj_grp_lock_unchain_lock( pj_grp_lock_t *glock,
+ pj_lock_t *lock)
+{
+ grp_lock_item *lck;
+
+ grp_lock_acquire(glock);
+
+ lck = glock->lock_list.next;
+ while (lck != &glock->lock_list) {
+ if (lck->lock == lock)
+ break;
+ lck = lck->next;
+ }
+
+ if (lck != &glock->lock_list) {
+ int i;
+
+ pj_list_erase(lck);
+ for (i=0; i<glock->owner_cnt; ++i)
+ pj_lock_release(lck->lock);
+ }
+
+ grp_lock_release(glock);
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(void) pj_grp_lock_dump(pj_grp_lock_t *grp_lock)
+{
+#if PJ_GRP_LOCK_DEBUG
+ grp_lock_ref *ref = grp_lock->ref_list.next;
+ char info_buf[1000];
+ pj_str_t info;
+
+ info.ptr = info_buf;
+ info.slen = 0;
+
+ pj_grp_lock_acquire(grp_lock);
+ pj_enter_critical_section();
+
+ while (ref != &grp_lock->ref_list && info.slen < sizeof(info_buf)) {
+ char *start = info.ptr + info.slen;
+ int max_len = sizeof(info_buf) - info.slen;
+ int len;
+
+ len = pj_ansi_snprintf(start, max_len, "%s:%d ", ref->file, ref->line);
+ if (len < 1 || len > max_len) {
+ len = strlen(ref->file);
+ if (len > max_len - 1)
+ len = max_len - 1;
+
+ memcpy(start, ref->file, len);
+ start[len++] = ' ';
+ }
+
+ info.slen += len;
+
+ ref = ref->next;
+ }
+
+ if (ref != &grp_lock->ref_list) {
+ int i;
+ for (i=0; i<4; ++i)
+ info_buf[sizeof(info_buf)-i-1] = '.';
+ }
+ info.ptr[info.slen-1] = '\0';
+
+ pj_leave_critical_section();
+ pj_grp_lock_release(grp_lock);
+
+ PJ_LOG(4,(THIS_FILE, "Group lock %p, ref_cnt=%d. Reference holders: %s",
+ grp_lock, pj_grp_lock_get_ref(grp_lock), info.ptr));
+#else
+ PJ_UNUSED_ARG(grp_lock);
+#endif
+}
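
The new group lock couples a reference count, a chain of locks and a list of destroy handlers, so an object's cleanup runs only after the last reference is gone. A lifetime sketch, under the assumption that the object type and its destroy handler (my_obj_on_destroy) are defined by the caller:

#include <pjlib.h>

static void my_obj_on_destroy(void *comp)
{
    /* Runs when the last reference to the group lock is released. */
}

/* Sketch only: create a group lock, keep one reference for the owner and
 * register the owner's cleanup handler.
 */
static pj_status_t my_obj_init(pj_pool_t *pool, pj_grp_lock_t **p_glock)
{
    pj_grp_lock_t *glock;
    pj_status_t status;

    status = pj_grp_lock_create(pool, NULL, &glock);
    if (status != PJ_SUCCESS)
        return status;

    pj_grp_lock_add_ref(glock);
    pj_grp_lock_add_handler(glock, pool, NULL, &my_obj_on_destroy);

    *p_glock = glock;
    return PJ_SUCCESS;
}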
diff --git a/pjlib/src/pj/os_core_unix.c b/pjlib/src/pj/os_core_unix.c
index 7810eb2..ff8ba68 100644
--- a/pjlib/src/pj/os_core_unix.c
+++ b/pjlib/src/pj/os_core_unix.c
@@ -1,4 +1,4 @@
-/* $Id: os_core_unix.c 3999 2012-03-30 07:10:13Z bennylp $ */
+/* $Id: os_core_unix.c 4359 2013-02-21 11:18:36Z bennylp $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -97,7 +97,18 @@ struct pj_sem_t
#if defined(PJ_HAS_EVENT_OBJ) && PJ_HAS_EVENT_OBJ != 0
struct pj_event_t
{
- char obj_name[PJ_MAX_OBJ_NAME];
+ enum event_state {
+ EV_STATE_OFF,
+ EV_STATE_SET,
+ EV_STATE_PULSED
+ } state;
+
+ pj_mutex_t mutex;
+ pthread_cond_t cond;
+
+ pj_bool_t auto_reset;
+ unsigned threads_waiting;
+ unsigned threads_to_release;
};
#endif /* PJ_HAS_EVENT_OBJ */
@@ -1700,13 +1711,48 @@ PJ_DEF(pj_status_t) pj_event_create(pj_pool_t *pool, const char *name,
pj_bool_t manual_reset, pj_bool_t initial,
pj_event_t **ptr_event)
{
- pj_assert(!"Not supported!");
- PJ_UNUSED_ARG(pool);
- PJ_UNUSED_ARG(name);
- PJ_UNUSED_ARG(manual_reset);
- PJ_UNUSED_ARG(initial);
- PJ_UNUSED_ARG(ptr_event);
- return PJ_EINVALIDOP;
+ pj_event_t *event;
+
+ event = PJ_POOL_ALLOC_T(pool, pj_event_t);
+
+ init_mutex(&event->mutex, name, PJ_MUTEX_SIMPLE);
+ pthread_cond_init(&event->cond, 0);
+ event->auto_reset = !manual_reset;
+ event->threads_waiting = 0;
+
+ if (initial) {
+ event->state = EV_STATE_SET;
+ event->threads_to_release = 1;
+ } else {
+ event->state = EV_STATE_OFF;
+ event->threads_to_release = 0;
+ }
+
+ *ptr_event = event;
+ return PJ_SUCCESS;
+}
+
+static void event_on_one_release(pj_event_t *event)
+{
+ if (event->state == EV_STATE_SET) {
+ if (event->auto_reset) {
+ event->threads_to_release = 0;
+ event->state = EV_STATE_OFF;
+ } else {
+ /* Manual reset remains on */
+ }
+ } else {
+ if (event->auto_reset) {
+ /* Only release one */
+ event->threads_to_release = 0;
+ event->state = EV_STATE_OFF;
+ } else {
+ event->threads_to_release--;
+ pj_assert(event->threads_to_release >= 0);
+ if (event->threads_to_release==0)
+ event->state = EV_STATE_OFF;
+ }
+ }
}
/*
@@ -1714,8 +1760,14 @@ PJ_DEF(pj_status_t) pj_event_create(pj_pool_t *pool, const char *name,
*/
PJ_DEF(pj_status_t) pj_event_wait(pj_event_t *event)
{
- PJ_UNUSED_ARG(event);
- return PJ_EINVALIDOP;
+ pthread_mutex_lock(&event->mutex.mutex);
+ event->threads_waiting++;
+ while (event->state == EV_STATE_OFF)
+ pthread_cond_wait(&event->cond, &event->mutex.mutex);
+ event->threads_waiting--;
+ event_on_one_release(event);
+ pthread_mutex_unlock(&event->mutex.mutex);
+ return PJ_SUCCESS;
}
/*
@@ -1723,8 +1775,16 @@ PJ_DEF(pj_status_t) pj_event_wait(pj_event_t *event)
*/
PJ_DEF(pj_status_t) pj_event_trywait(pj_event_t *event)
{
- PJ_UNUSED_ARG(event);
- return PJ_EINVALIDOP;
+ pj_status_t status;
+
+ pthread_mutex_lock(&event->mutex.mutex);
+ status = event->state != EV_STATE_OFF ? PJ_SUCCESS : -1;
+ if (status==PJ_SUCCESS) {
+ event_on_one_release(event);
+ }
+ pthread_mutex_unlock(&event->mutex.mutex);
+
+ return status;
}
/*
@@ -1732,8 +1792,15 @@ PJ_DEF(pj_status_t) pj_event_trywait(pj_event_t *event)
*/
PJ_DEF(pj_status_t) pj_event_set(pj_event_t *event)
{
- PJ_UNUSED_ARG(event);
- return PJ_EINVALIDOP;
+ pthread_mutex_lock(&event->mutex.mutex);
+ event->threads_to_release = 1;
+ event->state = EV_STATE_SET;
+ if (event->auto_reset)
+ pthread_cond_signal(&event->cond);
+ else
+ pthread_cond_broadcast(&event->cond);
+ pthread_mutex_unlock(&event->mutex.mutex);
+ return PJ_SUCCESS;
}
/*
@@ -1741,8 +1808,18 @@ PJ_DEF(pj_status_t) pj_event_set(pj_event_t *event)
*/
PJ_DEF(pj_status_t) pj_event_pulse(pj_event_t *event)
{
- PJ_UNUSED_ARG(event);
- return PJ_EINVALIDOP;
+ pthread_mutex_lock(&event->mutex.mutex);
+ if (event->threads_waiting) {
+ event->threads_to_release = event->auto_reset ? 1 :
+ event->threads_waiting;
+ event->state = EV_STATE_PULSED;
+ if (event->threads_to_release==1)
+ pthread_cond_signal(&event->cond);
+ else
+ pthread_cond_broadcast(&event->cond);
+ }
+ pthread_mutex_unlock(&event->mutex.mutex);
+ return PJ_SUCCESS;
}
/*
@@ -1750,8 +1827,11 @@ PJ_DEF(pj_status_t) pj_event_pulse(pj_event_t *event)
*/
PJ_DEF(pj_status_t) pj_event_reset(pj_event_t *event)
{
- PJ_UNUSED_ARG(event);
- return PJ_EINVALIDOP;
+ pthread_mutex_lock(&event->mutex.mutex);
+ event->state = EV_STATE_OFF;
+ event->threads_to_release = 0;
+ pthread_mutex_unlock(&event->mutex.mutex);
+ return PJ_SUCCESS;
}
/*
@@ -1759,8 +1839,9 @@ PJ_DEF(pj_status_t) pj_event_reset(pj_event_t *event)
*/
PJ_DEF(pj_status_t) pj_event_destroy(pj_event_t *event)
{
- PJ_UNUSED_ARG(event);
- return PJ_EINVALIDOP;
+ pj_mutex_destroy(&event->mutex);
+ pthread_cond_destroy(&event->cond);
+ return PJ_SUCCESS;
}
#endif /* PJ_HAS_EVENT_OBJ */
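
With the stub implementations replaced above, pj_event_t now works on POSIX via a mutex/condition-variable pair. A small sketch of the API, assuming PJLIB is already initialized and a pool is available:

#include <pjlib.h>

/* Sketch only: create an auto-reset event and block until some other
 * thread signals it with pj_event_set().
 */
static pj_status_t wait_for_signal(pj_pool_t *pool)
{
    pj_event_t *event;
    pj_status_t status;

    status = pj_event_create(pool, "evt", PJ_FALSE /* manual_reset */,
                             PJ_FALSE /* initial */, &event);
    if (status != PJ_SUCCESS)
        return status;

    status = pj_event_wait(event);      /* returns once pj_event_set() runs */
    pj_event_destroy(event);
    return status;
}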
diff --git a/pjlib/src/pj/os_info.c b/pjlib/src/pj/os_info.c
index be67120..1d6e007 100644
--- a/pjlib/src/pj/os_info.c
+++ b/pjlib/src/pj/os_info.c
@@ -1,4 +1,4 @@
-/* $Id: os_info.c 3553 2011-05-05 06:14:19Z nanang $ */
+/* $Id: os_info.c 4411 2013-03-04 04:34:38Z nanang $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -242,6 +242,9 @@ PJ_DEF(const pj_sys_info*) pj_get_sys_info(void)
} else {
si.os_name = pj_str("Unknown");
}
+
+ /* Avoid compile warning on Symbian. */
+ goto get_sdk_info;
}
#endif
diff --git a/pjlib/src/pj/pool.c b/pjlib/src/pj/pool.c
index 9992df7..90443b9 100644
--- a/pjlib/src/pj/pool.c
+++ b/pjlib/src/pj/pool.c
@@ -1,4 +1,4 @@
-/* $Id: pool.c 3553 2011-05-05 06:14:19Z nanang $ */
+/* $Id: pool.c 4298 2012-11-22 05:00:01Z nanang $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -88,7 +88,7 @@ static pj_pool_block *pj_pool_create_block( pj_pool_t *pool, pj_size_t size)
* If no space is available in all the blocks, a new block might be created
* (depending on whether the pool is allowed to resize).
*/
-PJ_DEF(void*) pj_pool_allocate_find(pj_pool_t *pool, unsigned size)
+PJ_DEF(void*) pj_pool_allocate_find(pj_pool_t *pool, pj_size_t size)
{
pj_pool_block *block = pool->block_list.next;
void *p;
@@ -121,7 +121,7 @@ PJ_DEF(void*) pj_pool_allocate_find(pj_pool_t *pool, unsigned size)
if (pool->increment_size <
size + sizeof(pj_pool_block) + PJ_POOL_ALIGNMENT)
{
- unsigned count;
+ pj_size_t count;
count = (size + pool->increment_size + sizeof(pj_pool_block) +
PJ_POOL_ALIGNMENT) /
pool->increment_size;
diff --git a/pjlib/src/pj/pool_caching.c b/pjlib/src/pj/pool_caching.c
index a15c3d9..60659ec 100644
--- a/pjlib/src/pj/pool_caching.c
+++ b/pjlib/src/pj/pool_caching.c
@@ -1,4 +1,4 @@
-/* $Id: pool_caching.c 3553 2011-05-05 06:14:19Z nanang $ */
+/* $Id: pool_caching.c 4298 2012-11-22 05:00:01Z nanang $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -178,7 +178,11 @@ static pj_pool_t* cpool_create_pool(pj_pool_factory *pf,
pj_pool_init_int(pool, name, increment_sz, callback);
/* Update pool manager's free capacity. */
- cp->capacity -= pj_pool_get_capacity(pool);
+ if (cp->capacity > pj_pool_get_capacity(pool)) {
+ cp->capacity -= pj_pool_get_capacity(pool);
+ } else {
+ cp->capacity = 0;
+ }
PJ_LOG(6, (pool->obj_name, "pool reused, size=%u", pool->capacity));
}
@@ -199,7 +203,7 @@ static pj_pool_t* cpool_create_pool(pj_pool_factory *pf,
static void cpool_release_pool( pj_pool_factory *pf, pj_pool_t *pool)
{
pj_caching_pool *cp = (pj_caching_pool*)pf;
- unsigned pool_capacity;
+ pj_size_t pool_capacity;
unsigned i;
PJ_CHECK_STACK();
diff --git a/pjlib/src/pj/sock_bsd.c b/pjlib/src/pj/sock_bsd.c
index ed9fde9..8ed2d5d 100644
--- a/pjlib/src/pj/sock_bsd.c
+++ b/pjlib/src/pj/sock_bsd.c
@@ -1,4 +1,4 @@
-/* $Id: sock_bsd.c 4170 2012-06-19 07:40:19Z bennylp $ */
+/* $Id: sock_bsd.c 4233 2012-08-21 11:16:06Z ming $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -723,7 +723,6 @@ PJ_DEF(pj_status_t) pj_sock_recvfrom(pj_sock_t sock,
{
PJ_CHECK_STACK();
PJ_ASSERT_RETURN(buf && len, PJ_EINVAL);
- PJ_ASSERT_RETURN(from && fromlen, (*len=-1, PJ_EINVAL));
*len = recvfrom(sock, (char*)buf, *len, flags,
(struct sockaddr*)from, (socklen_t*)fromlen);
@@ -731,7 +730,9 @@ PJ_DEF(pj_status_t) pj_sock_recvfrom(pj_sock_t sock,
if (*len < 0)
return PJ_RETURN_OS_ERROR(pj_get_native_netos_error());
else {
- PJ_SOCKADDR_RESET_LEN(from);
+ if (from) {
+ PJ_SOCKADDR_RESET_LEN(from);
+ }
return PJ_SUCCESS;
}
}
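
The removed assertion means the source-address arguments are now optional. A one-line sketch of the relaxed contract, assuming the socket and buffer are managed by the caller:

#include <pjlib.h>

/* Sketch only: receive a datagram without asking for the sender's address. */
static pj_status_t recv_ignore_source(pj_sock_t sock, void *buf,
                                      pj_ssize_t *len)
{
    return pj_sock_recvfrom(sock, buf, len, 0, NULL, NULL);
}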
diff --git a/pjlib/src/pj/sock_common.c b/pjlib/src/pj/sock_common.c
index dd2ef6e..1528b5f 100644
--- a/pjlib/src/pj/sock_common.c
+++ b/pjlib/src/pj/sock_common.c
@@ -1,4 +1,4 @@
-/* $Id: sock_common.c 3841 2011-10-24 09:28:13Z ming $ */
+/* $Id: sock_common.c 4343 2013-02-07 09:35:34Z nanang $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -24,6 +24,7 @@
#include <pj/ip_helper.h>
#include <pj/os.h>
#include <pj/addr_resolv.h>
+#include <pj/rand.h>
#include <pj/string.h>
#include <pj/compat/socket.h>
@@ -956,42 +957,54 @@ PJ_DEF(pj_status_t) pj_gethostip(int af, pj_sockaddr *addr)
return PJ_SUCCESS;
}
-/* Get the default IP interface */
-PJ_DEF(pj_status_t) pj_getdefaultipinterface(int af, pj_sockaddr *addr)
+/* Get IP interface for sending to the specified destination */
+PJ_DEF(pj_status_t) pj_getipinterface(int af,
+ const pj_str_t *dst,
+ pj_sockaddr *itf_addr,
+ pj_bool_t allow_resolve,
+ pj_sockaddr *p_dst_addr)
{
+ pj_sockaddr dst_addr;
pj_sock_t fd;
- pj_str_t cp;
- pj_sockaddr a;
int len;
pj_uint8_t zero[64];
pj_status_t status;
- addr->addr.sa_family = (pj_uint16_t)af;
-
- status = pj_sock_socket(af, pj_SOCK_DGRAM(), 0, &fd);
+ pj_sockaddr_init(af, &dst_addr, NULL, 53);
+ status = pj_inet_pton(af, dst, pj_sockaddr_get_addr(&dst_addr));
if (status != PJ_SUCCESS) {
- return status;
- }
+ /* "dst" is not an IP address. */
+ if (allow_resolve) {
+ status = pj_sockaddr_init(af, &dst_addr, dst, 53);
+ } else {
+ pj_str_t cp;
- if (af == PJ_AF_INET) {
- cp = pj_str("1.1.1.1");
- } else {
- cp = pj_str("1::1");
+ if (af == PJ_AF_INET) {
+ cp = pj_str("1.1.1.1");
+ } else {
+ cp = pj_str("1::1");
+ }
+ status = pj_sockaddr_init(af, &dst_addr, &cp, 53);
+ }
+
+ if (status != PJ_SUCCESS)
+ return status;
}
- status = pj_sockaddr_init(af, &a, &cp, 53);
+
+ /* Create UDP socket and connect() to the destination IP */
+ status = pj_sock_socket(af, pj_SOCK_DGRAM(), 0, &fd);
if (status != PJ_SUCCESS) {
- pj_sock_close(fd);
return status;
}
- status = pj_sock_connect(fd, &a, pj_sockaddr_get_len(&a));
+ status = pj_sock_connect(fd, &dst_addr, pj_sockaddr_get_len(&dst_addr));
if (status != PJ_SUCCESS) {
pj_sock_close(fd);
return status;
}
- len = sizeof(a);
- status = pj_sock_getsockname(fd, &a, &len);
+ len = sizeof(*itf_addr);
+ status = pj_sock_getsockname(fd, itf_addr, &len);
if (status != PJ_SUCCESS) {
pj_sock_close(fd);
return status;
@@ -1001,18 +1014,70 @@ PJ_DEF(pj_status_t) pj_getdefaultipinterface(int af, pj_sockaddr *addr)
/* Check that the address returned is not zero */
pj_bzero(zero, sizeof(zero));
- if (pj_memcmp(pj_sockaddr_get_addr(&a), zero,
- pj_sockaddr_get_addr_len(&a))==0)
+ if (pj_memcmp(pj_sockaddr_get_addr(itf_addr), zero,
+ pj_sockaddr_get_addr_len(itf_addr))==0)
{
return PJ_ENOTFOUND;
}
- pj_sockaddr_copy_addr(addr, &a);
+ if (p_dst_addr)
+ *p_dst_addr = dst_addr;
- /* Success */
return PJ_SUCCESS;
}
+/* Get the default IP interface */
+PJ_DEF(pj_status_t) pj_getdefaultipinterface(int af, pj_sockaddr *addr)
+{
+ pj_str_t cp;
+
+ if (af == PJ_AF_INET) {
+ cp = pj_str("1.1.1.1");
+ } else {
+ cp = pj_str("1::1");
+ }
+
+ return pj_getipinterface(af, &cp, addr, PJ_FALSE, NULL);
+}
+
+
+/*
+ * Bind socket at random port.
+ */
+PJ_DEF(pj_status_t) pj_sock_bind_random( pj_sock_t sockfd,
+ const pj_sockaddr_t *addr,
+ pj_uint16_t port_range,
+ pj_uint16_t max_try)
+{
+ pj_sockaddr bind_addr;
+ int addr_len;
+ pj_uint16_t base_port;
+ pj_status_t status = PJ_SUCCESS;
+
+ PJ_CHECK_STACK();
+
+ PJ_ASSERT_RETURN(addr, PJ_EINVAL);
+
+ pj_sockaddr_cp(&bind_addr, addr);
+ addr_len = pj_sockaddr_get_len(addr);
+ base_port = pj_sockaddr_get_port(addr);
+
+ if (base_port == 0 || port_range == 0) {
+ return pj_sock_bind(sockfd, &bind_addr, addr_len);
+ }
+
+ for (; max_try; --max_try) {
+ pj_uint16_t port;
+ port = (pj_uint16_t)(base_port + pj_rand() % (port_range + 1));
+ pj_sockaddr_set_port(&bind_addr, port);
+ status = pj_sock_bind(sockfd, &bind_addr, addr_len);
+ if (status == PJ_SUCCESS)
+ break;
+ }
+
+ return status;
+}
+
/* Only need to implement these in DLL build */
#if defined(PJ_DLL)
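
pj_getipinterface() generalizes the old default-interface lookup to an arbitrary destination, and pj_sock_bind_random() retries pj_sock_bind() on random ports inside a range. A sketch of the latter, with the base port and range chosen arbitrarily for illustration:

#include <pjlib.h>

/* Sketch only: bind somewhere in [4000, 4100], trying up to 10 ports. */
static pj_status_t bind_in_range(pj_sock_t sock)
{
    pj_sockaddr addr;
    pj_status_t status;

    status = pj_sockaddr_init(pj_AF_INET(), &addr, NULL, 4000);
    if (status != PJ_SUCCESS)
        return status;

    return pj_sock_bind_random(sock, &addr, 100, 10);
}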
diff --git a/pjlib/src/pj/ssl_sock_ossl.c b/pjlib/src/pj/ssl_sock_ossl.c
index 82ad2a5..4b1d0a7 100644
--- a/pjlib/src/pj/ssl_sock_ossl.c
+++ b/pjlib/src/pj/ssl_sock_ossl.c
@@ -1,4 +1,4 @@
-/* $Id: ssl_sock_ossl.c 4146 2012-05-30 06:35:59Z nanang $ */
+/* $Id: ssl_sock_ossl.c 4349 2013-02-14 09:38:31Z nanang $ */
/*
* Copyright (C) 2009-2011 Teluu Inc. (http://www.teluu.com)
*
@@ -105,9 +105,10 @@ typedef struct read_data_t
ssock->param.read_buffer_size)
/*
- * Structure of SSL socket write buffer.
+ * Structure of SSL socket write data.
*/
typedef struct write_data_t {
+ PJ_DECL_LIST_MEMBER(struct write_data_t);
pj_ioqueue_op_key_t key;
pj_size_t record_len;
pj_ioqueue_op_key_t *app_key;
@@ -121,23 +122,14 @@ typedef struct write_data_t {
} write_data_t;
/*
- * Structure of SSL socket write state.
+ * Structure of SSL socket write buffer (circular buffer).
*/
-typedef struct write_state_t {
+typedef struct send_buf_t {
char *buf;
pj_size_t max_len;
char *start;
pj_size_t len;
- write_data_t *last_data;
-} write_state_t;
-
-/*
- * Structure of write data pending.
- */
-typedef struct write_pending_t {
- PJ_DECL_LIST_MEMBER(struct write_pending_t);
- write_data_t data;
-} write_pending_t;
+} send_buf_t;
/*
* Secure socket structure definition.
@@ -173,10 +165,12 @@ struct pj_ssl_sock_t
void **asock_rbuf;
read_data_t *ssock_rbuf;
- write_state_t write_state;
- write_pending_t write_pending;
- write_pending_t write_pending_empty;
- pj_lock_t *write_mutex; /* protect write BIO and write_state */
+ write_data_t write_pending;/* list of pending write to OpenSSL */
+ write_data_t write_pending_empty; /* cache for write_pending */
+    pj_bool_t            flushing_write_pend; /* write_pending flush is in progress */
+ send_buf_t send_buf;
+ write_data_t send_pending; /* list of pending write to network */
+ pj_lock_t *write_mutex; /* protect write BIO and send_buf */
SSL_CTX *ossl_ctx;
SSL *ossl_ssl;
@@ -197,6 +191,8 @@ struct pj_ssl_cert_t
};
+static write_data_t* alloc_send_data(pj_ssl_sock_t *ssock, pj_size_t len);
+static void free_send_data(pj_ssl_sock_t *ssock, write_data_t *wdata);
static pj_status_t flush_delayed_send(pj_ssl_sock_t *ssock);
/*
@@ -1054,6 +1050,179 @@ static pj_bool_t on_handshake_complete(pj_ssl_sock_t *ssock,
return PJ_TRUE;
}
+static write_data_t* alloc_send_data(pj_ssl_sock_t *ssock, pj_size_t len)
+{
+ send_buf_t *send_buf = &ssock->send_buf;
+ pj_size_t avail_len, skipped_len = 0;
+ char *reg1, *reg2;
+ pj_size_t reg1_len, reg2_len;
+ write_data_t *p;
+
+ /* Check buffer availability */
+ avail_len = send_buf->max_len - send_buf->len;
+ if (avail_len < len)
+ return NULL;
+
+ /* If buffer empty, reset start pointer and return it */
+ if (send_buf->len == 0) {
+ send_buf->start = send_buf->buf;
+ send_buf->len = len;
+ p = (write_data_t*)send_buf->start;
+ goto init_send_data;
+ }
+
+    /* Free space may be wrapped/split into two regions, so check whether
+     * either region can hold the write data.
+     */
+ reg1 = send_buf->start + send_buf->len;
+ if (reg1 >= send_buf->buf + send_buf->max_len)
+ reg1 -= send_buf->max_len;
+ reg1_len = send_buf->max_len - send_buf->len;
+ if (reg1 + reg1_len > send_buf->buf + send_buf->max_len) {
+ reg1_len = send_buf->buf + send_buf->max_len - reg1;
+ reg2 = send_buf->buf;
+ reg2_len = send_buf->start - send_buf->buf;
+ } else {
+ reg2 = NULL;
+ reg2_len = 0;
+ }
+
+    /* More buffer availability check; note that the write data must be in
+     * a contiguous buffer.
+     */
+ avail_len = PJ_MAX(reg1_len, reg2_len);
+ if (avail_len < len)
+ return NULL;
+
+ /* Get the data slot */
+ if (reg1_len >= len) {
+ p = (write_data_t*)reg1;
+ } else {
+ p = (write_data_t*)reg2;
+ skipped_len = reg1_len;
+ }
+
+ /* Update buffer length */
+ send_buf->len += len + skipped_len;
+
+init_send_data:
+ /* Init the new send data */
+ pj_bzero(p, sizeof(*p));
+ pj_list_init(p);
+ pj_list_push_back(&ssock->send_pending, p);
+
+ return p;
+}
+
+static void free_send_data(pj_ssl_sock_t *ssock, write_data_t *wdata)
+{
+ send_buf_t *buf = &ssock->send_buf;
+ write_data_t *spl = &ssock->send_pending;
+
+ pj_assert(!pj_list_empty(&ssock->send_pending));
+
+ /* Free slot from the buffer */
+ if (spl->next == wdata && spl->prev == wdata) {
+ /* This is the only data, reset the buffer */
+ buf->start = buf->buf;
+ buf->len = 0;
+ } else if (spl->next == wdata) {
+ /* This is the first data, shift start pointer of the buffer and
+ * adjust the buffer length.
+ */
+ buf->start = (char*)wdata->next;
+ if (wdata->next > wdata) {
+ buf->len -= ((char*)wdata->next - buf->start);
+ } else {
+ /* Overlapped */
+ unsigned right_len, left_len;
+ right_len = buf->buf + buf->max_len - (char*)wdata;
+ left_len = (char*)wdata->next - buf->buf;
+ buf->len -= (right_len + left_len);
+ }
+ } else if (spl->prev == wdata) {
+ /* This is the last data, just adjust the buffer length */
+ if (wdata->prev < wdata) {
+ unsigned jump_len;
+ jump_len = (char*)wdata -
+ ((char*)wdata->prev + wdata->prev->record_len);
+ buf->len -= (wdata->record_len + jump_len);
+ } else {
+ /* Overlapped */
+ unsigned right_len, left_len;
+ right_len = buf->buf + buf->max_len -
+ ((char*)wdata->prev + wdata->prev->record_len);
+ left_len = (char*)wdata + wdata->record_len - buf->buf;
+ buf->len -= (right_len + left_len);
+ }
+ }
+    /* For data in the middle of the buffer, do nothing to the buffer; the
+     * slot will be reclaimed when the first/last data is freed.
+     */
+
+ /* Remove the data from send pending list */
+ pj_list_erase(wdata);
+}
+
+#if 0
+/* Just for testing send buffer alloc/free */
+#include <pj/rand.h>
+pj_status_t pj_ssl_sock_ossl_test_send_buf(pj_pool_t *pool)
+{
+ enum { MAX_CHUNK_NUM = 20 };
+ unsigned chunk_size, chunk_cnt, i;
+ write_data_t *wdata[MAX_CHUNK_NUM] = {0};
+ pj_time_val now;
+ pj_ssl_sock_t *ssock = NULL;
+ pj_ssl_sock_param param;
+ pj_status_t status;
+
+ pj_gettimeofday(&now);
+ pj_srand((unsigned)now.sec);
+
+ pj_ssl_sock_param_default(&param);
+ status = pj_ssl_sock_create(pool, &param, &ssock);
+ if (status != PJ_SUCCESS) {
+ return status;
+ }
+
+ if (ssock->send_buf.max_len == 0) {
+ ssock->send_buf.buf = (char*)
+ pj_pool_alloc(ssock->pool,
+ ssock->param.send_buffer_size);
+ ssock->send_buf.max_len = ssock->param.send_buffer_size;
+ ssock->send_buf.start = ssock->send_buf.buf;
+ ssock->send_buf.len = 0;
+ }
+
+ chunk_size = ssock->param.send_buffer_size / MAX_CHUNK_NUM / 2;
+ chunk_cnt = 0;
+ for (i = 0; i < MAX_CHUNK_NUM; i++) {
+ wdata[i] = alloc_send_data(ssock, pj_rand() % chunk_size + 321);
+ if (wdata[i])
+ chunk_cnt++;
+ else
+ break;
+ }
+
+ while (chunk_cnt) {
+ i = pj_rand() % MAX_CHUNK_NUM;
+ if (wdata[i]) {
+ free_send_data(ssock, wdata[i]);
+ wdata[i] = NULL;
+ chunk_cnt--;
+ }
+ }
+
+ if (ssock->send_buf.len != 0)
+ status = PJ_EBUG;
+
+ pj_ssl_sock_close(ssock);
+ return status;
+}
+#endif
+
+
/* Flush write BIO to network socket. Note that any access to write BIO
* MUST be serialized, so mutex protection must cover any call to OpenSSL
* API (that possibly generate data for write BIO) along with the call to
@@ -1067,76 +1236,39 @@ static pj_status_t flush_write_bio(pj_ssl_sock_t *ssock,
{
char *data;
pj_ssize_t len;
-
- write_state_t *write_st = &ssock->write_state;
write_data_t *wdata;
- pj_size_t avail_len, needed_len, skipped_len = 0;
+ pj_size_t needed_len;
pj_status_t status;
+ pj_lock_acquire(ssock->write_mutex);
+
/* Check if there is data in write BIO, flush it if any */
- if (!BIO_pending(ssock->ossl_wbio))
+ if (!BIO_pending(ssock->ossl_wbio)) {
+ pj_lock_release(ssock->write_mutex);
return PJ_SUCCESS;
+ }
/* Get data and its length */
len = BIO_get_mem_data(ssock->ossl_wbio, &data);
- if (len == 0)
+ if (len == 0) {
+ pj_lock_release(ssock->write_mutex);
return PJ_SUCCESS;
+ }
/* Calculate buffer size needed, and align it to 8 */
needed_len = len + sizeof(write_data_t);
needed_len = ((needed_len + 7) >> 3) << 3;
- /* Check buffer availability */
- avail_len = write_st->max_len - write_st->len;
- if (avail_len < needed_len)
+ /* Allocate buffer for send data */
+ wdata = alloc_send_data(ssock, needed_len);
+ if (wdata == NULL) {
+ pj_lock_release(ssock->write_mutex);
return PJ_ENOMEM;
-
- /* More buffer availability check, note that the write data must be in
- * a contigue buffer.
- */
- if (write_st->len == 0) {
-
- write_st->start = write_st->buf;
- wdata = (write_data_t*)write_st->start;
-
- } else {
-
- char *reg1, *reg2;
- pj_size_t reg1_len, reg2_len;
-
- /* Unused slots may be wrapped/splitted into two regions, so let's
- * analyze them if any region can hold the write data.
- */
- reg1 = write_st->start + write_st->len;
- if (reg1 >= write_st->buf + write_st->max_len)
- reg1 -= write_st->max_len;
- reg1_len = write_st->max_len - write_st->len;
- if (reg1 + reg1_len > write_st->buf + write_st->max_len) {
- reg1_len = write_st->buf + write_st->max_len - reg1;
- reg2 = write_st->buf;
- reg2_len = write_st->start - write_st->buf;
- } else {
- reg2 = NULL;
- reg2_len = 0;
- }
- avail_len = PJ_MAX(reg1_len, reg2_len);
- if (avail_len < needed_len)
- return PJ_ENOMEM;
-
- /* Get write data pointer and update buffer length */
- if (reg1_len >= needed_len) {
- wdata = (write_data_t*)reg1;
- } else {
- wdata = (write_data_t*)reg2;
- /* Unused slot in region 1 is skipped as current write data
- * doesn't fit it.
- */
- skipped_len = reg1_len;
- }
}
- /* Copy the data and set its properties into the buffer */
- pj_bzero(wdata, sizeof(write_data_t));
+ /* Copy the data and set its properties into the send data */
+ pj_ioqueue_op_key_init(&wdata->key, sizeof(pj_ioqueue_op_key_t));
+ wdata->key.user_data = wdata;
wdata->app_key = send_key;
wdata->record_len = needed_len;
wdata->data_len = len;
@@ -1144,6 +1276,12 @@ static pj_status_t flush_write_bio(pj_ssl_sock_t *ssock,
wdata->flags = flags;
pj_memcpy(&wdata->data, data, len);
+ /* Reset write BIO */
+ BIO_reset(ssock->ossl_wbio);
+
+ /* Ticket #1573: Don't hold mutex while calling PJLIB socket send(). */
+ pj_lock_release(ssock->write_mutex);
+
/* Send it */
if (ssock->param.sock_type == pj_SOCK_STREAM()) {
status = pj_activesock_send(ssock->asock, &wdata->key,
@@ -1157,24 +1295,13 @@ static pj_status_t flush_write_bio(pj_ssl_sock_t *ssock,
ssock->addr_len);
}
- /* Oh no, EWOULDBLOCK! */
- if (status == PJ_STATUS_FROM_OS(OSERR_EWOULDBLOCK)) {
- /* Just return PJ_SUCCESS here, the pending data will be sent in next
- * call of this function since the data is still stored in write BIO.
+ if (status != PJ_EPENDING) {
+ /* When the sending is not pending, remove the wdata from send
+ * pending list.
*/
- return PJ_SUCCESS;
- }
-
- /* Reset write BIO after flushed */
- BIO_reset(ssock->ossl_wbio);
-
- if (status == PJ_EPENDING) {
- /* Update write state */
- pj_assert(skipped_len==0 || write_st->last_data);
- write_st->len += needed_len + skipped_len;
- if (write_st->last_data)
- write_st->last_data->record_len += skipped_len;
- write_st->last_data = wdata;
+ pj_lock_acquire(ssock->write_mutex);
+ free_send_data(ssock, wdata);
+ pj_lock_release(ssock->write_mutex);
}
return status;
@@ -1213,22 +1340,19 @@ static pj_status_t do_handshake(pj_ssl_sock_t *ssock)
pj_status_t status;
int err;
- pj_lock_acquire(ssock->write_mutex);
-
/* Perform SSL handshake */
+ pj_lock_acquire(ssock->write_mutex);
err = SSL_do_handshake(ssock->ossl_ssl);
+ pj_lock_release(ssock->write_mutex);
/* SSL_do_handshake() may put some pending data into SSL write BIO,
* flush it if any.
*/
status = flush_write_bio(ssock, &ssock->handshake_op_key, 0, 0);
if (status != PJ_SUCCESS && status != PJ_EPENDING) {
- pj_lock_release(ssock->write_mutex);
return status;
}
- pj_lock_release(ssock->write_mutex);
-
if (err < 0) {
err = SSL_get_error(ssock->ossl_ssl, err);
if (err != SSL_ERROR_NONE && err != SSL_ERROR_WANT_READ)
@@ -1356,9 +1480,15 @@ static pj_bool_t asock_on_data_read (pj_activesock_t *asock,
/* Update certificates */
update_certs_info(ssock);
- pj_lock_acquire(ssock->write_mutex);
+ // Ticket #1573: Don't hold mutex while calling
+ // PJLIB socket send().
+ //pj_lock_acquire(ssock->write_mutex);
status = flush_delayed_send(ssock);
- pj_lock_release(ssock->write_mutex);
+ //pj_lock_release(ssock->write_mutex);
+
+ /* If flushing is ongoing, treat it as success */
+ if (status == PJ_EBUSY)
+ status = PJ_SUCCESS;
if (status != PJ_SUCCESS && status != PJ_EPENDING) {
PJ_PERROR(1,(ssock->pool->obj_name, status,
@@ -1418,11 +1548,14 @@ static pj_bool_t asock_on_data_sent (pj_activesock_t *asock,
} else if (send_key != &ssock->handshake_op_key) {
/* Some data has been sent, notify application */
- write_data_t *wdata = (write_data_t*)send_key;
+ write_data_t *wdata = (write_data_t*)send_key->user_data;
if (ssock->param.cb.on_data_sent) {
pj_bool_t ret;
+ pj_ssize_t sent_len;
+
+ sent_len = (sent > 0)? wdata->plain_data_len : sent;
ret = (*ssock->param.cb.on_data_sent)(ssock, wdata->app_key,
- wdata->plain_data_len);
+ sent_len);
if (!ret) {
/* We've been destroyed */
return PJ_FALSE;
@@ -1431,12 +1564,7 @@ static pj_bool_t asock_on_data_sent (pj_activesock_t *asock,
/* Update write buffer state */
pj_lock_acquire(ssock->write_mutex);
- ssock->write_state.start += wdata->record_len;
- ssock->write_state.len -= wdata->record_len;
- if (ssock->write_state.last_data == wdata) {
- pj_assert(ssock->write_state.len == 0);
- ssock->write_state.last_data = NULL;
- }
+ free_send_data(ssock, wdata);
pj_lock_release(ssock->write_mutex);
} else {
@@ -1547,13 +1675,13 @@ static pj_bool_t asock_on_accept_complete (pj_activesock_t *asock,
goto on_return;
/* Prepare write/send state */
- pj_assert(ssock->write_state.max_len == 0);
- ssock->write_state.buf = (char*)
- pj_pool_alloc(ssock->pool,
- ssock->param.send_buffer_size);
- ssock->write_state.max_len = ssock->param.send_buffer_size;
- ssock->write_state.start = ssock->write_state.buf;
- ssock->write_state.len = 0;
+ pj_assert(ssock->send_buf.max_len == 0);
+ ssock->send_buf.buf = (char*)
+ pj_pool_alloc(ssock->pool,
+ ssock->param.send_buffer_size);
+ ssock->send_buf.max_len = ssock->param.send_buffer_size;
+ ssock->send_buf.start = ssock->send_buf.buf;
+ ssock->send_buf.len = 0;
/* Start handshake timer */
if (ssock->param.timer_heap && (ssock->param.timeout.sec != 0 ||
@@ -1626,13 +1754,13 @@ static pj_bool_t asock_on_connect_complete (pj_activesock_t *asock,
goto on_return;
/* Prepare write/send state */
- pj_assert(ssock->write_state.max_len == 0);
- ssock->write_state.buf = (char*)
+ pj_assert(ssock->send_buf.max_len == 0);
+ ssock->send_buf.buf = (char*)
pj_pool_alloc(ssock->pool,
ssock->param.send_buffer_size);
- ssock->write_state.max_len = ssock->param.send_buffer_size;
- ssock->write_state.start = ssock->write_state.buf;
- ssock->write_state.len = 0;
+ ssock->send_buf.max_len = ssock->param.send_buffer_size;
+ ssock->send_buf.start = ssock->send_buf.buf;
+ ssock->send_buf.len = 0;
#ifdef SSL_set_tlsext_host_name
/* Set server name to connect */
@@ -1805,7 +1933,10 @@ PJ_DEF(pj_status_t) pj_ssl_sock_create (pj_pool_t *pool,
ssock->ssl_state = SSL_STATE_NULL;
pj_list_init(&ssock->write_pending);
pj_list_init(&ssock->write_pending_empty);
+ pj_list_init(&ssock->send_pending);
pj_timer_entry_init(&ssock->timer, 0, ssock, &on_timer);
+ pj_ioqueue_op_key_init(&ssock->handshake_op_key,
+ sizeof(pj_ioqueue_op_key_t));
/* Create secure socket mutex */
status = pj_lock_create_recursive_mutex(pool, pool->obj_name,
@@ -2038,10 +2169,7 @@ PJ_DEF(pj_status_t) pj_ssl_sock_start_recvfrom2 (pj_ssl_sock_t *ssock,
return PJ_ENOTSUP;
}
-/* Write plain data to SSL and flush write BIO. Note that accessing
- * write BIO must be serialized, so a call to this function must be
- * protected by write mutex of SSL socket.
- */
+/* Write plain data to SSL and flush write BIO. */
static pj_status_t ssl_write(pj_ssl_sock_t *ssock,
pj_ioqueue_op_key_t *send_key,
const void *data,
@@ -2056,7 +2184,9 @@ static pj_status_t ssl_write(pj_ssl_sock_t *ssock,
     * negotiation may be in progress, so sending data should be delayed
* until re-negotiation is completed.
*/
+ pj_lock_acquire(ssock->write_mutex);
nwritten = SSL_write(ssock->ossl_ssl, data, size);
+ pj_lock_release(ssock->write_mutex);
if (nwritten == size) {
/* All data written, flush write BIO to network socket */
@@ -2087,56 +2217,81 @@ static pj_status_t ssl_write(pj_ssl_sock_t *ssock,
return status;
}
-/* Flush delayed data sending in the write pending list. Note that accessing
- * write pending list must be serialized, so a call to this function must be
- * protected by write mutex of SSL socket.
- */
+/* Flush delayed data sending in the write pending list. */
static pj_status_t flush_delayed_send(pj_ssl_sock_t *ssock)
{
+ /* Check for another ongoing flush */
+ if (ssock->flushing_write_pend)
+ return PJ_EBUSY;
+
+ pj_lock_acquire(ssock->write_mutex);
+
+ /* Again, check for another ongoing flush */
+ if (ssock->flushing_write_pend) {
+ pj_lock_release(ssock->write_mutex);
+ return PJ_EBUSY;
+ }
+
+ /* Set ongoing flush flag */
+ ssock->flushing_write_pend = PJ_TRUE;
+
while (!pj_list_empty(&ssock->write_pending)) {
- write_pending_t *wp;
+ write_data_t *wp;
pj_status_t status;
wp = ssock->write_pending.next;
- status = ssl_write(ssock, &wp->data.key, wp->data.data.ptr,
- wp->data.plain_data_len, wp->data.flags);
- if (status != PJ_SUCCESS)
+ /* Ticket #1573: Don't hold mutex while calling socket send. */
+ pj_lock_release(ssock->write_mutex);
+
+ status = ssl_write(ssock, &wp->key, wp->data.ptr,
+ wp->plain_data_len, wp->flags);
+ if (status != PJ_SUCCESS) {
+ /* Reset ongoing flush flag first. */
+ ssock->flushing_write_pend = PJ_FALSE;
return status;
+ }
+ pj_lock_acquire(ssock->write_mutex);
pj_list_erase(wp);
pj_list_push_back(&ssock->write_pending_empty, wp);
}
+ /* Reset ongoing flush flag */
+ ssock->flushing_write_pend = PJ_FALSE;
+
+ pj_lock_release(ssock->write_mutex);
+
return PJ_SUCCESS;
}
-/* Sending is delayed, push back the sending data into pending list. Note that
- * accessing write pending list must be serialized, so a call to this function
- * must be protected by write mutex of SSL socket.
- */
+/* Sending is delayed, push back the sending data into pending list. */
static pj_status_t delay_send (pj_ssl_sock_t *ssock,
pj_ioqueue_op_key_t *send_key,
const void *data,
pj_ssize_t size,
unsigned flags)
{
- write_pending_t *wp;
+ write_data_t *wp;
+
+ pj_lock_acquire(ssock->write_mutex);
/* Init write pending instance */
if (!pj_list_empty(&ssock->write_pending_empty)) {
wp = ssock->write_pending_empty.next;
pj_list_erase(wp);
} else {
- wp = PJ_POOL_ZALLOC_T(ssock->pool, write_pending_t);
+ wp = PJ_POOL_ZALLOC_T(ssock->pool, write_data_t);
}
- wp->data.app_key = send_key;
- wp->data.plain_data_len = size;
- wp->data.data.ptr = data;
- wp->data.flags = flags;
+ wp->app_key = send_key;
+ wp->plain_data_len = size;
+ wp->data.ptr = data;
+ wp->flags = flags;
pj_list_push_back(&ssock->write_pending, wp);
+
+ pj_lock_release(ssock->write_mutex);
/* Must return PJ_EPENDING */
return PJ_EPENDING;
@@ -2156,14 +2311,15 @@ PJ_DEF(pj_status_t) pj_ssl_sock_send (pj_ssl_sock_t *ssock,
PJ_ASSERT_RETURN(ssock && data && size && (*size>0), PJ_EINVAL);
PJ_ASSERT_RETURN(ssock->ssl_state==SSL_STATE_ESTABLISHED, PJ_EINVALIDOP);
- pj_lock_acquire(ssock->write_mutex);
+ // Ticket #1573: Don't hold mutex while calling PJLIB socket send().
+ //pj_lock_acquire(ssock->write_mutex);
/* Flush delayed send first. Sending data might be delayed when
* re-negotiation is on-progress.
*/
status = flush_delayed_send(ssock);
if (status == PJ_EBUSY) {
- /* Re-negotiation is on progress, delay sending */
+	/* Re-negotiation or flushing is in progress, delay sending */
status = delay_send(ssock, send_key, data, *size, flags);
goto on_return;
} else if (status != PJ_SUCCESS) {
@@ -2178,7 +2334,7 @@ PJ_DEF(pj_status_t) pj_ssl_sock_send (pj_ssl_sock_t *ssock,
}
on_return:
- pj_lock_release(ssock->write_mutex);
+ //pj_lock_release(ssock->write_mutex);
return status;
}
diff --git a/pjlib/src/pj/timer.c b/pjlib/src/pj/timer.c
index e78cba3..7951955 100644
--- a/pjlib/src/pj/timer.c
+++ b/pjlib/src/pj/timer.c
@@ -1,4 +1,4 @@
-/* $Id: timer.c 4154 2012-06-05 10:41:17Z bennylp $ */
+/* $Id: timer.c 4359 2013-02-21 11:18:36Z bennylp $ */
/*
  * The PJLIB's timer heap is based (or more correctly, copied and modified)
* from ACE library by Douglas C. Schmidt. ACE is an excellent OO framework
@@ -35,6 +35,7 @@
#include <pj/errno.h>
#include <pj/lock.h>
#include <pj/log.h>
+#include <pj/rand.h>
#define THIS_FILE "timer.c"
@@ -451,20 +452,27 @@ PJ_DEF(pj_timer_entry*) pj_timer_entry_init( pj_timer_entry *entry,
entry->id = id;
entry->user_data = user_data;
entry->cb = cb;
+ entry->_grp_lock = NULL;
return entry;
}
#if PJ_TIMER_DEBUG
-PJ_DEF(pj_status_t) pj_timer_heap_schedule_dbg( pj_timer_heap_t *ht,
- pj_timer_entry *entry,
- const pj_time_val *delay,
- const char *src_file,
- int src_line)
+static pj_status_t schedule_w_grp_lock_dbg(pj_timer_heap_t *ht,
+ pj_timer_entry *entry,
+ const pj_time_val *delay,
+ pj_bool_t set_id,
+ int id_val,
+ pj_grp_lock_t *grp_lock,
+ const char *src_file,
+ int src_line)
#else
-PJ_DEF(pj_status_t) pj_timer_heap_schedule( pj_timer_heap_t *ht,
- pj_timer_entry *entry,
- const pj_time_val *delay)
+static pj_status_t schedule_w_grp_lock(pj_timer_heap_t *ht,
+ pj_timer_entry *entry,
+ const pj_time_val *delay,
+ pj_bool_t set_id,
+ int id_val,
+ pj_grp_lock_t *grp_lock)
#endif
{
pj_status_t status;
@@ -485,13 +493,66 @@ PJ_DEF(pj_status_t) pj_timer_heap_schedule( pj_timer_heap_t *ht,
lock_timer_heap(ht);
status = schedule_entry(ht, entry, &expires);
+ if (status == PJ_SUCCESS) {
+ if (set_id)
+ entry->id = id_val;
+ entry->_grp_lock = grp_lock;
+ if (entry->_grp_lock) {
+ pj_grp_lock_add_ref(entry->_grp_lock);
+ }
+ }
unlock_timer_heap(ht);
return status;
}
-PJ_DEF(int) pj_timer_heap_cancel( pj_timer_heap_t *ht,
- pj_timer_entry *entry)
+
+#if PJ_TIMER_DEBUG
+PJ_DEF(pj_status_t) pj_timer_heap_schedule_dbg( pj_timer_heap_t *ht,
+ pj_timer_entry *entry,
+ const pj_time_val *delay,
+ const char *src_file,
+ int src_line)
+{
+ return schedule_w_grp_lock_dbg(ht, entry, delay, PJ_FALSE, 1, NULL,
+ src_file, src_line);
+}
+
+PJ_DEF(pj_status_t) pj_timer_heap_schedule_w_grp_lock_dbg(
+ pj_timer_heap_t *ht,
+ pj_timer_entry *entry,
+ const pj_time_val *delay,
+ int id_val,
+ pj_grp_lock_t *grp_lock,
+ const char *src_file,
+ int src_line)
+{
+ return schedule_w_grp_lock_dbg(ht, entry, delay, PJ_TRUE, id_val,
+ grp_lock, src_file, src_line);
+}
+
+#else
+PJ_DEF(pj_status_t) pj_timer_heap_schedule( pj_timer_heap_t *ht,
+ pj_timer_entry *entry,
+ const pj_time_val *delay)
+{
+ return schedule_w_grp_lock(ht, entry, delay, PJ_FALSE, 1, NULL);
+}
+
+PJ_DEF(pj_status_t) pj_timer_heap_schedule_w_grp_lock(pj_timer_heap_t *ht,
+ pj_timer_entry *entry,
+ const pj_time_val *delay,
+ int id_val,
+ pj_grp_lock_t *grp_lock)
+{
+ return schedule_w_grp_lock(ht, entry, delay, PJ_TRUE, id_val, grp_lock);
+}
+#endif
+
+static int cancel_timer(pj_timer_heap_t *ht,
+ pj_timer_entry *entry,
+ pj_bool_t set_id,
+ int id_val)
{
int count;
@@ -499,11 +560,32 @@ PJ_DEF(int) pj_timer_heap_cancel( pj_timer_heap_t *ht,
lock_timer_heap(ht);
count = cancel(ht, entry, 1);
+ if (set_id) {
+ entry->id = id_val;
+ }
+ if (entry->_grp_lock) {
+ pj_grp_lock_t *grp_lock = entry->_grp_lock;
+ entry->_grp_lock = NULL;
+ pj_grp_lock_dec_ref(grp_lock);
+ }
unlock_timer_heap(ht);
return count;
}
+PJ_DEF(int) pj_timer_heap_cancel( pj_timer_heap_t *ht,
+ pj_timer_entry *entry)
+{
+ return cancel_timer(ht, entry, PJ_FALSE, 0);
+}
+
+PJ_DEF(int) pj_timer_heap_cancel_if_active(pj_timer_heap_t *ht,
+ pj_timer_entry *entry,
+ int id_val)
+{
+ return cancel_timer(ht, entry, PJ_TRUE, id_val);
+}
+
PJ_DEF(unsigned) pj_timer_heap_poll( pj_timer_heap_t *ht,
pj_time_val *next_delay )
{
@@ -512,25 +594,38 @@ PJ_DEF(unsigned) pj_timer_heap_poll( pj_timer_heap_t *ht,
PJ_ASSERT_RETURN(ht, 0);
+ lock_timer_heap(ht);
if (!ht->cur_size && next_delay) {
next_delay->sec = next_delay->msec = PJ_MAXINT32;
+ unlock_timer_heap(ht);
return 0;
}
count = 0;
pj_gettickcount(&now);
- lock_timer_heap(ht);
while ( ht->cur_size &&
PJ_TIME_VAL_LTE(ht->heap[0]->_timer_value, now) &&
count < ht->max_entries_per_poll )
{
pj_timer_entry *node = remove_node(ht, 0);
+ pj_grp_lock_t *grp_lock;
+
++count;
+ grp_lock = node->_grp_lock;
+ node->_grp_lock = NULL;
+
unlock_timer_heap(ht);
+
+ PJ_RACE_ME(5);
+
if (node->cb)
(*node->cb)(ht, node);
+
+ if (grp_lock)
+ pj_grp_lock_dec_ref(grp_lock);
+
lock_timer_heap(ht);
}
if (ht->cur_size && next_delay) {
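
The timer heap can now keep a group lock reference for as long as an entry is scheduled, and pj_timer_heap_cancel_if_active() folds the usual cancel-and-reset-id idiom into one call. A sketch, assuming the heap, the entry (already initialized with pj_timer_entry_init) and the group lock exist:

#include <pjlib.h>

enum { TIMER_INACTIVE = 0, TIMER_ACTIVE = 1 };

/* Sketch only: schedule with a group lock reference held until the timer
 * fires or is cancelled.
 */
static void start_my_timer(pj_timer_heap_t *ht, pj_timer_entry *entry,
                           pj_grp_lock_t *glock)
{
    pj_time_val delay = { 5, 0 };

    pj_timer_heap_schedule_w_grp_lock(ht, entry, &delay, TIMER_ACTIVE, glock);
}

/* Sketch only: cancel if still scheduled; the id is set to TIMER_INACTIVE
 * and any group lock reference held by the entry is released.
 */
static void stop_my_timer(pj_timer_heap_t *ht, pj_timer_entry *entry)
{
    pj_timer_heap_cancel_if_active(ht, entry, TIMER_INACTIVE);
}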
diff --git a/pjlib/src/pj/timer_symbian.cpp b/pjlib/src/pj/timer_symbian.cpp
index 47aa984..8b18525 100644
--- a/pjlib/src/pj/timer_symbian.cpp
+++ b/pjlib/src/pj/timer_symbian.cpp
@@ -1,4 +1,4 @@
-/* $Id: timer_symbian.cpp 3553 2011-05-05 06:14:19Z nanang $ */
+/* $Id: timer_symbian.cpp 4374 2013-02-27 07:15:57Z riza $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -392,6 +392,24 @@ PJ_DEF(pj_status_t) pj_timer_heap_schedule( pj_timer_heap_t *ht,
return PJ_SUCCESS;
}
+PJ_DEF(pj_status_t) pj_timer_heap_schedule_w_grp_lock(pj_timer_heap_t *ht,
+ pj_timer_entry *entry,
+ const pj_time_val *delay,
+ int id_val,
+ pj_grp_lock_t *grp_lock)
+{
+ pj_status_t status;
+
+ PJ_UNUSED_ARG(grp_lock);
+
+ status = pj_timer_heap_schedule(ht, entry, delay);
+
+ if (status == PJ_SUCCESS)
+ entry->id = id_val;
+
+ return status;
+}
+
PJ_DEF(int) pj_timer_heap_cancel( pj_timer_heap_t *ht,
pj_timer_entry *entry)
{
@@ -411,6 +429,17 @@ PJ_DEF(int) pj_timer_heap_cancel( pj_timer_heap_t *ht,
}
}
+PJ_DEF(int) pj_timer_heap_cancel_if_active(pj_timer_heap_t *ht,
+ pj_timer_entry *entry,
+ int id_val)
+{
+ int count = pj_timer_heap_cancel(ht, entry);
+ if (count == 1)
+ entry->id = id_val;
+
+ return count;
+}
+
PJ_DEF(unsigned) pj_timer_heap_poll( pj_timer_heap_t *ht,
pj_time_val *next_delay )
{
diff --git a/pjlib/src/pjlib-test/activesock.c b/pjlib/src/pjlib-test/activesock.c
index 399cc29..6fe5091 100644
--- a/pjlib/src/pjlib-test/activesock.c
+++ b/pjlib/src/pjlib-test/activesock.c
@@ -1,4 +1,4 @@
-/* $Id: activesock.c 3553 2011-05-05 06:14:19Z nanang $ */
+/* $Id: activesock.c 4238 2012-08-31 06:17:56Z nanang $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -213,6 +213,7 @@ static int udp_ping_pong_test(void)
for (i=0; i<10 && last_rx1 == srv1->rx_cnt && last_rx2 == srv2->rx_cnt; ++i) {
pj_time_val delay = {0, 10};
#ifdef PJ_SYMBIAN
+ PJ_UNUSED_ARG(delay);
pj_symbianos_poll(-1, 100);
#else
pj_ioqueue_poll(ioqueue, &delay);
diff --git a/pjlib/src/pjlib-test/ioq_tcp.c b/pjlib/src/pjlib-test/ioq_tcp.c
index 8ecbc0f..faf0646 100644
--- a/pjlib/src/pjlib-test/ioq_tcp.c
+++ b/pjlib/src/pjlib-test/ioq_tcp.c
@@ -1,4 +1,4 @@
-/* $Id: ioq_tcp.c 3553 2011-05-05 06:14:19Z nanang $ */
+/* $Id: ioq_tcp.c 4238 2012-08-31 06:17:56Z nanang $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -363,7 +363,7 @@ static int compliance_test_0(pj_bool_t allow_concur)
#ifdef PJ_SYMBIAN
callback_call_count = 0;
- pj_symbianos_poll(-1, 1000);
+ pj_symbianos_poll(-1, PJ_TIME_VAL_MSEC(timeout));
status = callback_call_count;
#else
status = pj_ioqueue_poll(ioque, &timeout);
@@ -412,7 +412,7 @@ static int compliance_test_0(pj_bool_t allow_concur)
if (pending_op == 0) {
pj_time_val timeout = {1, 0};
#ifdef PJ_SYMBIAN
- status = pj_symbianos_poll(-1, 1000);
+ status = pj_symbianos_poll(-1, PJ_TIME_VAL_MSEC(timeout));
#else
status = pj_ioqueue_poll(ioque, &timeout);
#endif
@@ -542,7 +542,7 @@ static int compliance_test_1(pj_bool_t allow_concur)
#ifdef PJ_SYMBIAN
callback_call_count = 0;
- pj_symbianos_poll(-1, 1000);
+ pj_symbianos_poll(-1, PJ_TIME_VAL_MSEC(timeout));
status = callback_call_count;
#else
status = pj_ioqueue_poll(ioque, &timeout);
@@ -576,7 +576,7 @@ static int compliance_test_1(pj_bool_t allow_concur)
if (pending_op == 0) {
pj_time_val timeout = {1, 0};
#ifdef PJ_SYMBIAN
- status = pj_symbianos_poll(-1, 1000);
+ status = pj_symbianos_poll(-1, PJ_TIME_VAL_MSEC(timeout));
#else
status = pj_ioqueue_poll(ioque, &timeout);
#endif
@@ -771,7 +771,7 @@ static int compliance_test_2(pj_bool_t allow_concur)
pj_time_val timeout = {1, 0};
#ifdef PJ_SYMBIAN
- status = pj_symbianos_poll(-1, 1000);
+ status = pj_symbianos_poll(-1, PJ_TIME_VAL_MSEC(timeout));
#else
status = pj_ioqueue_poll(ioque, &timeout);
#endif
@@ -797,7 +797,7 @@ static int compliance_test_2(pj_bool_t allow_concur)
if (pending_op == 0) {
pj_time_val timeout = {1, 0};
#ifdef PJ_SYMBIAN
- status = pj_symbianos_poll(-1, 1000);
+ status = pj_symbianos_poll(-1, PJ_TIME_VAL_MSEC(timeout));
#else
status = pj_ioqueue_poll(ioque, &timeout);
#endif
diff --git a/pjlib/src/pjlib-test/ioq_udp.c b/pjlib/src/pjlib-test/ioq_udp.c
index 6928641..8180e3d 100644
--- a/pjlib/src/pjlib-test/ioq_udp.c
+++ b/pjlib/src/pjlib-test/ioq_udp.c
@@ -1,4 +1,4 @@
-/* $Id: ioq_udp.c 3553 2011-05-05 06:14:19Z nanang $ */
+/* $Id: ioq_udp.c 4238 2012-08-31 06:17:56Z nanang $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -267,7 +267,7 @@ static int compliance_test(pj_bool_t allow_concur)
TRACE_("poll...");
#ifdef PJ_SYMBIAN
- rc = pj_symbianos_poll(-1, 5000);
+ rc = pj_symbianos_poll(-1, PJ_TIME_VAL_MSEC(timeout));
#else
rc = pj_ioqueue_poll(ioque, &timeout);
#endif
@@ -783,7 +783,7 @@ static int bench_test(pj_bool_t allow_concur, int bufsize,
do {
pj_time_val timeout = { 1, 0 };
#ifdef PJ_SYMBIAN
- rc = pj_symbianos_poll(-1, 1000);
+ rc = pj_symbianos_poll(-1, PJ_TIME_VAL_MSEC(timeout));
#else
rc = pj_ioqueue_poll(ioque, &timeout);
#endif
@@ -812,6 +812,7 @@ static int bench_test(pj_bool_t allow_concur, int bufsize,
do {
pj_time_val timeout = { 0, 10 };
#ifdef PJ_SYMBIAN
+ PJ_UNUSED_ARG(timeout);
rc = pj_symbianos_poll(-1, 100);
#else
rc = pj_ioqueue_poll(ioque, &timeout);
diff --git a/pjlib/src/pjlib-test/ssl_sock.c b/pjlib/src/pjlib-test/ssl_sock.c
index d05a4db..d983eba 100644
--- a/pjlib/src/pjlib-test/ssl_sock.c
+++ b/pjlib/src/pjlib-test/ssl_sock.c
@@ -1,4 +1,4 @@
-/* $Id: ssl_sock.c 3553 2011-05-05 06:14:19Z nanang $ */
+/* $Id: ssl_sock.c 4247 2012-09-07 08:58:48Z nanang $ */
/*
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
@@ -1329,11 +1329,34 @@ on_return:
return status;
}
+#if 0 && (!defined(PJ_SYMBIAN) || PJ_SYMBIAN==0)
+pj_status_t pj_ssl_sock_ossl_test_send_buf(pj_pool_t *pool);
+static int ossl_test_send_buf()
+{
+ pj_pool_t *pool;
+ pj_status_t status;
+
+ pool = pj_pool_create(mem, "send_buf", 256, 256, NULL);
+ status = pj_ssl_sock_ossl_test_send_buf(pool);
+ pj_pool_release(pool);
+ return status;
+}
+#else
+static int ossl_test_send_buf()
+{
+ return 0;
+}
+#endif
int ssl_sock_test(void)
{
int ret;
+ PJ_LOG(3,("", "..test ossl send buf"));
+ ret = ossl_test_send_buf();
+ if (ret != 0)
+ return ret;
+
PJ_LOG(3,("", "..get cipher list test"));
ret = get_cipher_list();
if (ret != 0)