Diffstat (limited to 'pjlib/src')
-rw-r--r--  pjlib/src/pj++/compiletest.cpp  44
-rw-r--r--  pjlib/src/pj++/hash.hpp  71
-rw-r--r--  pjlib/src/pj++/ioqueue.hpp  172
-rw-r--r--  pjlib/src/pj++/list.hpp  182
-rw-r--r--  pjlib/src/pj++/os.hpp  342
-rw-r--r--  pjlib/src/pj++/pj++.cpp  15
-rw-r--r--  pjlib/src/pj++/pool.hpp  84
-rw-r--r--  pjlib/src/pj++/proactor.cpp  296
-rw-r--r--  pjlib/src/pj++/proactor.hpp  86
-rw-r--r--  pjlib/src/pj++/scanner.hpp  171
-rw-r--r--  pjlib/src/pj++/sock.hpp  194
-rw-r--r--  pjlib/src/pj++/string.hpp  247
-rw-r--r--  pjlib/src/pj++/timer.hpp  105
-rw-r--r--  pjlib/src/pj++/tree.hpp  107
-rw-r--r--  pjlib/src/pj++/types.hpp  59
-rw-r--r--  pjlib/src/pj/addr_resolv_linux_kernel.c  14
-rw-r--r--  pjlib/src/pj/addr_resolv_sock.c  44
-rw-r--r--  pjlib/src/pj/array.c  63
-rw-r--r--  pjlib/src/pj/compat/longjmp_i386.S  42
-rw-r--r--  pjlib/src/pj/compat/setjmp_i386.S  61
-rw-r--r--  pjlib/src/pj/compat/sigjmp.c  21
-rw-r--r--  pjlib/src/pj/compat/string.c  33
-rw-r--r--  pjlib/src/pj/config.c  40
-rw-r--r--  pjlib/src/pj/equeue_winnt.c  13
-rw-r--r--  pjlib/src/pj/errno.c  107
-rw-r--r--  pjlib/src/pj/except.c  148
-rw-r--r--  pjlib/src/pj/extra-exports.c  38
-rw-r--r--  pjlib/src/pj/fifobuf.c  182
-rw-r--r--  pjlib/src/pj/guid.c  19
-rw-r--r--  pjlib/src/pj/guid_simple.c  60
-rw-r--r--  pjlib/src/pj/guid_win32.c  61
-rw-r--r--  pjlib/src/pj/hash.c  252
-rw-r--r--  pjlib/src/pj/ioqueue_dummy.c  186
-rw-r--r--  pjlib/src/pj/ioqueue_epoll.c  852
-rw-r--r--  pjlib/src/pj/ioqueue_linux_kernel.c  150
-rw-r--r--  pjlib/src/pj/ioqueue_select.c  947
-rw-r--r--  pjlib/src/pj/ioqueue_winnt.c  852
-rw-r--r--  pjlib/src/pj/list.c  18
-rw-r--r--  pjlib/src/pj/lock.c  190
-rw-r--r--  pjlib/src/pj/log.c  217
-rw-r--r--  pjlib/src/pj/log_writer_printk.c  20
-rw-r--r--  pjlib/src/pj/log_writer_stdout.c  66
-rw-r--r--  pjlib/src/pj/md5.c  404
-rw-r--r--  pjlib/src/pj/os_core_linux_kernel.c  685
-rw-r--r--  pjlib/src/pj/os_core_unix.c  1182
-rw-r--r--  pjlib/src/pj/os_core_win32.c  1182
-rw-r--r--  pjlib/src/pj/os_error_linux_kernel.c  73
-rw-r--r--  pjlib/src/pj/os_error_unix.c  52
-rw-r--r--  pjlib/src/pj/os_error_win32.c  161
-rw-r--r--  pjlib/src/pj/os_time_ansi.c  65
-rw-r--r--  pjlib/src/pj/os_time_linux_kernel.c  58
-rw-r--r--  pjlib/src/pj/os_timestamp_common.c  129
-rw-r--r--  pjlib/src/pj/os_timestamp_linux.c  137
-rw-r--r--  pjlib/src/pj/os_timestamp_linux_kernel.c  70
-rw-r--r--  pjlib/src/pj/os_timestamp_win32.c  38
-rw-r--r--  pjlib/src/pj/pool.c  265
-rw-r--r--  pjlib/src/pj/pool_caching.c  210
-rw-r--r--  pjlib/src/pj/pool_dbg_win32.c  226
-rw-r--r--  pjlib/src/pj/pool_policy_kmalloc.c  54
-rw-r--r--  pjlib/src/pj/pool_policy_malloc.c  58
-rw-r--r--  pjlib/src/pj/rand.c  29
-rw-r--r--  pjlib/src/pj/rbtree.c  416
-rw-r--r--  pjlib/src/pj/scanner.c  556
-rw-r--r--  pjlib/src/pj/sock_bsd.c  572
-rw-r--r--  pjlib/src/pj/sock_linux_kernel.c  749
-rw-r--r--  pjlib/src/pj/sock_select.c  101
-rw-r--r--  pjlib/src/pj/string.c  124
-rw-r--r--  pjlib/src/pj/stun.c  118
-rw-r--r--  pjlib/src/pj/stun_client.c  270
-rw-r--r--  pjlib/src/pj/symbols.c  404
-rw-r--r--  pjlib/src/pj/timer.c  504
-rw-r--r--  pjlib/src/pj/tounix  4
-rw-r--r--  pjlib/src/pj/types.c  36
-rw-r--r--  pjlib/src/pj/xml.c  392
-rw-r--r--  pjlib/src/pjlib-samples/except.c  79
-rw-r--r--  pjlib/src/pjlib-samples/list.c  66
-rw-r--r--  pjlib/src/pjlib-samples/log.c  36
-rw-r--r--  pjlib/src/pjlib-test/atomic.c  94
-rw-r--r--  pjlib/src/pjlib-test/echo_clt.c  267
-rw-r--r--  pjlib/src/pjlib-test/echo_srv.c  331
-rw-r--r--  pjlib/src/pjlib-test/errno.c  162
-rw-r--r--  pjlib/src/pjlib-test/exception.c  156
-rw-r--r--  pjlib/src/pjlib-test/fifobuf.c  100
-rw-r--r--  pjlib/src/pjlib-test/ioq_perf.c  466
-rw-r--r--  pjlib/src/pjlib-test/ioq_tcp.c  474
-rw-r--r--  pjlib/src/pjlib-test/ioq_udp.c  664
-rw-r--r--  pjlib/src/pjlib-test/list.c  209
-rw-r--r--  pjlib/src/pjlib-test/main.c  73
-rw-r--r--  pjlib/src/pjlib-test/main_mod.c  33
-rw-r--r--  pjlib/src/pjlib-test/mutex.c  164
-rw-r--r--  pjlib/src/pjlib-test/os.c  10
-rw-r--r--  pjlib/src/pjlib-test/pool.c  164
-rw-r--r--  pjlib/src/pjlib-test/pool_perf.c  134
-rw-r--r--  pjlib/src/pjlib-test/rand.c  43
-rw-r--r--  pjlib/src/pjlib-test/rbtree.c  150
-rw-r--r--  pjlib/src/pjlib-test/select.c  208
-rw-r--r--  pjlib/src/pjlib-test/sleep.c  198
-rw-r--r--  pjlib/src/pjlib-test/sock.c  459
-rw-r--r--  pjlib/src/pjlib-test/sock_perf.c  183
-rw-r--r--  pjlib/src/pjlib-test/string.c  168
-rw-r--r--  pjlib/src/pjlib-test/test.c  196
-rw-r--r--  pjlib/src/pjlib-test/test.h  90
-rw-r--r--  pjlib/src/pjlib-test/thread.c  290
-rw-r--r--  pjlib/src/pjlib-test/timer.c  169
-rw-r--r--  pjlib/src/pjlib-test/timestamp.c  140
-rw-r--r--  pjlib/src/pjlib-test/udp_echo_srv_sync.c  168
-rw-r--r--  pjlib/src/pjlib-test/util.c  129
-rw-r--r--  pjlib/src/pjlib-test/xml.c  127
108 files changed, 22595 insertions, 0 deletions
diff --git a/pjlib/src/pj++/compiletest.cpp b/pjlib/src/pj++/compiletest.cpp
new file mode 100644
index 00000000..84e80aeb
--- /dev/null
+++ b/pjlib/src/pj++/compiletest.cpp
@@ -0,0 +1,44 @@
+/* $Header: /pjproject/pjlib/src/pj++/compiletest.cpp 4 8/24/05 10:29a Bennylp $ */
+#include <pjlib++.hpp>
+
+
+#if 0
+struct MyNode
+{
+ PJ_DECL_LIST_MEMBER(struct MyNode)
+ int data;
+};
+
+int test()
+{
+ typedef PJ_List<MyNode> MyList;
+ MyList list;
+ MyList::iterator it, end = list.end();
+
+ for (it=list.begin(); it!=end; ++it) {
+ MyNode *n = *it;
+ }
+
+ return 0;
+}
+
+int test_scan()
+{
+ PJ_Scanner scan;
+ PJ_String s;
+ PJ_CharSpec cs;
+
+ scan.get(&cs, &s);
+ return 0;
+}
+
+int test_scan_c()
+{
+ pj_scanner scan;
+ pj_str_t s;
+ pj_char_spec cs;
+
+ pj_scan_get(&scan, cs, &s);
+ return 0;
+}
+#endif
diff --git a/pjlib/src/pj++/hash.hpp b/pjlib/src/pj++/hash.hpp
new file mode 100644
index 00000000..d1fd162f
--- /dev/null
+++ b/pjlib/src/pj++/hash.hpp
@@ -0,0 +1,71 @@
+/* $Header: /pjproject/pjlib/src/pj++/hash.hpp 5 8/24/05 10:29a Bennylp $ */
+#ifndef __PJPP_HASH_H__
+#define __PJPP_HASH_H__
+
+#include <pj++/types.hpp>
+#include <pj/hash.h>
+
+class PJ_Hash_Table
+{
+public:
+ class iterator
+ {
+ public:
+ iterator() {}
+ explicit iterator(pj_hash_table_t *h, pj_hash_iterator_t *i) : ht_(h), it_(i) {}
+ iterator(const iterator &rhs) : ht_(rhs.ht_), it_(rhs.it_) {}
+ void operator++() { it_ = pj_hash_next(ht_, it_); }
+ bool operator==(const iterator &rhs) { return ht_ == rhs.ht_ && it_ == rhs.it_; }
+ iterator & operator=(const iterator &rhs) { ht_=rhs.ht_; it_=rhs.it_; return *this; }
+ private:
+ pj_hash_table_t *ht_;
+ pj_hash_iterator_t it_val_;
+ pj_hash_iterator_t *it_;
+
+ friend class PJ_Hash_Table;
+ };
+
+ static PJ_Hash_Table *create(PJ_Pool *pool, unsigned size)
+ {
+ return (PJ_Hash_Table*) pj_hash_create(pool->pool_(), size);
+ }
+
+ static pj_uint32_t calc(pj_uint32_t initial_hval, const void *key, unsigned keylen)
+ {
+ return pj_hash_calc(initial_hval, key, keylen);
+ }
+
+ pj_hash_table_t *hash_table_()
+ {
+ return (pj_hash_table_t*)this;
+ }
+
+ void *get(const void *key, unsigned keylen)
+ {
+ return pj_hash_get(this->hash_table_(), key, keylen);
+ }
+
+ void set(PJ_Pool *pool, const void *key, unsigned keylen, void *value)
+ {
+ pj_hash_set(pool->pool_(), this->hash_table_(), key, keylen, value);
+ }
+
+ unsigned count()
+ {
+ return pj_hash_count(this->hash_table_());
+ }
+
+ iterator begin()
+ {
+ iterator it(this->hash_table_(), NULL);
+ it.it_ = pj_hash_first(this->hash_table_(), &it.it_val_);
+ return it;
+ }
+
+ iterator end()
+ {
+ return iterator(this->hash_table_(), NULL);
+ }
+};
+
+#endif /* __PJPP_HASH_H__ */
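Usage sketch (illustrative, not part of the commit) for the PJ_Hash_Table wrapper above. It assumes pj_init() has already been called and that a PJ_Pool* is available, for example from the PJ_Caching_Pool wrapper further down in this change; the name hash_demo is made up.

    #include <pj++/hash.hpp>
    #include <pj++/pool.hpp>

    static void hash_demo(PJ_Pool *pool)
    {
        /* 31 buckets; entries are keyed by arbitrary byte strings. */
        PJ_Hash_Table *ht = PJ_Hash_Table::create(pool, 31);
        static int value = 42;

        ht->set(pool, "alice", 5, &value);          /* keylen excludes the NUL  */
        int *stored = (int*) ht->get("alice", 5);   /* returns &value           */
        unsigned n = ht->count();                   /* 1 after the set above    */
        (void)stored; (void)n;
    }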
diff --git a/pjlib/src/pj++/ioqueue.hpp b/pjlib/src/pj++/ioqueue.hpp
new file mode 100644
index 00000000..5ecb34ce
--- /dev/null
+++ b/pjlib/src/pj++/ioqueue.hpp
@@ -0,0 +1,172 @@
+/* $Header: /pjproject/pjlib/src/pj++/ioqueue.hpp 4 8/24/05 10:29a Bennylp $ */
+#ifndef __PJPP_IOQUEUE_H__
+#define __PJPP_IOQUEUE_H__
+
+#include <pj++/sock.hpp>
+#include <pj++/pool.hpp>
+#include <pj++/types.hpp>
+#include <pj/ioqueue.h>
+
+class PJ_IOQueue;
+
+class PJ_IOQueue_Event_Handler
+{
+public:
+ virtual ~PJ_IOQueue_Event_Handler()
+ {
+ }
+
+ pj_ioqueue_key_t* get_key() const
+ {
+ return key_;
+ }
+
+protected:
+ //
+ // Override this to get notification from I/O Queue
+ //
+ virtual void on_read_complete(pj_ssize_t bytes_read)
+ {
+ }
+
+ virtual void on_write_complete(pj_ssize_t bytes_sent)
+ {
+ }
+
+ virtual void on_accept_complete(int status)
+ {
+ }
+
+ virtual void on_connect_complete(int status)
+ {
+ }
+
+protected:
+ PJ_IOQueue_Event_Handler()
+ : ioqueue_(NULL), key_(NULL)
+ {
+ }
+
+private:
+ PJ_IOQueue *ioqueue_;
+ pj_ioqueue_key_t *key_;
+
+ static void read_complete_cb(pj_ioqueue_key_t *key, pj_ssize_t bytes_read)
+ {
+ PJ_IOQueue_Event_Handler *handler =
+ (PJ_IOQueue_Event_Handler*)pj_ioqueue_get_user_data(key);
+ handler->on_read_complete(bytes_read);
+ }
+
+ static void write_complete_cb(pj_ioqueue_key_t *key, pj_ssize_t bytes_sent);
+ static void accept_complete_cb(pj_ioqueue_key_t *key, int status);
+ static void connect_complete_cb(pj_ioqueue_key_t *key, int status);
+
+ friend class PJ_IOQueue;
+};
+
+
+class PJ_IOQueue
+{
+ typedef pj_ioqueue_t *B_;
+
+public:
+ typedef pj_ioqueue_key_t Key;
+
+ enum Operation
+ {
+ OP_NONE = PJ_IOQUEUE_OP_NONE,
+ OP_READ = PJ_IOQUEUE_OP_READ,
+ OP_RECV_FROM = PJ_IOQUEUE_OP_RECV_FROM,
+ OP_WRITE = PJ_IOQUEUE_OP_WRITE,
+ OP_SEND_TO = PJ_IOQUEUE_OP_SEND_TO,
+#if PJ_HAS_TCP
+ OP_ACCEPT = PJ_IOQUEUE_OP_ACCEPT,
+ OP_CONNECT = PJ_IOQUEUE_OP_CONNECT,
+#endif
+ };
+
+ enum Status
+ {
+ IS_PENDING = PJ_IOQUEUE_PENDING
+ };
+
+ static PJ_IOQueue *create(PJ_Pool *pool, pj_size_t max_fd)
+ {
+ return (PJ_IOQueue*) pj_ioqueue_create(pool->pool_(), max_fd);
+ }
+
+ operator B_()
+ {
+ return (pj_ioqueue_t*)(PJ_IOQueue*)this;
+ }
+
+ pj_ioqueue_t *ioq_()
+ {
+ return (B_)this;
+ }
+
+ void destroy()
+ {
+ pj_ioqueue_destroy(this->ioq_());
+ }
+
+ Key *register_handle(PJ_Pool *pool, pj_oshandle_t hnd, void *user_data)
+ {
+ return pj_ioqueue_register(pool->pool_(), this->ioq_(), hnd, user_data);
+ }
+
+ Key *register_socket(PJ_Pool *pool, pj_sock_t hnd, void *user_data)
+ {
+ return pj_ioqueue_register(pool->pool_(), this->ioq_(), (pj_oshandle_t)hnd, user_data);
+ }
+
+ pj_status_t unregister(Key *key)
+ {
+ return pj_ioqueue_unregister(this->ioq_(), key);
+ }
+
+ void *get_user_data(Key *key)
+ {
+ return pj_ioqueue_get_user_data(key);
+ }
+
+ int poll(Key **key, pj_ssize_t *bytes_status, Operation *op, const PJ_Time_Val *timeout)
+ {
+ return pj_ioqueue_poll(this->ioq_(), key, bytes_status, (pj_ioqueue_operation_e*)op, timeout);
+ }
+
+#if PJ_HAS_TCP
+ pj_status_t connect(Key *key, const pj_sockaddr_t *addr, int addrlen)
+ {
+ return pj_ioqueue_connect(this->ioq_(), key, addr, addrlen);
+ }
+
+ pj_status_t accept(Key *key, PJ_Socket *sock, pj_sockaddr_t *local, pj_sockaddr_t *remote, int *addrlen)
+ {
+ return pj_ioqueue_accept(this->ioq_(), key, &sock->get_handle(), local, remote, addrlen);
+ }
+#endif
+
+ int read(Key *key, void *buf, pj_size_t len)
+ {
+ return pj_ioqueue_read(this->ioq_(), key, buf, len);
+ }
+
+ int recvfrom(Key *key, void *buf, pj_size_t len, pj_sockaddr_t *addr, int *addrlen)
+ {
+ return pj_ioqueue_recvfrom(this->ioq_(), key, buf, len, addr, addrlen);
+ }
+
+ int write(Key *key, const void *data, pj_size_t len)
+ {
+ return pj_ioqueue_write(this->ioq_(), key, data, len);
+ }
+
+ int sendto(Key *key, const void *data, pj_size_t len, const pj_sockaddr_t *addr, int addrlen)
+ {
+ return pj_ioqueue_sendto(this->ioq_(), key, data, len, addr, addrlen);
+ }
+};
+
+#endif /* __PJPP_IOQUEUE_H__ */
diff --git a/pjlib/src/pj++/list.hpp b/pjlib/src/pj++/list.hpp
new file mode 100644
index 00000000..76452917
--- /dev/null
+++ b/pjlib/src/pj++/list.hpp
@@ -0,0 +1,182 @@
+/* $Header: /pjproject/pjlib/src/pj++/list.hpp 2 2/24/05 11:23a Bennylp $ */
+#ifndef __PJPP_LIST_H__
+#define __PJPP_LIST_H__
+
+#include <pj/list.h>
+
+template <typename T>
+struct PJ_List_Node
+{
+ PJ_DECL_LIST_MEMBER(T)
+};
+
+
+template <class Node>
+class PJ_List
+{
+public:
+ PJ_List() { pj_list_init(&root_); if (0) compiletest(); }
+ ~PJ_List() {}
+
+ class const_iterator
+ {
+ public:
+ const_iterator() : node_(NULL) {}
+ const_iterator(const Node *nd) : node_((Node*)nd) {}
+ const Node * operator *() { return node_; }
+ const Node * operator -> () { return node_; }
+ const_iterator operator++() { return const_iterator(node_->next); }
+ bool operator==(const const_iterator &rhs) { return node_ == rhs.node_; }
+ bool operator!=(const const_iterator &rhs) { return node_ != rhs.node_; }
+
+ protected:
+ Node *node_;
+ };
+
+ class iterator : public const_iterator
+ {
+ public:
+ iterator() {}
+ iterator(Node *nd) : const_iterator(nd) {}
+ Node * operator *() { return node_; }
+ Node * operator -> () { return node_; }
+ iterator operator++() { return iterator(node_->next); }
+ bool operator==(const iterator &rhs) { return node_ == rhs.node_; }
+ bool operator!=(const iterator &rhs) { return node_ != rhs.node_; }
+ };
+
+ bool empty() const
+ {
+ return pj_list_empty(&root_);
+ }
+
+ iterator begin()
+ {
+ return iterator(root_.next);
+ }
+
+ const_iterator begin() const
+ {
+ return const_iterator(root_.next);
+ }
+
+ const_iterator end() const
+ {
+ return const_iterator((Node*)&root_);
+ }
+
+ iterator end()
+ {
+ return iterator((Node*)&root_);
+ }
+
+ void insert_before (iterator &pos, Node *node)
+ {
+ pj_list_insert_before( *pos, node );
+ }
+
+ void insert_after(iterator &pos, Node *node)
+ {
+ pj_list_insert_after(*pos, node);
+ }
+
+ void merge_first(Node *list2)
+ {
+ pj_list_merge_first(&root_, list2);
+ }
+
+ void merge_last(PJ_List *list)
+ {
+ pj_list_merge_last(&root_, &list->root_);
+ }
+
+ void insert_nodes_before(iterator &pos, PJ_List *list2)
+ {
+ pj_list_insert_nodes_before(*pos, &list2->root_);
+ }
+
+ void insert_nodes_after(iterator &pos, PJ_List *list2)
+ {
+ pj_list_insert_nodes_after(*pos, &list2->root_);
+ }
+
+ void erase(iterator &it)
+ {
+ pj_list_erase(*it);
+ }
+
+ Node *front()
+ {
+ return root_.next;
+ }
+
+ const Node *front() const
+ {
+ return root_.next;
+ }
+
+ void pop_front()
+ {
+ pj_list_erase(root_.next);
+ }
+
+ Node *back()
+ {
+ return root_.prev;
+ }
+
+ const Node *back() const
+ {
+ return root_.prev;
+ }
+
+ void pop_back()
+ {
+ pj_list_erase(root_.prev);
+ }
+
+ iterator find(Node *node)
+ {
+ Node *n = pj_list_find_node(&root_, node);
+ return n ? iterator(n) : end();
+ }
+
+ const_iterator find(Node *node) const
+ {
+ Node *n = pj_list_find_node(&root_, node);
+ return n ? const_iterator(n) : end();
+ }
+
+ void push_back(Node *node)
+ {
+ pj_list_insert_after(root_.prev, node);
+ }
+
+ void push_front(Node *node)
+ {
+ pj_list_insert_before(root_.next, node);
+ }
+
+ void clear()
+ {
+ root_.next = &root_;
+ root_.prev = &root_;
+ }
+
+private:
+ struct RootNode
+ {
+ PJ_DECL_LIST_MEMBER(Node)
+ } root_;
+
+ void compiletest()
+ {
+ // If you see error in this line,
+ // it's because Node is not derived from PJ_List_Node.
+ Node *n = (Node*)0;
+ n = n->next; n = n->prev;
+ }
+};
+
+
+#endif /* __PJPP_LIST_H__ */
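Usage sketch (illustrative, not part of the commit) for the PJ_List wrapper above; IntNode and sum_list are made-up names. Nodes carry their own link pointers via PJ_List_Node, so the list itself never allocates.

    #include <pj++/list.hpp>

    /* PJ_List_Node<> injects the next/prev pointers that pj_list needs. */
    struct IntNode : public PJ_List_Node<IntNode>
    {
        int value;
    };

    static int sum_list()
    {
        PJ_List<IntNode> lst;
        IntNode a, b;
        a.value = 1; b.value = 2;

        lst.push_back(&a);              /* links the node, no copy is made   */
        lst.push_back(&b);

        int sum = 0;
        PJ_List<IntNode>::iterator it = lst.begin(), end = lst.end();
        for (; it != end; ++it)
            sum += (*it)->value;        /* operator* yields the node pointer */
        return sum;                     /* 3 */
    }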
diff --git a/pjlib/src/pj++/os.hpp b/pjlib/src/pj++/os.hpp
new file mode 100644
index 00000000..c3827528
--- /dev/null
+++ b/pjlib/src/pj++/os.hpp
@@ -0,0 +1,342 @@
+/* $Header: /pjproject/pjlib/src/pj++/os.hpp 2 2/24/05 11:23a Bennylp $ */
+#ifndef __PJPP_OS_H__
+#define __PJPP_OS_H__
+
+#include <pj/os.h>
+#include <pj++/types.hpp>
+#include <pj++/pool.hpp>
+
+class PJ_Thread
+{
+public:
+ enum Flags
+ {
+ FLAG_SUSPENDED = PJ_THREAD_SUSPENDED
+ };
+
+ static PJ_Thread *create( PJ_Pool *pool, const char *thread_name,
+ pj_thread_proc *proc, void *arg,
+ pj_size_t stack_size, void *stack_ptr,
+ unsigned flags)
+ {
+ return (PJ_Thread*) pj_thread_create( pool->pool_(), thread_name, proc, arg, stack_size, stack_ptr, flags);
+ }
+
+ static PJ_Thread *register_current_thread(const char *name, pj_thread_desc desc)
+ {
+ return (PJ_Thread*) pj_thread_register(name, desc);
+ }
+
+ static PJ_Thread *get_current_thread()
+ {
+ return (PJ_Thread*) pj_thread_this();
+ }
+
+ static pj_status_t sleep(unsigned msec)
+ {
+ return pj_thread_sleep(msec);
+ }
+
+ static pj_status_t usleep(unsigned usec)
+ {
+ return pj_thread_usleep(usec);
+ }
+
+ pj_thread_t *pj_thread_t_()
+ {
+ return (pj_thread_t*)this;
+ }
+
+ const char *get_name()
+ {
+ return pj_thread_get_name( this->pj_thread_t_() );
+ }
+
+ pj_status_t resume()
+ {
+ return pj_thread_resume( this->pj_thread_t_() );
+ }
+
+ pj_status_t join()
+ {
+ return pj_thread_join( this->pj_thread_t_() );
+ }
+
+ pj_status_t destroy()
+ {
+ return pj_thread_destroy( this->pj_thread_t_() );
+ }
+};
+
+
+class PJ_Thread_Local
+{
+public:
+ static PJ_Thread_Local *alloc()
+ {
+ long index = pj_thread_local_alloc();
+ return index < 0 ? NULL : (PJ_Thread_Local*)index;
+ }
+ void free()
+ {
+ pj_thread_local_free( this->tls_() );
+ }
+
+ long tls_() const
+ {
+ return (long)this;
+ }
+
+ void set(void *value)
+ {
+ pj_thread_local_set( this->tls_(), value );
+ }
+
+ void *get()
+ {
+ return pj_thread_local_get( this->tls_() );
+ }
+};
+
+
+class PJ_Atomic
+{
+public:
+ static PJ_Atomic *create(PJ_Pool *pool, long initial)
+ {
+ return (PJ_Atomic*) pj_atomic_create(pool->pool_(), initial);
+ }
+
+ pj_atomic_t *pj_atomic_t_()
+ {
+ return (pj_atomic_t*)this;
+ }
+
+ pj_status_t destroy()
+ {
+ return pj_atomic_destroy( this->pj_atomic_t_() );
+ }
+
+ long set(long val)
+ {
+ return pj_atomic_set( this->pj_atomic_t_(), val);
+ }
+
+ long get()
+ {
+ return pj_atomic_get( this->pj_atomic_t_() );
+ }
+
+ long inc()
+ {
+ return pj_atomic_inc( this->pj_atomic_t_() );
+ }
+
+ long dec()
+ {
+ return pj_atomic_dec( this->pj_atomic_t_() );
+ }
+};
+
+
+class PJ_Mutex
+{
+public:
+ enum Type
+ {
+ DEFAULT = PJ_MUTEX_DEFAULT,
+ SIMPLE = PJ_MUTEX_SIMPLE,
+ RECURSE = PJ_MUTEX_RECURSE,
+ };
+
+ static PJ_Mutex *create( PJ_Pool *pool, const char *name, Type type)
+ {
+ return (PJ_Mutex*) pj_mutex_create( pool->pool_(), name, type);
+ }
+
+ pj_mutex_t *pj_mutex_()
+ {
+ return (pj_mutex_t*)this;
+ }
+
+ pj_status_t destroy()
+ {
+ return pj_mutex_destroy( this->pj_mutex_() );
+ }
+
+ pj_status_t lock()
+ {
+ return pj_mutex_lock( this->pj_mutex_() );
+ }
+
+ pj_status_t unlock()
+ {
+ return pj_mutex_unlock( this->pj_mutex_() );
+ }
+
+ pj_status_t trylock()
+ {
+ return pj_mutex_trylock( this->pj_mutex_() );
+ }
+
+#if PJ_DEBUG
+ pj_status_t is_locked()
+ {
+ return pj_mutex_is_locked( this->pj_mutex_() );
+ }
+#endif
+};
+
+
+class PJ_Semaphore
+{
+public:
+ static PJ_Semaphore *create( PJ_Pool *pool, const char *name, unsigned initial, unsigned max)
+ {
+ return (PJ_Semaphore*) pj_sem_create( pool->pool_(), name, initial, max);
+ }
+
+ pj_sem_t *pj_sem_t_()
+ {
+ return (pj_sem_t*)this;
+ }
+
+ pj_status_t destroy()
+ {
+ return pj_sem_destroy(this->pj_sem_t_());
+ }
+
+ pj_status_t wait()
+ {
+ return pj_sem_wait(this->pj_sem_t_());
+ }
+
+ pj_status_t lock()
+ {
+ return wait();
+ }
+
+ pj_status_t trywait()
+ {
+ return pj_sem_trywait(this->pj_sem_t_());
+ }
+
+ pj_status_t trylock()
+ {
+ return trywait();
+ }
+
+ pj_status_t post()
+ {
+ return pj_sem_post(this->pj_sem_t_());
+ }
+
+ pj_status_t unlock()
+ {
+ return post();
+ }
+};
+
+
+class PJ_Event
+{
+public:
+ static PJ_Event *create( PJ_Pool *pool, const char *name, bool manual_reset, bool initial)
+ {
+ return (PJ_Event*) pj_event_create(pool->pool_(), name, manual_reset, initial);
+ }
+
+ pj_event_t *pj_event_t_()
+ {
+ return (pj_event_t*)this;
+ }
+
+ pj_status_t destroy()
+ {
+ return pj_event_destroy(this->pj_event_t_());
+ }
+
+ pj_status_t wait()
+ {
+ return pj_event_wait(this->pj_event_t_());
+ }
+
+ pj_status_t trywait()
+ {
+ return pj_event_trywait(this->pj_event_t_());
+ }
+
+ pj_status_t set()
+ {
+ return pj_event_set(this->pj_event_t_());
+ }
+
+ pj_status_t pulse()
+ {
+ return pj_event_pulse(this->pj_event_t_());
+ }
+
+ pj_status_t reset()
+ {
+ return pj_event_reset(this->pj_event_t_());
+ }
+};
+
+class PJ_OS
+{
+public:
+ static pj_status_t gettimeofday( PJ_Time_Val *tv )
+ {
+ return pj_gettimeofday(tv);
+ }
+
+ static pj_status_t time_decode( const PJ_Time_Val *tv, pj_parsed_time *pt )
+ {
+ return pj_time_decode(tv, pt);
+ }
+
+ static pj_status_t time_encode(const pj_parsed_time *pt, PJ_Time_Val *tv)
+ {
+ return pj_time_encode(pt, tv);
+ }
+
+ static pj_status_t time_local_to_gmt( PJ_Time_Val *tv )
+ {
+ return pj_time_local_to_gmt( tv );
+ }
+
+ static pj_status_t time_gmt_to_local( PJ_Time_Val *tv)
+ {
+ return pj_time_gmt_to_local( tv );
+ }
+};
+
+
+inline pj_status_t PJ_Time_Val::gettimeofday()
+{
+ return PJ_OS::gettimeofday(this);
+}
+
+inline pj_parsed_time PJ_Time_Val::decode()
+{
+ pj_parsed_time pt;
+ PJ_OS::time_decode(this, &pt);
+ return pt;
+}
+
+inline pj_status_t PJ_Time_Val::encode(const pj_parsed_time *pt)
+{
+ return PJ_OS::time_encode(pt, this);
+}
+
+inline pj_status_t PJ_Time_Val::to_gmt()
+{
+ return PJ_OS::time_local_to_gmt(this);
+}
+
+inline pj_status_t PJ_Time_Val::to_local()
+{
+ return PJ_OS::time_gmt_to_local(this);
+}
+
+#endif /* __PJPP_OS_H__ */
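Usage sketch (illustrative, not part of the commit) for the PJ_Mutex wrapper above; assumes pj_init() has been called and a PJ_Pool* is at hand.

    #include <pj++/os.hpp>

    static void mutex_demo(PJ_Pool *pool)
    {
        PJ_Mutex *mutex = PJ_Mutex::create(pool, "demo", PJ_Mutex::SIMPLE);
        if (!mutex)
            return;

        if (mutex->lock() == 0) {       /* 0 == PJ_SUCCESS */
            /* ...critical section... */
            mutex->unlock();
        }
        mutex->destroy();
    }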
diff --git a/pjlib/src/pj++/pj++.cpp b/pjlib/src/pj++/pj++.cpp
new file mode 100644
index 00000000..1a41ec32
--- /dev/null
+++ b/pjlib/src/pj++/pj++.cpp
@@ -0,0 +1,15 @@
+/* $Header: /pjproject/pjlib/src/pj++/pj++.cpp 4 4/17/05 11:59a Bennylp $ */
+#include <pj++/scanner.hpp>
+#include <pj++/timer.hpp>
+#include <pj/except.h>
+
+void PJ_Scanner::syntax_error_handler_throw_pj(pj_scanner *)
+{
+ PJ_THROW( PJ_Scanner::SYNTAX_ERROR );
+}
+
+void PJ_Timer_Entry::timer_heap_callback(pj_timer_heap_t *, pj_timer_entry *e)
+{
+ PJ_Timer_Entry *entry = static_cast<PJ_Timer_Entry*>(e);
+ entry->on_timeout();
+}
diff --git a/pjlib/src/pj++/pool.hpp b/pjlib/src/pj++/pool.hpp
new file mode 100644
index 00000000..9ceffa7d
--- /dev/null
+++ b/pjlib/src/pj++/pool.hpp
@@ -0,0 +1,84 @@
+/* $Header: /pjproject/pjlib/src/pj++/pool.hpp 4 8/24/05 10:29a Bennylp $ */
+#ifndef __PJPP_POOL_H__
+#define __PJPP_POOL_H__
+
+#include <pj/pool.h>
+
+class PJ_Pool
+{
+public:
+ const char *getobjname() const
+ {
+ return pj_pool_getobjname(this->pool_());
+ }
+
+ pj_pool_t *pool_()
+ {
+ return (pj_pool_t*)this;
+ }
+
+ const pj_pool_t *pool_() const
+ {
+ return (const pj_pool_t*)this;
+ }
+
+ void release()
+ {
+ pj_pool_release(this->pool_());
+ }
+
+ void reset()
+ {
+ pj_pool_reset(this->pool_());
+ }
+
+ pj_size_t get_capacity()
+ {
+ return pj_pool_get_capacity(this->pool_());
+ }
+
+ pj_size_t get_used_size()
+ {
+ return pj_pool_get_used_size(this->pool_());
+ }
+
+ void *alloc(pj_size_t size)
+ {
+ return pj_pool_alloc(this->pool_(), size);
+ }
+
+ void *calloc(pj_size_t count, pj_size_t elem)
+ {
+ return pj_pool_calloc(this->pool_(), count, elem);
+ }
+};
+
+class PJ_Caching_Pool
+{
+public:
+ void init(pj_size_t max_capacity,
+ const pj_pool_factory_policy *pol=&pj_pool_factory_default_policy)
+ {
+ pj_caching_pool_init(&cp_, pol, max_capacity);
+ }
+
+ void destroy()
+ {
+ pj_caching_pool_destroy(&cp_);
+ }
+
+ PJ_Pool *create_pool(const char *name, pj_size_t initial_size, pj_size_t increment_size, pj_pool_callback *callback)
+ {
+ return (PJ_Pool*) (*cp_.factory.create_pool)(&cp_.factory, name, initial_size, increment_size, callback);
+ }
+
+ void release_pool( PJ_Pool *pool )
+ {
+ pj_pool_release(pool->pool_());
+ }
+
+private:
+ pj_caching_pool cp_;
+};
+
+#endif /* __PJPP_POOL_H__ */
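Usage sketch (illustrative, not part of the commit) for the pool wrappers above. Memory taken from a PJ_Pool is only reclaimed when the whole pool is released; the sizes below are arbitrary.

    #include <pj++/pool.hpp>

    static void pool_demo()
    {
        PJ_Caching_Pool cp;
        cp.init(1024 * 1024);           /* keep at most ~1 MB of idle pools */

        PJ_Pool *pool = cp.create_pool("demo", 4000, 4000, NULL);
        void *buf = pool->alloc(256);   /* no per-allocation free            */
        (void)buf;

        cp.release_pool(pool);          /* back to the cache for reuse       */
        cp.destroy();
    }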
diff --git a/pjlib/src/pj++/proactor.cpp b/pjlib/src/pj++/proactor.cpp
new file mode 100644
index 00000000..58c342e0
--- /dev/null
+++ b/pjlib/src/pj++/proactor.cpp
@@ -0,0 +1,296 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj++/proactor.cpp 7 10/29/05 11:51a Bennylp $ */
+#include <pj++/proactor.hpp>
+#include <pj/string.h> // memset
+
+static struct pj_ioqueue_callback ioqueue_cb =
+{
+ &PJ_Event_Handler::read_complete_cb,
+ &PJ_Event_Handler::write_complete_cb,
+ &PJ_Event_Handler::accept_complete_cb,
+ &PJ_Event_Handler::connect_complete_cb,
+};
+
+PJ_Event_Handler::PJ_Event_Handler()
+: proactor_(NULL), key_(NULL)
+{
+ pj_memset(&timer_, 0, sizeof(timer_));
+ timer_.user_data = this;
+ timer_.cb = &timer_callback;
+}
+
+PJ_Event_Handler::~PJ_Event_Handler()
+{
+}
+
+#if PJ_HAS_TCP
+bool PJ_Event_Handler::connect(const PJ_INET_Addr &addr)
+{
+ pj_assert(key_ != NULL && proactor_ != NULL);
+
+ if (key_ == NULL || proactor_ == NULL)
+ return false;
+
+ int status = pj_ioqueue_connect(proactor_->get_io_queue(), key_,
+ &addr, sizeof(PJ_INET_Addr));
+ if (status == 0) {
+ on_connect_complete(0);
+ return true;
+ } else if (status == PJ_IOQUEUE_PENDING) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool PJ_Event_Handler::accept(PJ_Socket *sock, PJ_INET_Addr *local, PJ_INET_Addr *remote)
+{
+ pj_assert(key_ != NULL && proactor_ != NULL);
+
+ if (key_ == NULL || proactor_ == NULL)
+ return false;
+
+ /* Use the scratch addr-len member so the value stays valid while the
+  * asynchronous accept is pending. */
+ tmp_recvfrom_addr_len = sizeof(PJ_INET_Addr);
+ int status = pj_ioqueue_accept(proactor_->get_io_queue(), key_,
+ &sock->get_handle(),
+ local, remote,
+ (remote? &tmp_recvfrom_addr_len : NULL));
+ if (status == 0) {
+ on_accept_complete(0);
+ return true;
+ } else if (status == PJ_IOQUEUE_PENDING) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+#endif
+
+bool PJ_Event_Handler::read(void *buf, pj_size_t len)
+{
+ pj_assert(key_ != NULL && proactor_ != NULL);
+
+ if (key_ == NULL || proactor_ == NULL)
+ return false;
+
+ int bytes_status = pj_ioqueue_read(proactor_->get_io_queue(),
+ key_, buf, len);
+ if (bytes_status >= 0) {
+ on_read_complete(bytes_status);
+ return true;
+ } else if (bytes_status == PJ_IOQUEUE_PENDING) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool PJ_Event_Handler::recvfrom(void *buf, pj_size_t len, PJ_INET_Addr *addr)
+{
+ pj_assert(key_ != NULL && proactor_ != NULL);
+
+ if (key_ == NULL || proactor_ == NULL)
+ return false;
+
+
+ tmp_recvfrom_addr_len = sizeof(PJ_INET_Addr);
+
+ int bytes_status = pj_ioqueue_recvfrom(proactor_->get_io_queue(),
+ key_, buf, len,
+ addr,
+ (addr? &tmp_recvfrom_addr_len : NULL));
+ if (bytes_status >= 0) {
+ on_read_complete(bytes_status);
+ return true;
+ } else if (bytes_status == PJ_IOQUEUE_PENDING) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool PJ_Event_Handler::write(const void *data, pj_size_t len)
+{
+ pj_assert(key_ != NULL && proactor_ != NULL);
+
+ if (key_ == NULL || proactor_ == NULL)
+ return false;
+
+ int bytes_status = pj_ioqueue_write(proactor_->get_io_queue(),
+ key_, data, len);
+ if (bytes_status >= 0) {
+ on_write_complete(bytes_status);
+ return true;
+ } else if (bytes_status == PJ_IOQUEUE_PENDING) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool PJ_Event_Handler::sendto(const void *data, pj_size_t len, const PJ_INET_Addr &addr)
+{
+ pj_assert(key_ != NULL && proactor_ != NULL);
+
+ if (key_ == NULL || proactor_ == NULL)
+ return false;
+
+ int bytes_status = pj_ioqueue_sendto(proactor_->get_io_queue(),
+ key_, data, len,
+ &addr, sizeof(PJ_INET_Addr));
+ if (bytes_status >= 0) {
+ on_write_complete(bytes_status);
+ return true;
+ } else if (bytes_status == PJ_IOQUEUE_PENDING) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+
+void PJ_Event_Handler::read_complete_cb(pj_ioqueue_key_t *key, pj_ssize_t bytes_read)
+{
+ PJ_Event_Handler *handler =
+ (PJ_Event_Handler*) pj_ioqueue_get_user_data(key);
+
+ handler->on_read_complete(bytes_read);
+}
+
+void PJ_Event_Handler::write_complete_cb(pj_ioqueue_key_t *key, pj_ssize_t bytes_sent)
+{
+ PJ_Event_Handler *handler =
+ (PJ_Event_Handler*) pj_ioqueue_get_user_data(key);
+
+ handler->on_write_complete(bytes_sent);
+}
+
+void PJ_Event_Handler::accept_complete_cb(pj_ioqueue_key_t *key, int status)
+{
+#if PJ_HAS_TCP
+ PJ_Event_Handler *handler =
+ (PJ_Event_Handler*) pj_ioqueue_get_user_data(key);
+
+ handler->on_accept_complete(status);
+#endif
+}
+
+void PJ_Event_Handler::connect_complete_cb(pj_ioqueue_key_t *key, int status)
+{
+#if PJ_HAS_TCP
+ PJ_Event_Handler *handler =
+ (PJ_Event_Handler*) pj_ioqueue_get_user_data(key);
+
+ handler->on_connect_complete(status);
+#endif
+}
+
+void PJ_Event_Handler::timer_callback( pj_timer_heap_t *timer_heap,
+ struct pj_timer_entry *entry)
+{
+ PJ_Event_Handler *handler = (PJ_Event_Handler*) entry->user_data;
+ handler->on_timeout(entry->id);
+}
+
+
+PJ_Proactor *PJ_Proactor::create(PJ_Pool *pool, pj_size_t max_fd,
+ pj_size_t timer_entry_count, unsigned timer_flags)
+{
+ PJ_Proactor *p = (PJ_Proactor*) pool->calloc(1, sizeof(PJ_Proactor));
+ if (!p) return NULL;
+
+ p->ioq_ = pj_ioqueue_create(pool->pool_(), max_fd);
+ if (!p->ioq_) return NULL;
+
+ p->th_ = pj_timer_heap_create(pool->pool_(), timer_entry_count, timer_flags);
+ if (!p->th_) return NULL;
+
+ return p;
+}
+
+void PJ_Proactor::destroy()
+{
+ pj_ioqueue_destroy(ioq_);
+}
+
+bool PJ_Proactor::register_handler(PJ_Pool *pool, PJ_Event_Handler *handler)
+{
+ pj_assert(handler->key_ == NULL && handler->proactor_ == NULL);
+
+ if (handler->key_ != NULL)
+ return false;
+
+ handler->key_ = pj_ioqueue_register_sock(pool->pool_(), ioq_,
+ handler->get_handle(),
+ handler, &ioqueue_cb);
+ if (handler->key_ != NULL) {
+ handler->proactor_ = this;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+void PJ_Proactor::unregister_handler(PJ_Event_Handler *handler)
+{
+ if (handler->key_ == NULL) return;
+ pj_ioqueue_unregister(ioq_, handler->key_);
+ handler->key_ = NULL;
+ handler->proactor_ = NULL;
+}
+
+bool PJ_Proactor::schedule_timer( pj_timer_heap_t *timer, PJ_Event_Handler *handler,
+ const PJ_Time_Val &delay, int id)
+{
+ handler->timer_.id = id;
+ return pj_timer_heap_schedule(timer, &handler->timer_, &delay) == 0;
+}
+
+bool PJ_Proactor::schedule_timer(PJ_Event_Handler *handler, const PJ_Time_Val &delay,
+ int id)
+{
+ return schedule_timer(th_, handler, delay, id);
+}
+
+bool PJ_Proactor::cancel_timer(PJ_Event_Handler *handler)
+{
+ return pj_timer_heap_cancel(th_, &handler->timer_) == 1;
+}
+
+bool PJ_Proactor::handle_events(PJ_Time_Val *max_timeout)
+{
+ pj_time_val timeout;
+
+ timeout.sec = timeout.msec = 0; /* timeout is 'out' var. */
+
+ if (pj_timer_heap_poll( th_, &timeout ) > 0)
+ return true;
+
+ if (timeout.sec < 0) timeout.sec = PJ_MAXINT32;
+
+ /* If caller specifies maximum time to wait, then compare the value with
+ * the timeout to wait from timer, and use the minimum value.
+ */
+ if (max_timeout && PJ_TIME_VAL_GT(timeout, *max_timeout)) {
+ timeout = *max_timeout;
+ }
+
+ /* Poll events in ioqueue. */
+ int result;
+
+ result = pj_ioqueue_poll(ioq_, &timeout);
+ if (result != 1)
+ return false;
+
+ return true;
+}
+
+pj_ioqueue_t *PJ_Proactor::get_io_queue()
+{
+ return ioq_;
+}
+
+pj_timer_heap_t *PJ_Proactor::get_timer_heap()
+{
+ return th_;
+}
+
diff --git a/pjlib/src/pj++/proactor.hpp b/pjlib/src/pj++/proactor.hpp
new file mode 100644
index 00000000..d5dc0367
--- /dev/null
+++ b/pjlib/src/pj++/proactor.hpp
@@ -0,0 +1,86 @@
+/* $Header: /pjproject/pjlib/src/pj++/proactor.hpp 3 8/24/05 10:29a Bennylp $ */
+#ifndef __PJPP_EVENT_HANDLER_H__
+#define __PJPP_EVENT_HANDLER_H__
+
+#include <pj/ioqueue.h>
+#include <pj++/pool.hpp>
+#include <pj++/sock.hpp>
+#include <pj++/timer.hpp>
+
+class PJ_Proactor;
+
+
+class PJ_Event_Handler
+{
+ friend class PJ_Proactor;
+public:
+ PJ_Event_Handler();
+ virtual ~PJ_Event_Handler();
+
+ virtual pj_oshandle_t get_handle() = 0;
+
+ bool read(void *buf, pj_size_t len);
+ bool recvfrom(void *buf, pj_size_t len, PJ_INET_Addr *addr);
+ bool write(const void *data, pj_size_t len);
+ bool sendto(const void *data, pj_size_t len, const PJ_INET_Addr &addr);
+#if PJ_HAS_TCP
+ bool connect(const PJ_INET_Addr &addr);
+ bool accept(PJ_Socket *sock, PJ_INET_Addr *local=NULL, PJ_INET_Addr *remote=NULL);
+#endif
+
+protected:
+ //
+ // Overridables
+ //
+ virtual void on_timeout(int data) {}
+ virtual void on_read_complete(pj_ssize_t bytes_read) {}
+ virtual void on_write_complete(pj_ssize_t bytes_sent) {}
+#if PJ_HAS_TCP
+ virtual void on_connect_complete(int status) {}
+ virtual void on_accept_complete(int status) {}
+#endif
+
+private:
+ PJ_Proactor *proactor_;
+ pj_ioqueue_key_t *key_;
+ pj_timer_entry timer_;
+ int tmp_recvfrom_addr_len;
+
+public:
+ // Internal IO Queue/timer callback.
+ static void timer_callback( pj_timer_heap_t *timer_heap, struct pj_timer_entry *entry);
+ static void read_complete_cb(pj_ioqueue_key_t *key, pj_ssize_t bytes_read);
+ static void write_complete_cb(pj_ioqueue_key_t *key, pj_ssize_t bytes_sent);
+ static void accept_complete_cb(pj_ioqueue_key_t *key, int status);
+ static void connect_complete_cb(pj_ioqueue_key_t *key, int status);
+};
+
+class PJ_Proactor
+{
+public:
+ static PJ_Proactor *create(PJ_Pool *pool, pj_size_t max_fd,
+ pj_size_t timer_entry_count, unsigned timer_flags=0);
+
+ void destroy();
+
+ bool register_handler(PJ_Pool *pool, PJ_Event_Handler *handler);
+ void unregister_handler(PJ_Event_Handler *handler);
+
+ static bool schedule_timer( pj_timer_heap_t *timer, PJ_Event_Handler *handler,
+ const PJ_Time_Val &delay, int id=-1);
+ bool schedule_timer(PJ_Event_Handler *handler, const PJ_Time_Val &delay, int id=-1);
+ bool cancel_timer(PJ_Event_Handler *handler);
+
+ bool handle_events(PJ_Time_Val *timeout);
+
+ pj_ioqueue_t *get_io_queue();
+ pj_timer_heap_t *get_timer_heap();
+
+private:
+ pj_ioqueue_t *ioq_;
+ pj_timer_heap_t *th_;
+
+ PJ_Proactor() {}
+};
+
+#endif /* __PJPP_EVENT_HANDLER_H__ */
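Sketch (illustrative, not part of the commit) of a concrete PJ_Event_Handler; Udp_Handler is a made-up name. The socket would be created and bound elsewhere, the handler registered with PJ_Proactor::register_handler() before the first read() is issued, and PJ_Proactor::handle_events() driven from the application's main loop.

    #include <pj++/proactor.hpp>

    class Udp_Handler : public PJ_Event_Handler
    {
    public:
        PJ_Sock_Dgram sock;
        char buffer[512];

        virtual pj_oshandle_t get_handle()
        {
            return (pj_oshandle_t) sock.get_handle();
        }

    protected:
        virtual void on_read_complete(pj_ssize_t bytes_read)
        {
            if (bytes_read > 0)
                read(buffer, sizeof(buffer));   /* keep a read pending */
        }
    };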
diff --git a/pjlib/src/pj++/scanner.hpp b/pjlib/src/pj++/scanner.hpp
new file mode 100644
index 00000000..1ab44e01
--- /dev/null
+++ b/pjlib/src/pj++/scanner.hpp
@@ -0,0 +1,171 @@
+/* $Header: /pjproject/pjlib/src/pj++/scanner.hpp 3 2/27/05 10:09p Bennylp $ */
+#ifndef __PJPP_SCANNER_H__
+#define __PJPP_SCANNER_H__
+
+#include <pj/scanner.h>
+#include <pj++/string.hpp>
+
+class PJ_CharSpec
+{
+public:
+ PJ_CharSpec() { pj_cs_init(cs__); }
+
+ void set(int c) { pj_cs_set(cs__, c); }
+ void add_range(int begin, int end) { pj_cs_add_range(cs__, begin, end); }
+ void add_alpha() { pj_cs_add_alpha(cs__); }
+ void add_num() { pj_cs_add_num(cs__); }
+ void add_str(const char *str) { pj_cs_add_str(cs__, str); }
+ void del_range(int begin, int end) { pj_cs_del_range(cs__, begin, end); }
+ void del_str(const char *str) { pj_cs_del_str(cs__, str); }
+ void invert() { pj_cs_invert(cs__); }
+ int match(int c) { return pj_cs_match(cs__, c); }
+
+ pj_char_spec_element_t *cs_()
+ {
+ return cs__;
+ }
+
+ const pj_char_spec_element_t *cs_() const
+ {
+ return cs__;
+ }
+
+private:
+ pj_char_spec cs__;
+};
+
+class PJ_Scanner
+{
+public:
+ PJ_Scanner() {}
+
+ enum
+ {
+ SYNTAX_ERROR = 101
+ };
+ static void syntax_error_handler_throw_pj(pj_scanner *);
+
+ typedef pj_scan_state State;
+
+ void init(char *buf, int len, unsigned options=PJ_SCAN_AUTOSKIP_WS,
+ pj_syn_err_func_ptr callback = &syntax_error_handler_throw_pj)
+ {
+ pj_scan_init(&scanner_, buf, len, options, callback);
+ }
+
+ void fini()
+ {
+ pj_scan_fini(&scanner_);
+ }
+
+ int eof() const
+ {
+ return pj_scan_is_eof(&scanner_);
+ }
+
+ int peek_char() const
+ {
+ return *scanner_.current;
+ }
+
+ int peek(const PJ_CharSpec *cs, PJ_String *out)
+ {
+ return pj_scan_peek(&scanner_, cs->cs_(), out);
+ }
+
+ int peek_n(pj_size_t len, PJ_String *out)
+ {
+ return pj_scan_peek_n(&scanner_, len, out);
+ }
+
+ int peek_until(const PJ_CharSpec *cs, PJ_String *out)
+ {
+ return pj_scan_peek_until(&scanner_, cs->cs_(), out);
+ }
+
+ void get(const PJ_CharSpec *cs, PJ_String *out)
+ {
+ pj_scan_get(&scanner_, cs->cs_(), out);
+ }
+
+ void get_n(unsigned N, PJ_String *out)
+ {
+ pj_scan_get_n(&scanner_, N, out);
+ }
+
+ int get_char()
+ {
+ return pj_scan_get_char(&scanner_);
+ }
+
+ void get_quote(int begin_quote, int end_quote, PJ_String *out)
+ {
+ pj_scan_get_quote(&scanner_, begin_quote, end_quote, out);
+ }
+
+ void get_newline()
+ {
+ pj_scan_get_newline(&scanner_);
+ }
+
+ void get_until(const PJ_CharSpec *cs, PJ_String *out)
+ {
+ pj_scan_get_until(&scanner_, cs->cs_(), out);
+ }
+
+ void get_until_ch(int until_ch, PJ_String *out)
+ {
+ pj_scan_get_until_ch(&scanner_, until_ch, out);
+ }
+
+ void get_until_chr(const char *spec, PJ_String *out)
+ {
+ pj_scan_get_until_chr(&scanner_, spec, out);
+ }
+
+ void advance_n(unsigned N, bool skip_ws=true)
+ {
+ pj_scan_advance_n(&scanner_, N, skip_ws);
+ }
+
+ int strcmp(const char *s, int len)
+ {
+ return pj_scan_strcmp(&scanner_, s, len);
+ }
+
+ int stricmp(const char *s, int len)
+ {
+ return pj_scan_stricmp(&scanner_, s, len);
+ }
+
+ void skip_ws()
+ {
+ pj_scan_skip_whitespace(&scanner_);
+ }
+
+ void save_state(State *state)
+ {
+ pj_scan_save_state(&scanner_, state);
+ }
+
+ void restore_state(State *state)
+ {
+ pj_scan_restore_state(&scanner_, state);
+ }
+
+ int get_pos_line() const
+ {
+ return scanner_.line;
+ }
+
+ int get_pos_col() const
+ {
+ return scanner_.col;
+ }
+
+
+private:
+ pj_scanner scanner_;
+};
+
+#endif /* __PJPP_SCANNER_H__ */
diff --git a/pjlib/src/pj++/sock.hpp b/pjlib/src/pj++/sock.hpp
new file mode 100644
index 00000000..aa62c158
--- /dev/null
+++ b/pjlib/src/pj++/sock.hpp
@@ -0,0 +1,194 @@
+/* $Header: /pjproject/pjlib/src/pj++/sock.hpp 2 2/24/05 11:23a Bennylp $ */
+#ifndef __PJPP_SOCK_H__
+#define __PJPP_SOCK_H__
+
+#include <pj/sock.h>
+
+class PJ_Addr
+{
+};
+
+class PJ_INET_Addr : public pj_sockaddr_in, public PJ_Addr
+{
+public:
+ pj_uint16_t get_port_number() const
+ {
+ return pj_sockaddr_get_port(this);
+ }
+
+ void set_port_number(pj_uint16_t port)
+ {
+ sin_family = PJ_AF_INET;
+ pj_sockaddr_set_port(this, port);
+ }
+
+ pj_uint32_t get_ip_address() const
+ {
+ return pj_sockaddr_get_addr(this);
+ }
+
+ const char *get_address() const
+ {
+ return pj_sockaddr_get_str_addr(this);
+ }
+
+ void set_ip_address(pj_uint32_t addr)
+ {
+ sin_family = PJ_AF_INET;
+ pj_sockaddr_set_addr(this, addr);
+ }
+
+ pj_status_t set_address(const pj_str_t *addr)
+ {
+ return pj_sockaddr_set_str_addr(this, addr);
+ }
+
+ pj_status_t set_address(const char *addr)
+ {
+ return pj_sockaddr_set_str_addr2(this, addr);
+ }
+
+ int cmp(const PJ_INET_Addr &rhs) const
+ {
+ return pj_sockaddr_cmp(this, &rhs);
+ }
+
+ bool operator==(const PJ_INET_Addr &rhs) const
+ {
+ return cmp(rhs) == 0;
+ }
+};
+
+class PJ_Socket
+{
+public:
+ PJ_Socket() {}
+ PJ_Socket(const PJ_Socket &rhs) : sock_(rhs.sock_) {}
+
+ void set_handle(pj_sock_t sock)
+ {
+ sock_ = sock;
+ }
+
+ pj_sock_t get_handle() const
+ {
+ return sock_;
+ }
+
+ pj_sock_t& get_handle()
+ {
+ return sock_;
+ }
+
+ bool socket(int af, int type, int proto, pj_uint32_t flag=0)
+ {
+ sock_ = pj_sock_socket(af, type, proto, flag);
+ return sock_ != -1;
+ }
+
+ bool bind(const PJ_INET_Addr &addr)
+ {
+ return pj_sock_bind(sock_, &addr, sizeof(PJ_INET_Addr)) == 0;
+ }
+
+ bool close()
+ {
+ return pj_sock_close(sock_) == 0;
+ }
+
+ bool getpeername(PJ_INET_Addr *addr)
+ {
+ int namelen = sizeof(PJ_INET_Addr);
+ return pj_sock_getpeername(sock_, addr, &namelen) == 0;
+ }
+
+ bool getsockname(PJ_INET_Addr *addr)
+ {
+ int namelen = sizeof(PJ_INET_Addr);
+ return pj_sock_getsockname(sock_, addr, &namelen) == 0;
+ }
+
+ bool getsockopt(int level, int optname, void *optval, int *optlen)
+ {
+ return pj_sock_getsockopt(sock_, level, optname, optval, optlen) == 0;
+ }
+
+ bool setsockopt(int level, int optname, const void *optval, int optlen)
+ {
+ return pj_sock_setsockopt(sock_, level, optname, optval, optlen) == 0;
+ }
+
+ bool ioctl(long cmd, pj_uint32_t *val)
+ {
+ return pj_sock_ioctl(sock_, cmd, val) == 0;
+ }
+
+ int recv(void *buf, int len, int flag = 0)
+ {
+ return pj_sock_recv(sock_, buf, len, flag);
+ }
+
+ int send(const void *buf, int len, int flag = 0)
+ {
+ return pj_sock_send(sock_, buf, len, flag);
+ }
+
+protected:
+ pj_sock_t sock_;
+};
+
+#if PJ_HAS_TCP
+class PJ_Sock_Stream : public PJ_Socket
+{
+public:
+ PJ_Sock_Stream() {}
+ PJ_Sock_Stream(const PJ_Sock_Stream &rhs) : PJ_Socket(rhs) {}
+ PJ_Sock_Stream &operator=(const PJ_Sock_Stream &rhs) { sock_ = rhs.sock_; return *this; }
+
+ bool listen(int backlog = 5)
+ {
+ return pj_sock_listen(sock_, backlog) == 0;
+ }
+
+ bool accept(PJ_Sock_Stream *new_sock, PJ_INET_Addr *addr, int *addrlen)
+ {
+ pj_sock_t s = pj_sock_accept(sock_, addr, addrlen);
+ if (s == -1)
+ return false;
+ new_sock->set_handle(s);
+ return true;
+ }
+
+ bool connect(const PJ_INET_Addr &addr)
+ {
+ return pj_sock_connect(sock_, &addr, sizeof(PJ_INET_Addr)) == 0;
+ }
+
+ bool shutdown(int how)
+ {
+ return pj_sock_shutdown(sock_, how) == 0;
+ }
+
+};
+#endif
+
+class PJ_Sock_Dgram : public PJ_Socket
+{
+public:
+ PJ_Sock_Dgram() {}
+ PJ_Sock_Dgram(const PJ_Sock_Dgram &rhs) : PJ_Socket(rhs) {}
+ PJ_Sock_Dgram &operator=(const PJ_Sock_Dgram &rhs) { sock_ = rhs.sock_; return *this; }
+
+ int recvfrom(void *buf, int len, int flag, PJ_INET_Addr *fromaddr)
+ {
+ int addrlen = sizeof(PJ_INET_Addr);
+ return pj_sock_recvfrom(sock_, buf, len, flag, fromaddr, &addrlen);
+ }
+
+ int sendto(const void *buf, int len, int flag, const PJ_INET_Addr &addr)
+ {
+ return pj_sock_sendto(sock_, buf, len, flag, &addr, sizeof(PJ_INET_Addr));
+ }
+};
+
+#endif /* __PJPP_SOCK_H__ */
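Sketch (illustrative, not part of the commit) of blocking UDP I/O with the PJ_Sock_Dgram wrapper above; port 5000 is arbitrary.

    #include <pj++/sock.hpp>
    #include <pj/string.h>      /* pj_memset */

    static bool udp_recv_once()
    {
        PJ_Sock_Dgram sock;
        if (!sock.socket(PJ_AF_INET, PJ_SOCK_DGRAM, 0))
            return false;

        PJ_INET_Addr addr;
        pj_memset(&addr, 0, sizeof(addr));
        addr.set_port_number(5000);
        addr.set_ip_address(0);                  /* any interface */
        if (!sock.bind(addr)) {
            sock.close();
            return false;
        }

        char buf[32];
        PJ_INET_Addr src;
        int len = sock.recvfrom(buf, sizeof(buf), 0, &src);  /* blocks */
        sock.close();
        return len > 0;
    }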
diff --git a/pjlib/src/pj++/string.hpp b/pjlib/src/pj++/string.hpp
new file mode 100644
index 00000000..8bbb680d
--- /dev/null
+++ b/pjlib/src/pj++/string.hpp
@@ -0,0 +1,247 @@
+/* $Header: /pjproject/pjlib/src/pj++/string.hpp 2 2/24/05 11:23a Bennylp $ */
+#ifndef __PJPP_STRING_H__
+#define __PJPP_STRING_H__
+
+#include <pj/string.h>
+#include <pj++/pool.hpp>
+
+class PJ_String : public pj_str_t
+{
+public:
+ PJ_String()
+ {
+ pj_assert(sizeof(PJ_String) == sizeof(pj_str_t));
+ ptr=NULL; slen=0;
+ }
+
+ explicit PJ_String(char *str)
+ {
+ set(str);
+ }
+
+ PJ_String(PJ_Pool *pool, const char *src)
+ {
+ set(pool, src);
+ }
+
+ explicit PJ_String(pj_str_t *s)
+ {
+ set(s);
+ }
+
+ PJ_String(PJ_Pool *pool, const pj_str_t *s)
+ {
+ set(pool, s);
+ }
+
+ explicit PJ_String(PJ_String &rhs)
+ {
+ set(rhs);
+ }
+
+ PJ_String(PJ_Pool *pool, const PJ_String &rhs)
+ {
+ set(pool, rhs);
+ }
+
+ PJ_String(char *str, pj_size_t len)
+ {
+ set(str, len);
+ }
+
+ PJ_String(char *begin, char *end)
+ {
+ pj_strset3(this, begin, end);
+ }
+
+ pj_size_t length() const
+ {
+ return pj_strlen(this);
+ }
+
+ pj_size_t size() const
+ {
+ return length();
+ }
+
+ const char *buf() const
+ {
+ return ptr;
+ }
+
+ void set(char *str)
+ {
+ pj_strset2(this, str);
+ }
+
+ void set(PJ_Pool *pool, const char *s)
+ {
+ pj_strdup2(pool->pool_(), this, s);
+ }
+
+ void set(pj_str_t *s)
+ {
+ pj_strassign(this, s);
+ }
+
+ void set(PJ_Pool *pool, const pj_str_t *s)
+ {
+ pj_strdup(pool->pool_(), this, s);
+ }
+
+ void set(char *str, pj_size_t len)
+ {
+ pj_strset(this, str, len);
+ }
+
+ void set(char *begin, char *end)
+ {
+ pj_strset3(this, begin, end);
+ }
+
+ void set(PJ_String &rhs)
+ {
+ pj_strassign(this, &rhs);
+ }
+
+ void set(PJ_Pool *pool, const PJ_String *s)
+ {
+ pj_strdup(pool->pool_(), this, s);
+ }
+
+ void set(PJ_Pool *pool, const PJ_String &s)
+ {
+ pj_strdup(pool->pool_(), this, &s);
+ }
+
+ void strcpy(const pj_str_t *s)
+ {
+ pj_strcpy(this, s);
+ }
+
+ void strcpy(const PJ_String &rhs)
+ {
+ pj_strcpy(this, &rhs);
+ }
+
+ void strcpy(const char *s)
+ {
+ pj_strcpy2(this, s);
+ }
+
+ int strcmp(const char *s) const
+ {
+ return pj_strcmp2(this, s);
+ }
+
+ int strcmp(const pj_str_t *s) const
+ {
+ return pj_strcmp(this, s);
+ }
+
+ int strcmp(const PJ_String &rhs) const
+ {
+ return pj_strcmp(this, &rhs);
+ }
+
+ int strncmp(const char *s, pj_size_t len) const
+ {
+ return pj_strncmp2(this, s, len);
+ }
+
+ int strncmp(const pj_str_t *s, pj_size_t len) const
+ {
+ return pj_strncmp(this, s, len);
+ }
+
+ int strncmp(const PJ_String &rhs, pj_size_t len) const
+ {
+ return pj_strncmp(this, &rhs, len);
+ }
+
+ int stricmp(const char *s) const
+ {
+ return pj_stricmp2(this, s);
+ }
+
+ int stricmp(const pj_str_t *s) const
+ {
+ return pj_stricmp(this, s);
+ }
+
+ int stricmp(const PJ_String &rhs) const
+ {
+ return stricmp(&rhs);
+ }
+
+ int strnicmp(const char *s, pj_size_t len) const
+ {
+ return pj_strnicmp2(this, s, len);
+ }
+
+ int strnicmp(const pj_str_t *s, pj_size_t len) const
+ {
+ return pj_strnicmp(this, s, len);
+ }
+
+ int strnicmp(const PJ_String &rhs, pj_size_t len) const
+ {
+ return strnicmp(&rhs, len);
+ }
+
+ bool operator==(const char *s) const
+ {
+ return strcmp(s) == 0;
+ }
+
+ bool operator==(const pj_str_t *s) const
+ {
+ return strcmp(s) == 0;
+ }
+
+ bool operator==(const PJ_String &rhs) const
+ {
+ return pj_strcmp(this, &rhs) == 0;
+ }
+
+ char *strchr(int chr)
+ {
+ return pj_strchr(this, chr);
+ }
+
+ char *find(int chr)
+ {
+ return strchr(chr);
+ }
+
+ void strcat(const PJ_String &rhs)
+ {
+ pj_strcat(this, &rhs);
+ }
+
+ void ltrim()
+ {
+ pj_strltrim(this);
+ }
+
+ void rtrim()
+ {
+ pj_strrtrim(this);
+ }
+
+ void trim()
+ {
+ pj_strtrim(this);
+ }
+
+ unsigned long toul() const
+ {
+ return pj_strtoul(this);
+ }
+
+private:
+ //PJ_String(const PJ_String &rhs) {}
+ void operator=(const PJ_String &rhs) { pj_assert(false); }
+};
+
+#endif /* __PJPP_STRING_H__ */
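Sketch (illustrative, not part of the commit) of PJ_String; note that the pool-based constructors duplicate the input, while the plain char* ones only reference it.

    #include <pj++/string.hpp>

    static void string_demo(PJ_Pool *pool)
    {
        PJ_String s(pool, "  pjlib  ");     /* duplicated into the pool   */
        s.trim();                           /* strips both ends in place  */

        bool same = (s == "pjlib");         /* case-sensitive compare     */
        unsigned long v = PJ_String(pool, "123").toul();   /* 123         */
        (void)same; (void)v;
    }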
diff --git a/pjlib/src/pj++/timer.hpp b/pjlib/src/pj++/timer.hpp
new file mode 100644
index 00000000..ccca633a
--- /dev/null
+++ b/pjlib/src/pj++/timer.hpp
@@ -0,0 +1,105 @@
+/* $Header: /pjproject/pjlib/src/pj++/timer.hpp 4 8/24/05 10:29a Bennylp $ */
+#ifndef __PJPP_TIMER_H__
+#define __PJPP_TIMER_H__
+
+#include <pj/timer.h>
+#include <pj++/types.hpp>
+
+class PJ_Timer_Heap;
+
+class PJ_Timer_Entry : private pj_timer_entry
+{
+ friend class PJ_Timer_Heap;
+
+public:
+ static void timer_heap_callback(pj_timer_heap_t *, pj_timer_entry *);
+
+ PJ_Timer_Entry() { cb = &timer_heap_callback; }
+ PJ_Timer_Entry(int arg_id, void *arg_user_data)
+ {
+ cb = &timer_heap_callback;
+ init(arg_id, arg_user_data);
+ }
+
+ virtual void on_timeout() = 0;
+
+ void init(int arg_id, void *arg_user_data)
+ {
+ id = arg_id;
+ user_data = arg_user_data;
+ }
+
+ int get_id() const
+ {
+ return id;
+ }
+
+ void set_id(int arg_id)
+ {
+ id = arg_id;
+ }
+
+ void set_user_data(void *arg_user_data)
+ {
+ user_data = arg_user_data;
+ }
+
+ void *get_user_data() const
+ {
+ return user_data;
+ }
+
+ const PJ_Time_Val &get_timeout() const
+ {
+ pj_assert(sizeof(PJ_Time_Val) == sizeof(pj_time_val));
+ return (PJ_Time_Val&)_timer_value;
+ }
+};
+
+class PJ_Timer_Heap
+{
+public:
+ PJ_Timer_Heap() {}
+
+ bool create(PJ_Pool *pool, pj_size_t initial_count,
+ unsigned flag = PJ_TIMER_HEAP_SYNCHRONIZE)
+ {
+ ht_ = pj_timer_heap_create(pool->pool_(), initial_count, flag);
+ return ht_ != NULL;
+ }
+
+ pj_timer_heap_t *get_timer_heap()
+ {
+ return ht_;
+ }
+
+ bool schedule( PJ_Timer_Entry *ent, const PJ_Time_Val &delay)
+ {
+ return pj_timer_heap_schedule(ht_, ent, &delay) == 0;
+ }
+
+ bool cancel(PJ_Timer_Entry *ent)
+ {
+ return pj_timer_heap_cancel(ht_, ent) == 1;
+ }
+
+ pj_size_t count()
+ {
+ return pj_timer_heap_count(ht_);
+ }
+
+ void earliest_time(PJ_Time_Val *t)
+ {
+ pj_timer_heap_earliest_time(ht_, t);
+ }
+
+ int poll(PJ_Time_Val *next_delay = NULL)
+ {
+ return pj_timer_heap_poll(ht_, next_delay);
+ }
+
+private:
+ pj_timer_heap_t *ht_;
+};
+
+#endif /* __PJPP_TIMER_H__ */
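Sketch (illustrative, not part of the commit) of the timer wrappers above. on_timeout() is pure virtual, so the entry must be subclassed; poll() must be called periodically (the proactor does this from handle_events()).

    #include <pj++/timer.hpp>
    #include <pj++/pool.hpp>

    class One_Shot : public PJ_Timer_Entry
    {
    public:
        virtual void on_timeout()
        {
            /* runs once, from whichever thread calls poll() */
        }
    };

    static void timer_demo(PJ_Pool *pool)
    {
        PJ_Timer_Heap heap;
        if (!heap.create(pool, 16))
            return;

        One_Shot entry;
        entry.init(1, NULL);            /* id=1, no user data */

        PJ_Time_Val delay;
        delay.sec = 2; delay.msec = 0;
        heap.schedule(&entry, delay);

        heap.poll();                    /* runs entries whose time has come */
    }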
diff --git a/pjlib/src/pj++/tree.hpp b/pjlib/src/pj++/tree.hpp
new file mode 100644
index 00000000..d2243e57
--- /dev/null
+++ b/pjlib/src/pj++/tree.hpp
@@ -0,0 +1,107 @@
+/* $Header: /pjproject/pjlib/src/pj++/tree.hpp 2 2/24/05 11:23a Bennylp $ */
+#ifndef __PJPP_TREE_H__
+#define __PJPP_TREE_H__
+
+#include <pj/rbtree.h>
+
+class PJ_Tree
+{
+public:
+ typedef pj_rbtree_comp Comp;
+ class iterator;
+ class reverse_iterator;
+
+ class Node : private pj_rbtree_node
+ {
+ friend class PJ_Tree;
+ friend class iterator;
+ friend class reverse_iterator;
+
+ public:
+ Node() {}
+ explicit Node(void *data) { user_data = data; }
+ void set_user_data(void *data) { user_data = data; }
+ void *get_user_data() const { return user_data; }
+ };
+
+ class iterator
+ {
+ public:
+ iterator() {}
+ iterator(const iterator &rhs) : tr_(rhs.tr_), nd_(rhs.nd_) {}
+ iterator(pj_rbtree *tr, pj_rbtree_node *nd) : tr_(tr), nd_(nd) {}
+ Node *operator*() { return (Node*)nd_; }
+ bool operator==(const iterator &rhs) const { return tr_==rhs.tr_ && nd_==rhs.nd_; }
+ iterator &operator=(const iterator &rhs) { tr_=rhs.tr_; nd_=rhs.nd_; return *this; }
+ void operator++() { nd_=pj_rbtree_next(tr_, nd_); }
+ void operator--() { nd_=pj_rbtree_prev(tr_, nd_); }
+ protected:
+ pj_rbtree *tr_;
+ pj_rbtree_node *nd_;
+ };
+
+ class reverse_iterator : public iterator
+ {
+ public:
+ reverse_iterator() {}
+ reverse_iterator(const reverse_iterator &it) : iterator(it) {}
+ reverse_iterator(pj_rbtree *t, pj_rbtree_node *n) : iterator(t, n) {}
+ reverse_iterator &operator=(const reverse_iterator &rhs) { iterator::operator=(rhs); return *this; }
+ Node *operator*() { return (Node*)nd_; }
+ bool operator==(const reverse_iterator &rhs) const { return iterator::operator==(rhs); }
+ void operator++() { nd_=pj_rbtree_prev(tr_, nd_); }
+ void operator--() { nd_=pj_rbtree_next(tr_, nd_); }
+ };
+
+ explicit PJ_Tree(Comp *comp) { pj_rbtree_init(&t_, comp); }
+
+ iterator begin()
+ {
+ return iterator(&t_, pj_rbtree_first(&t_));
+ }
+
+ iterator end()
+ {
+ return iterator(&t_, NULL);
+ }
+
+ reverse_iterator rbegin()
+ {
+ return reverse_iterator(&t_, pj_rbtree_last(&t_));
+ }
+
+ reverse_iterator rend()
+ {
+ return reverse_iterator(&t_, NULL);
+ }
+
+ bool insert(Node *node)
+ {
+ return pj_rbtree_insert(&t_, node)==0 ? true : false;
+ }
+
+ Node *find(const void *key)
+ {
+ return (Node*)pj_rbtree_find(&t_, key);
+ }
+
+ Node *erase(Node *node)
+ {
+ return (Node*)pj_rbtree_erase(&t_, node);
+ }
+
+ unsigned max_height(Node *node=NULL)
+ {
+ return pj_rbtree_max_height(&t_, node);
+ }
+
+ unsigned min_height(Node *node=NULL)
+ {
+ return pj_rbtree_min_height(&t_, node);
+ }
+
+private:
+ pj_rbtree t_;
+};
+
+#endif /* __PJPP_TREE_H__ */
diff --git a/pjlib/src/pj++/types.hpp b/pjlib/src/pj++/types.hpp
new file mode 100644
index 00000000..888c8456
--- /dev/null
+++ b/pjlib/src/pj++/types.hpp
@@ -0,0 +1,59 @@
+/* $Header: /pjproject/pjlib/src/pj++/types.hpp 3 4/17/05 11:59a Bennylp $ */
+#ifndef __PJPP_TYPES_H__
+#define __PJPP_TYPES_H__
+
+#include <pj/types.h>
+
+class PJ_Pool;
+class PJ_Socket;
+
+
+class PJ_Time_Val : public pj_time_val
+{
+public:
+ PJ_Time_Val() {}
+ PJ_Time_Val(const PJ_Time_Val &rhs) { sec=rhs.sec; msec=rhs.msec; }
+ explicit PJ_Time_Val(const pj_time_val &tv) { sec = tv.sec; msec = tv.msec; }
+
+ long get_sec() const { return sec; }
+ long get_msec() const { return msec; }
+ void set_sec (long s) { sec = s; }
+ void set_msec(long ms) { msec = ms; normalize(); }
+ long to_msec() const { return PJ_TIME_VAL_MSEC((*this)); }
+
+ bool operator == (const PJ_Time_Val &rhs) const { return PJ_TIME_VAL_EQ((*this), rhs); }
+ bool operator > (const PJ_Time_Val &rhs) const { return PJ_TIME_VAL_GT((*this), rhs); }
+ bool operator >= (const PJ_Time_Val &rhs) const { return PJ_TIME_VAL_GTE((*this), rhs); }
+ bool operator < (const PJ_Time_Val &rhs) const { return PJ_TIME_VAL_LT((*this), rhs); }
+ bool operator <= (const PJ_Time_Val &rhs) const { return PJ_TIME_VAL_LTE((*this), rhs); }
+
+ PJ_Time_Val & operator = (const PJ_Time_Val &rhs) {
+ sec = rhs.sec;
+ msec = rhs.msec;
+ return *this;
+ }
+
+ PJ_Time_Val & operator += (const PJ_Time_Val &rhs) {
+ PJ_TIME_VAL_ADD((*this), rhs);
+ return *this;
+ }
+
+ PJ_Time_Val & operator -= (const PJ_Time_Val &rhs) {
+ PJ_TIME_VAL_SUB((*this), rhs);
+ return *this;
+ }
+
+ /* Must include os.hpp to use these, otherwise unresolved in linking */
+ pj_status_t gettimeofday();
+ pj_parsed_time decode();
+ pj_status_t encode(const pj_parsed_time *pt);
+ pj_status_t to_gmt();
+ pj_status_t to_local();
+
+
+private:
+ void normalize() { pj_time_val_normalize(this); }
+
+};
+
+#endif /* __PJPP_TYPES_H__ */
diff --git a/pjlib/src/pj/addr_resolv_linux_kernel.c b/pjlib/src/pj/addr_resolv_linux_kernel.c
new file mode 100644
index 00000000..7c085c60
--- /dev/null
+++ b/pjlib/src/pj/addr_resolv_linux_kernel.c
@@ -0,0 +1,14 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/addr_resolv_linux_kernel.c 1 10/05/05 4:41p Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/addr_resolv_linux_kernel.c $
+ *
+ * 1 10/05/05 4:41p Bennylp
+ * Created.
+ *
+ */
+#include <pj/addr_resolv.h>
+
+PJ_DEF(pj_status_t) pj_gethostbyname(const pj_str_t *hostname, pj_hostent *phe)
+{
+ return -1;
+}
+
diff --git a/pjlib/src/pj/addr_resolv_sock.c b/pjlib/src/pj/addr_resolv_sock.c
new file mode 100644
index 00000000..0200c65a
--- /dev/null
+++ b/pjlib/src/pj/addr_resolv_sock.c
@@ -0,0 +1,44 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/addr_resolv_sock.c 2 10/14/05 12:26a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/addr_resolv_sock.c $
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 9/22/05 10:38a Bennylp
+ * Created.
+ *
+ */
+#include <pj/addr_resolv.h>
+#include <pj/assert.h>
+#include <pj/string.h>
+#include <pj/compat/socket.h>
+#include <pj/errno.h>
+
+
+PJ_DEF(pj_status_t) pj_gethostbyname(const pj_str_t *hostname, pj_hostent *phe)
+{
+ struct hostent *he;
+ char copy[PJ_MAX_HOSTNAME];
+
+ pj_assert(hostname && hostname->slen < PJ_MAX_HOSTNAME);
+
+ if (hostname->slen >= PJ_MAX_HOSTNAME)
+ return PJ_ENAMETOOLONG;
+
+ pj_memcpy(copy, hostname->ptr, hostname->slen);
+ copy[ hostname->slen ] = '\0';
+
+ he = gethostbyname(copy);
+ if (!he)
+ return PJ_RETURN_OS_ERROR(pj_get_native_netos_error());
+
+ phe->h_name = he->h_name;
+ phe->h_aliases = he->h_aliases;
+ phe->h_addrtype = he->h_addrtype;
+ phe->h_length = he->h_length;
+ phe->h_addr_list = he->h_addr_list;
+
+ return PJ_SUCCESS;
+}
+
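Call sketch (illustrative, not part of the commit) for pj_gethostbyname(); note that the returned pj_hostent fields point into libc's static gethostbyname() buffer, so they are only valid until the next lookup.

    #include <pj/addr_resolv.h>
    #include <pj/string.h>          /* pj_str() */

    static void resolve_demo(void)
    {
        char name[] = "localhost";
        pj_str_t host = pj_str(name);
        pj_hostent he;

        if (pj_gethostbyname(&host, &he) == PJ_SUCCESS) {
            /* he.h_addr_list[0] holds the first address,
             * he.h_length bytes long (4 for IPv4). */
        }
    }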
diff --git a/pjlib/src/pj/array.c b/pjlib/src/pj/array.c
new file mode 100644
index 00000000..edb4994d
--- /dev/null
+++ b/pjlib/src/pj/array.c
@@ -0,0 +1,63 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/array.c 5 10/14/05 12:26a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/array.c $
+ *
+ * 5 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 4 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ */
+#include <pj/array.h>
+#include <pj/string.h>
+#include <pj/assert.h>
+#include <pj/errno.h>
+
+PJ_DEF(void) pj_array_insert( void *array,
+ unsigned elem_size,
+ unsigned count,
+ unsigned pos,
+ const void *value)
+{
+ if (count && pos < count) {
+ pj_memmove( (char*)array + (pos+1)*elem_size,
+ (char*)array + pos*elem_size,
+ (count-pos)*elem_size);
+ }
+ pj_memmove((char*)array + pos*elem_size, value, elem_size);
+}
+
+PJ_DEF(void) pj_array_erase( void *array,
+ unsigned elem_size,
+ unsigned count,
+ unsigned pos)
+{
+ pj_assert(count != 0);
+ if (pos < count-1) {
+ pj_memmove( (char*)array + pos*elem_size,
+ (char*)array + (pos+1)*elem_size,
+ (count-pos-1)*elem_size);
+ }
+}
+
+PJ_DEF(pj_status_t) pj_array_find( const void *array,
+ unsigned elem_size,
+ unsigned count,
+ pj_status_t (*matching)(const void *value),
+ void **result)
+{
+ unsigned i;
+ const char *char_array = array;
+ for (i=0; i<count; ++i) {
+ if ( (*matching)(char_array) == PJ_SUCCESS) {
+ if (result) {
+ *result = (void*)char_array;
+ }
+ return PJ_SUCCESS;
+ }
+ char_array += elem_size;
+ }
+ return PJ_ENOTFOUND;
+}
+
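Worked example (illustrative, not part of the commit) for the array helpers above; the caller owns the storage and must leave room for the inserted element.

    #include <pj/array.h>

    static void array_demo(void)
    {
        int values[4] = { 10, 20, 30, 0 };  /* 3 live elements, room for 1 more */
        unsigned count = 3;
        int v = 15;

        pj_array_insert(values, sizeof(int), count, 1, &v);
        ++count;                            /* values: 10, 15, 20, 30 */

        pj_array_erase(values, sizeof(int), count, 0);
        --count;                            /* values: 15, 20, 30 */
    }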
diff --git a/pjlib/src/pj/compat/longjmp_i386.S b/pjlib/src/pj/compat/longjmp_i386.S
new file mode 100644
index 00000000..0788f44a
--- /dev/null
+++ b/pjlib/src/pj/compat/longjmp_i386.S
@@ -0,0 +1,42 @@
+/* longjmp for i386.
+ Copyright (C) 1995, 1996, 1997, 1998 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If not,
+ write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+#define _ASM
+#define _SETJMP_H
+#define PJ_LINUX_KERNEL 1
+#include <pj/compat/setjmp.h>
+
+.global __longjmp
+.type __longjmp,%function
+.align 4
+__longjmp:
+ movl 4(%esp), %ecx /* User's jmp_buf in %ecx. */
+ movl 8(%esp), %eax /* Second argument is return value. */
+ /* Save the return address now. */
+ movl (JB_PC*4)(%ecx), %edx
+ /* Restore registers. */
+ movl (JB_BX*4)(%ecx), %ebx
+ movl (JB_SI*4)(%ecx), %esi
+ movl (JB_DI*4)(%ecx), %edi
+ movl (JB_BP*4)(%ecx), %ebp
+ movl (JB_SP*4)(%ecx), %esp
+ /* Jump to saved PC. */
+ jmp *%edx
+.size __longjmp,.-__longjmp
+
diff --git a/pjlib/src/pj/compat/setjmp_i386.S b/pjlib/src/pj/compat/setjmp_i386.S
new file mode 100644
index 00000000..6810c554
--- /dev/null
+++ b/pjlib/src/pj/compat/setjmp_i386.S
@@ -0,0 +1,61 @@
+/* setjmp for i386, ELF version.
+ Copyright (C) 1995, 1996, 1997, 2000, 2001 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#define _ASM
+#define _SETJMP_H
+#define PJ_LINUX_KERNEL 1
+#include <pj/compat/setjmp.h>
+
+
+.global __sigsetjmp
+.type __sigsetjmp,%function
+.align 4
+
+__sigsetjmp:
+ movl 4 (%esp), %eax
+ /* Save registers. */
+ movl %ebx, (0 *4)(%eax)
+ movl %esi, (1 *4)(%eax)
+ movl %edi, (2 *4)(%eax)
+ /* Save SP as it will be after we return. */
+ leal 4(%esp), %ecx
+ movl %ecx, (4 *4)(%eax)
+ /* Save PC we are returning to now. */
+ movl 0(%esp), %ecx
+ movl %ecx, (5 *4)(%eax)
+ /* Save caller's frame pointer. */
+ movl %ebp, (3 *4)(%eax)
+
+ /* Make a tail call to __sigjmp_save; it takes the same args. */
+#ifdef __PIC__
+ /* We cannot use the PLT, because it requires that %ebx be set, but
+ we can't save and restore our caller's value. Instead, we do an
+ indirect jump through the GOT, using for the temporary register
+ %ecx, which is call-clobbered. */
+ call .Lhere
+.Lhere:
+ popl %ecx
+ addl $_GLOBAL_OFFSET_TABLE_+[.-.Lhere], %ecx
+ movl __sigjmp_save@GOT(%ecx), %ecx
+ jmp *%ecx
+#else
+ jmp __sigjmp_save
+#endif
+.size __sigsetjmp,.-__sigsetjmp
+
diff --git a/pjlib/src/pj/compat/sigjmp.c b/pjlib/src/pj/compat/sigjmp.c
new file mode 100644
index 00000000..ead0e363
--- /dev/null
+++ b/pjlib/src/pj/compat/sigjmp.c
@@ -0,0 +1,21 @@
+#include <pj/config.h>
+#include <pj/compat/setjmp.h>
+
+int __sigjmp_save(sigjmp_buf env, int savemask)
+{
+ return 0;
+}
+
+extern int __sigsetjmp(pj_jmp_buf env, int savemask);
+extern void __longjmp(pj_jmp_buf env, int val) __attribute__((noreturn));
+
+PJ_DEF(int) pj_setjmp(pj_jmp_buf env)
+{
+ return __sigsetjmp(env, 0);
+}
+
+PJ_DEF(void) pj_longjmp(pj_jmp_buf env, int val)
+{
+ __longjmp(env, val);
+}
+
diff --git a/pjlib/src/pj/compat/string.c b/pjlib/src/pj/compat/string.c
new file mode 100644
index 00000000..25fd11c9
--- /dev/null
+++ b/pjlib/src/pj/compat/string.c
@@ -0,0 +1,33 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/compat/string.c 1 9/22/05 10:43a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/compat/string.c $
+ *
+ * 1 9/22/05 10:43a Bennylp
+ * Created.
+ *
+ */
+#include <pj/types.h>
+#include <pj/compat/string.h>
+#include <pj/ctype.h>
+
+PJ_DEF(int) strcasecmp(const char *s1, const char *s2)
+{
+ while ((*s1==*s2) || (pj_tolower(*s1)==pj_tolower(*s2))) {
+ if (!*s1++)
+ return 0;
+ ++s2;
+ }
+ return (pj_tolower(*s1) < pj_tolower(*s2)) ? -1 : 1;
+}
+
+PJ_DEF(int) strncasecmp(const char *s1, const char *s2, int len)
+{
+ if (!len) return 0;
+
+ while ((*s1==*s2) || (pj_tolower(*s1)==pj_tolower(*s2))) {
+ if (!*s1++ || --len <= 0)
+ return 0;
+ ++s2;
+ }
+ return (pj_tolower(*s1) < pj_tolower(*s2)) ? -1 : 1;
+}
+
diff --git a/pjlib/src/pj/config.c b/pjlib/src/pj/config.c
new file mode 100644
index 00000000..5a08c8dd
--- /dev/null
+++ b/pjlib/src/pj/config.c
@@ -0,0 +1,40 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/config.c 7 10/14/05 12:26a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/config.c $
+ *
+ * 7 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 6 9/21/05 1:39p Bennylp
+ * Periodic checkin for backup.
+ *
+ * 5 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ */
+#include <pj/config.h>
+#include <pj/log.h>
+
+static const char *id = "config.c";
+const char *PJ_VERSION = "0.3.0-pre1";
+
+PJ_DEF(void) pj_dump_config(void)
+{
+ PJ_LOG(3, (id, "PJLIB (c)2005 Benny Prijono"));
+ PJ_LOG(3, (id, "Dumping configurations:"));
+ PJ_LOG(3, (id, " PJ_VERSION : %s", PJ_VERSION));
+ PJ_LOG(3, (id, " PJ_DEBUG : %d", PJ_DEBUG));
+ PJ_LOG(3, (id, " PJ_FUNCTIONS_ARE_INLINED : %d", PJ_FUNCTIONS_ARE_INLINED));
+ PJ_LOG(3, (id, " PJ_POOL_DEBUG : %d", PJ_POOL_DEBUG));
+ PJ_LOG(3, (id, " PJ_HAS_THREADS : %d", PJ_HAS_THREADS));
+ PJ_LOG(3, (id, " PJ_LOG_MAX_LEVEL : %d", PJ_LOG_MAX_LEVEL));
+ PJ_LOG(3, (id, " PJ_LOG_MAX_SIZE : %d", PJ_LOG_MAX_SIZE));
+ PJ_LOG(3, (id, " PJ_LOG_USE_STACK_BUFFER : %d", PJ_LOG_USE_STACK_BUFFER));
+ PJ_LOG(3, (id, " PJ_HAS_TCP : %d", PJ_HAS_TCP));
+ PJ_LOG(3, (id, " PJ_MAX_HOSTNAME : %d", PJ_MAX_HOSTNAME));
+ PJ_LOG(3, (id, " PJ_HAS_SEMAPHORE : %d", PJ_HAS_SEMAPHORE));
+ PJ_LOG(3, (id, " PJ_HAS_EVENT_OBJ : %d", PJ_HAS_EVENT_OBJ));
+ PJ_LOG(3, (id, " PJ_HAS_HIGH_RES_TIMER : %d", PJ_HAS_HIGH_RES_TIMER));
+ PJ_LOG(3, (id, " PJ_(endianness) : %s", (PJ_IS_BIG_ENDIAN?"big-endian":"little-endian")));
+ PJ_LOG(3, (id, " PJ_IOQUEUE_MAX_HANDLES : %d", PJ_IOQUEUE_MAX_HANDLES));
+}
diff --git a/pjlib/src/pj/equeue_winnt.c b/pjlib/src/pj/equeue_winnt.c
new file mode 100644
index 00000000..b1ed4508
--- /dev/null
+++ b/pjlib/src/pj/equeue_winnt.c
@@ -0,0 +1,13 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/equeue_winnt.c 2 10/14/05 12:26a Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pj/equeue_winnt.c $
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/05/05 6:19p Bennylp
+ * Created.
+ *
+ */
+#include <pj/equeue.h>
diff --git a/pjlib/src/pj/errno.c b/pjlib/src/pj/errno.c
new file mode 100644
index 00000000..218c789f
--- /dev/null
+++ b/pjlib/src/pj/errno.c
@@ -0,0 +1,107 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/errno.c 2 10/14/05 12:26a Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pj/errno.c $
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/08/05 9:53a Bennylp
+ * Created.
+ *
+ */
+#include <pj/errno.h>
+#include <pj/string.h>
+#include <pj/compat/sprintf.h>
+
+/* Prototype for the platform-specific error message function, which is
+ * defined in a separate file.
+ */
+extern int platform_strerror( pj_os_err_type code,
+ char *buf, pj_size_t bufsize );
+
+/* PJLIB's own error codes/messages */
+static const struct
+{
+ int code;
+ const char *msg;
+} err_str[] =
+{
+ { PJ_EUNKNOWN, "Unknown Error" },
+ { PJ_EPENDING, "Pending operation" },
+ { PJ_ETOOMANYCONN, "Too many connecting sockets" },
+ { PJ_EINVAL, "Invalid value or argument" },
+ { PJ_ENAMETOOLONG, "Name too long" },
+ { PJ_ENOTFOUND, "Not found" },
+ { PJ_ENOMEM, "Not enough memory" },
+ { PJ_EBUG, "BUG DETECTED!" },
+ { PJ_ETIMEDOUT, "Operation timed out" },
+ { PJ_ETOOMANY, "Too many objects of the specified type"},
+ { PJ_EBUSY, "Object is busy"},
+ { PJ_ENOTSUP, "Option/operation is not supported"},
+ { PJ_EINVALIDOP, "Invalid operation"}
+};
+
+/*
+ * pjlib_error()
+ *
+ * Retrieve message string for PJLIB's own error code.
+ */
+static int pjlib_error(pj_status_t code, char *buf, pj_size_t size)
+{
+ unsigned i;
+
+ for (i=0; i<sizeof(err_str)/sizeof(err_str[0]); ++i) {
+ if (err_str[i].code == code) {
+ pj_size_t len = strlen(err_str[i].msg);
+ if (len >= size) len = size-1;
+ pj_memcpy(buf, err_str[i].msg, len);
+ buf[len] = '\0';
+ return len;
+ }
+ }
+
+ *buf++ = '?';
+ *buf++ = '?';
+ *buf++ = '?';
+ *buf++ = '\0';
+ return 3;
+}
+
+/*
+ * pj_strerror()
+ */
+PJ_DEF(pj_str_t) pj_strerror( pj_status_t statcode,
+ char *buf, pj_size_t bufsize )
+{
+ int len = -1;
+ pj_str_t errstr;
+
+ if (statcode < PJ_ERRNO_START + PJ_ERRNO_SPACE_SIZE) {
+ len = pj_snprintf( buf, bufsize, "Unknown error %d", statcode);
+
+ } else if (statcode < PJ_ERRNO_START_STATUS + PJ_ERRNO_SPACE_SIZE) {
+ len = pjlib_error(statcode, buf, bufsize);
+
+ } else if (statcode < PJ_ERRNO_START_SYS + PJ_ERRNO_SPACE_SIZE) {
+ len = platform_strerror(PJ_STATUS_TO_OS(statcode), buf, bufsize);
+
+ } else if (statcode < PJ_ERRNO_START_USER + PJ_ERRNO_SPACE_SIZE) {
+ len = pj_snprintf( buf, bufsize, "User error %d", statcode);
+
+ } else {
+ len = pj_snprintf( buf, bufsize, "Invalid error %d", statcode);
+
+ }
+
+ if (len < 1) {
+ *buf = '\0';
+ len = 0;
+ }
+
+ errstr.ptr = buf;
+ errstr.slen = len;
+
+ return errstr;
+}
+
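
As a usage sketch for pj_strerror() above (the report_error() helper is hypothetical, not part of the patch), an application would normally render a pj_status_t into a small stack buffer and log it:

#include <pj/errno.h>
#include <pj/log.h>

static void report_error(const char *title, pj_status_t status)
{
    char errmsg[80];
    pj_str_t msg = pj_strerror(status, errmsg, sizeof(errmsg));

    /* Use %.*s because a pj_str_t is length-delimited. */
    PJ_LOG(3, ("app", "%s: %.*s (status=%d)", title,
               (int)msg.slen, msg.ptr, status));
}
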
diff --git a/pjlib/src/pj/except.c b/pjlib/src/pj/except.c
new file mode 100644
index 00000000..0525caee
--- /dev/null
+++ b/pjlib/src/pj/except.c
@@ -0,0 +1,148 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/except.c 6 10/14/05 12:26a Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pj/except.c $
+ *
+ * 6 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 5 9/21/05 1:39p Bennylp
+ * Periodic checkin for backup.
+ *
+ * 4 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ */
+#include <pj/except.h>
+#include <pj/os.h>
+#include <pj/assert.h>
+#include <pj/log.h>
+#include <pj/errno.h>
+
+static long thread_local_id = -1;
+
+#if defined(PJ_HAS_EXCEPTION_NAMES) && PJ_HAS_EXCEPTION_NAMES != 0
+ static const char *exception_id_names[PJ_MAX_EXCEPTION_ID];
+#else
+ /*
+ * Start from 1 (not 0)!!!
+ * Exception 0 is reserved for normal path of setjmp()!!!
+ */
+ static int last_exception_id = 1;
+#endif /* PJ_HAS_EXCEPTION_NAMES */
+
+
+PJ_DEF(void) pj_throw_exception_(int exception_id)
+{
+ struct pj_exception_state_t *handler;
+
+ handler = pj_thread_local_get(thread_local_id);
+ if (handler == NULL) {
+ PJ_LOG(1,("except.c", "!!!FATAL: unhandled exception %d!\n", exception_id));
+ pj_assert(handler != NULL);
+ /* This will crash the system! */
+ }
+ pj_longjmp(handler->state, exception_id);
+}
+
+PJ_DEF(void) pj_push_exception_handler_(struct pj_exception_state_t *rec)
+{
+ struct pj_exception_state_t *parent_handler = NULL;
+
+ if (thread_local_id == -1) {
+ pj_thread_local_alloc(&thread_local_id);
+ pj_assert(thread_local_id != -1);
+ }
+ parent_handler = pj_thread_local_get(thread_local_id);
+ rec->prev = parent_handler;
+ pj_thread_local_set(thread_local_id, rec);
+}
+
+PJ_DEF(void) pj_pop_exception_handler_(void)
+{
+ struct pj_exception_state_t *handler;
+
+ handler = pj_thread_local_get(thread_local_id);
+ pj_assert(handler != NULL);
+ pj_thread_local_set(thread_local_id, handler->prev);
+}
+
+#if defined(PJ_HAS_EXCEPTION_NAMES) && PJ_HAS_EXCEPTION_NAMES != 0
+PJ_DEF(pj_status_t) pj_exception_id_alloc( const char *name,
+ pj_exception_id_t *id)
+{
+ unsigned i;
+
+ pj_enter_critical_section();
+
+ /*
+ * Start from 1 (not 0)!!!
+ * Exception 0 is reserved for normal path of setjmp()!!!
+ */
+ for (i=1; i<PJ_MAX_EXCEPTION_ID; ++i) {
+ if (exception_id_names[i] == NULL) {
+ exception_id_names[i] = name;
+ *id = i;
+ pj_leave_critical_section();
+ return PJ_SUCCESS;
+ }
+ }
+
+ pj_leave_critical_section();
+ return PJ_ETOOMANY;
+}
+
+PJ_DEF(pj_status_t) pj_exception_id_free( pj_exception_id_t id )
+{
+ /*
+ * Start from 1 (not 0)!!!
+ * Exception 0 is reserved for normal path of setjmp()!!!
+ */
+ PJ_ASSERT_RETURN(id>0 && id<PJ_MAX_EXCEPTION_ID, PJ_EINVAL);
+
+ pj_enter_critical_section();
+ exception_id_names[id] = NULL;
+ pj_leave_critical_section();
+
+ return PJ_SUCCESS;
+
+}
+
+PJ_DEF(const char*) pj_exception_id_name(pj_exception_id_t id)
+{
+ /*
+ * Start from 1 (not 0)!!!
+ * Exception 0 is reserved for normal path of setjmp()!!!
+ */
+ PJ_ASSERT_RETURN(id>0 && id<PJ_MAX_EXCEPTION_ID, "<Invalid ID>");
+
+ if (exception_id_names[id] == NULL)
+ return "<Unallocated ID>";
+
+ return exception_id_names[id];
+}
+
+#else /* PJ_HAS_EXCEPTION_NAMES */
+PJ_DEF(pj_status_t) pj_exception_id_alloc( const char *name,
+ pj_exception_id_t *id)
+{
+ PJ_ASSERT_RETURN(last_exception_id < PJ_MAX_EXCEPTION_ID-1, PJ_ETOOMANY);
+
+ *id = last_exception_id++;
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(pj_status_t) pj_exception_id_free( pj_exception_id_t id )
+{
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(const char*) pj_exception_id_name(pj_exception_id_t id)
+{
+ return "";
+}
+
+#endif /* PJ_HAS_EXCEPTION_NAMES */
+
+
+
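
The handler stack above is normally driven through the convenience macros in <pj/except.h> (PJ_USE_EXCEPTION, PJ_TRY, PJ_CATCH, PJ_END, PJ_THROW). The sketch below assumes those macros and an exception id obtained from pj_exception_id_alloc() at startup; protected_work() and NO_MEMORY are hypothetical names.

#include <pj/except.h>
#include <pj/log.h>

static pj_exception_id_t NO_MEMORY;    /* from pj_exception_id_alloc() */

static void protected_work(void)
{
    PJ_USE_EXCEPTION;                  /* declares the local exception state */

    PJ_TRY {
        /* ... code that may call PJ_THROW(NO_MEMORY) ... */
    }
    PJ_CATCH(NO_MEMORY) {
        PJ_LOG(3, ("sample", "caught exception: %s",
                   pj_exception_id_name(NO_MEMORY)));
    }
    PJ_END;
}
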
diff --git a/pjlib/src/pj/extra-exports.c b/pjlib/src/pj/extra-exports.c
new file mode 100644
index 00000000..4cc12e3f
--- /dev/null
+++ b/pjlib/src/pj/extra-exports.c
@@ -0,0 +1,38 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/extra-exports.c 1 10/29/05 11:56a Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pj/extra-exports.c $
+ *
+ * 1 10/29/05 11:56a Bennylp
+ * Version 0.3-pre2
+ *
+ */
+
+/*
+ * This file contains code to export extra symbols from the Linux kernel.
+ * It should be copied into the Linux kernel source tree and added to
+ * the Linux kernel compilation.
+ *
+ * This file is part of the PJLIB project.
+ */
+#include <linux/module.h>
+#include <linux/syscalls.h>
+
+EXPORT_SYMBOL(sys_select);
+
+EXPORT_SYMBOL(sys_epoll_create);
+EXPORT_SYMBOL(sys_epoll_ctl);
+EXPORT_SYMBOL(sys_epoll_wait);
+
+EXPORT_SYMBOL(sys_socket);
+EXPORT_SYMBOL(sys_bind);
+EXPORT_SYMBOL(sys_getpeername);
+EXPORT_SYMBOL(sys_getsockname);
+EXPORT_SYMBOL(sys_sendto);
+EXPORT_SYMBOL(sys_recvfrom);
+EXPORT_SYMBOL(sys_getsockopt);
+EXPORT_SYMBOL(sys_setsockopt);
+EXPORT_SYMBOL(sys_listen);
+EXPORT_SYMBOL(sys_shutdown);
+EXPORT_SYMBOL(sys_connect);
+EXPORT_SYMBOL(sys_accept);
+
diff --git a/pjlib/src/pj/fifobuf.c b/pjlib/src/pj/fifobuf.c
new file mode 100644
index 00000000..d0b41a15
--- /dev/null
+++ b/pjlib/src/pj/fifobuf.c
@@ -0,0 +1,182 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/fifobuf.c 4 9/17/05 10:37a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/fifobuf.c $
+ *
+ * 4 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ */
+#include <pj/fifobuf.h>
+#include <pj/log.h>
+#include <pj/assert.h>
+#include <pj/os.h>
+
+#define THIS_FILE "fifobuf"
+
+#define SZ sizeof(unsigned)
+
+PJ_DEF(void)
+pj_fifobuf_init (pj_fifobuf_t *fifobuf, void *buffer, unsigned size)
+{
+ PJ_CHECK_STACK();
+
+ PJ_LOG(6, (THIS_FILE,
+ "fifobuf_init fifobuf=%p buffer=%p, size=%d",
+ fifobuf, buffer, size));
+
+ fifobuf->first = buffer;
+ fifobuf->last = fifobuf->first + size;
+ fifobuf->ubegin = fifobuf->uend = fifobuf->first;
+ fifobuf->full = 0;
+}
+
+PJ_DEF(unsigned)
+pj_fifobuf_max_size (pj_fifobuf_t *fifobuf)
+{
+ unsigned s1, s2;
+
+ PJ_CHECK_STACK();
+
+ if (fifobuf->uend >= fifobuf->ubegin) {
+ s1 = fifobuf->last - fifobuf->uend;
+ s2 = fifobuf->ubegin - fifobuf->first;
+ } else {
+ s1 = s2 = fifobuf->ubegin - fifobuf->uend;
+ }
+
+ return s1<s2 ? s2 : s1;
+}
+
+PJ_DEF(void*)
+pj_fifobuf_alloc (pj_fifobuf_t *fifobuf, unsigned size)
+{
+ unsigned available;
+ char *start;
+
+ PJ_CHECK_STACK();
+
+ if (fifobuf->full) {
+ PJ_LOG(6, (THIS_FILE,
+ "fifobuf_alloc fifobuf=%p, size=%d: full!",
+ fifobuf, size));
+ return NULL;
+ }
+
+ /* try to allocate from the end part of the fifo */
+ if (fifobuf->uend >= fifobuf->ubegin) {
+ available = fifobuf->last - fifobuf->uend;
+ if (available >= size+SZ) {
+ char *ptr = fifobuf->uend;
+ fifobuf->uend += (size+SZ);
+ if (fifobuf->uend == fifobuf->last)
+ fifobuf->uend = fifobuf->first;
+ if (fifobuf->uend == fifobuf->ubegin)
+ fifobuf->full = 1;
+ *(unsigned*)ptr = size+SZ;
+ ptr += SZ;
+
+ PJ_LOG(6, (THIS_FILE,
+ "fifobuf_alloc fifobuf=%p, size=%d: returning %p, p1=%p, p2=%p",
+ fifobuf, size, ptr, fifobuf->ubegin, fifobuf->uend));
+ return ptr;
+ }
+ }
+
+ /* try to allocate from the start part of the fifo */
+ start = (fifobuf->uend <= fifobuf->ubegin) ? fifobuf->uend : fifobuf->first;
+ available = fifobuf->ubegin - start;
+ if (available >= size+SZ) {
+ char *ptr = start;
+ fifobuf->uend = start + size + SZ;
+ if (fifobuf->uend == fifobuf->ubegin)
+ fifobuf->full = 1;
+ *(unsigned*)ptr = size+SZ;
+ ptr += SZ;
+
+ PJ_LOG(6, (THIS_FILE,
+ "fifobuf_alloc fifobuf=%p, size=%d: returning %p, p1=%p, p2=%p",
+ fifobuf, size, ptr, fifobuf->ubegin, fifobuf->uend));
+ return ptr;
+ }
+
+ PJ_LOG(6, (THIS_FILE,
+ "fifobuf_alloc fifobuf=%p, size=%d: no space left! p1=%p, p2=%p",
+ fifobuf, size, fifobuf->ubegin, fifobuf->uend));
+ return NULL;
+}
+
+PJ_DEF(pj_status_t)
+pj_fifobuf_unalloc (pj_fifobuf_t *fifobuf, void *buf)
+{
+ char *ptr = buf;
+ char *endptr;
+ unsigned sz;
+
+ PJ_CHECK_STACK();
+
+ ptr -= SZ;
+ sz = *(unsigned*)ptr;
+
+ endptr = fifobuf->uend;
+ if (endptr == fifobuf->first)
+ endptr = fifobuf->last;
+
+ if (ptr+sz != endptr) {
+ pj_assert(!"Invalid pointer to undo alloc");
+ return -1;
+ }
+
+ fifobuf->uend = ptr;
+ fifobuf->full = 0;
+
+ PJ_LOG(6, (THIS_FILE,
+ "fifobuf_unalloc fifobuf=%p, ptr=%p, size=%d, p1=%p, p2=%p",
+ fifobuf, buf, sz, fifobuf->ubegin, fifobuf->uend));
+
+ return 0;
+}
+
+PJ_DEF(pj_status_t)
+pj_fifobuf_free (pj_fifobuf_t *fifobuf, void *buf)
+{
+ char *ptr = buf;
+ char *end;
+ unsigned sz;
+
+ PJ_CHECK_STACK();
+
+ ptr -= SZ;
+ if (ptr < fifobuf->first || ptr >= fifobuf->last) {
+ pj_assert(!"Invalid pointer to free");
+ return -1;
+ }
+
+ if (ptr != fifobuf->ubegin && ptr != fifobuf->first) {
+ pj_assert(!"Invalid free() sequence!");
+ return -1;
+ }
+
+ end = (fifobuf->uend > fifobuf->ubegin) ? fifobuf->uend : fifobuf->last;
+ sz = *(unsigned*)ptr;
+ if (ptr+sz > end) {
+ pj_assert(!"Invalid size!");
+ return -1;
+ }
+
+ fifobuf->ubegin = ptr + sz;
+
+ /* Rollover */
+ if (fifobuf->ubegin == fifobuf->last)
+ fifobuf->ubegin = fifobuf->first;
+
+ /* Reset if fifobuf is empty */
+ if (fifobuf->ubegin == fifobuf->uend)
+ fifobuf->ubegin = fifobuf->uend = fifobuf->first;
+
+ fifobuf->full = 0;
+
+ PJ_LOG(6, (THIS_FILE,
+ "fifobuf_free fifobuf=%p, ptr=%p, size=%d, p1=%p, p2=%p",
+ fifobuf, buf, sz, fifobuf->ubegin, fifobuf->uend));
+
+ return 0;
+}
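
A brief sketch (not part of the patch; fifobuf_demo() is a hypothetical name) of how the FIFO allocator above is used: chunks come out of a caller-supplied buffer and must be released in the same order they were allocated.

#include <pj/fifobuf.h>

static void fifobuf_demo(void)
{
    char storage[1024];
    pj_fifobuf_t fifo;
    void *chunk;

    pj_fifobuf_init(&fifo, storage, sizeof(storage));

    chunk = pj_fifobuf_alloc(&fifo, 128);   /* NULL when there is no room */
    if (chunk != NULL) {
        /* ... use the 128-byte chunk ... */
        pj_fifobuf_free(&fifo, chunk);      /* free in FIFO order */
    }
}
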
diff --git a/pjlib/src/pj/guid.c b/pjlib/src/pj/guid.c
new file mode 100644
index 00000000..20cb72a7
--- /dev/null
+++ b/pjlib/src/pj/guid.c
@@ -0,0 +1,19 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/guid.c 12 10/14/05 12:26a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/guid.c $
+ *
+ * 12 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 11 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ */
+#include <pj/guid.h>
+#include <pj/pool.h>
+
+PJ_DEF(void) pj_create_unique_string(pj_pool_t *pool, pj_str_t *str)
+{
+ str->ptr = pj_pool_alloc(pool, PJ_GUID_STRING_LENGTH);
+ pj_generate_unique_string(str);
+}
diff --git a/pjlib/src/pj/guid_simple.c b/pjlib/src/pj/guid_simple.c
new file mode 100644
index 00000000..b5cca310
--- /dev/null
+++ b/pjlib/src/pj/guid_simple.c
@@ -0,0 +1,60 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/guid_simple.c 3 10/14/05 12:26a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/guid_simple.c $
+ *
+ * 3 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 2 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ */
+#include <pj/guid.h>
+#include <pj/os.h>
+#include <pj/rand.h>
+#include <pj/string.h>
+#include <pj/compat/sprintf.h>
+
+const unsigned PJ_GUID_STRING_LENGTH=20;
+
+static void init_mac_address(unsigned char mac_addr[6])
+{
+ unsigned long *ulval1 = (unsigned long*) &mac_addr[0];
+ unsigned short *usval1 = (unsigned short*) &mac_addr[4];
+
+ *ulval1 = pj_rand();
+ *usval1 = (unsigned short) pj_rand();
+}
+
+PJ_DEF(pj_str_t*) pj_generate_unique_string(pj_str_t *str)
+{
+ static int guid_initialized;
+ static unsigned pid;
+ static char str_pid[5];
+ static unsigned char mac_addr[6];
+ static char str_mac_addr[16];
+ static unsigned clock_seq;
+
+ PJ_CHECK_STACK();
+
+ if (guid_initialized == 0) {
+ pid = pj_getpid();
+ init_mac_address(mac_addr);
+ clock_seq = 0;
+
+ sprintf(str_pid, "%04x", pid & 0xFFFF);
+ sprintf(str_mac_addr, "%02x%02x%02x%02x%02x%02x",
+ mac_addr[0], mac_addr[1], mac_addr[2],
+ mac_addr[3], mac_addr[4], mac_addr[5]);
+
+ guid_initialized = 1;
+ }
+
+ strcpy(str->ptr, str_pid);
+ sprintf(str->ptr+4, "%04x", clock_seq++);
+ pj_memcpy(str->ptr+8, str_mac_addr, 12);
+ str->slen = 20;
+
+ return str;
+}
+
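
For illustration (guid_demo() is a hypothetical helper, not part of the patch), generating an id with the backend above only requires a caller-supplied buffer large enough for PJ_GUID_STRING_LENGTH characters:

#include <pj/types.h>
#include <pj/guid.h>

static void guid_demo(void)
{
    char buf[32];            /* >= PJ_GUID_STRING_LENGTH for any backend */
    pj_str_t guid;

    guid.ptr = buf;
    pj_generate_unique_string(&guid);
    /* guid.ptr/guid.slen now hold the id; it is not null-terminated. */
}
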
diff --git a/pjlib/src/pj/guid_win32.c b/pjlib/src/pj/guid_win32.c
new file mode 100644
index 00000000..8e3707ec
--- /dev/null
+++ b/pjlib/src/pj/guid_win32.c
@@ -0,0 +1,61 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/guid_win32.c 4 10/14/05 12:26a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/guid_win32.c $
+ *
+ * 4 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 3 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ */
+#include <pj/guid.h>
+#include <pj/string.h>
+#include <pj/sock.h>
+#include <windows.h>
+#include <objbase.h>
+#include <pj/os.h>
+
+
+const unsigned PJ_GUID_STRING_LENGTH=32;
+
+PJ_INLINE(void) hex2digit(unsigned value, char *p)
+{
+ static char hex[] = {'0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' };
+ *p++ = hex[ (value & 0xF0) >> 4 ];
+ *p++ = hex[ (value & 0x0F) ];
+}
+
+static void guid_to_str( const GUID *guid, pj_str_t *str )
+{
+ unsigned i;
+ GUID guid_copy;
+ const unsigned char *src = (const unsigned char*)&guid_copy;
+ char *dst = str->ptr;
+
+ pj_memcpy(&guid_copy, guid, sizeof(*guid));
+ guid_copy.Data1 = pj_ntohl(guid_copy.Data1);
+ guid_copy.Data2 = pj_ntohs(guid_copy.Data2);
+ guid_copy.Data3 = pj_ntohs(guid_copy.Data3);
+
+ for (i=0; i<16; ++i) {
+ hex2digit( *src, dst );
+ dst += 2;
+ ++src;
+ }
+ str->slen = 32;
+}
+
+
+PJ_DEF(pj_str_t*) pj_generate_unique_string(pj_str_t *str)
+{
+ GUID guid;
+
+ PJ_CHECK_STACK();
+
+ CoCreateGuid(&guid);
+ guid_to_str( &guid, str );
+ return str;
+}
+
diff --git a/pjlib/src/pj/hash.c b/pjlib/src/pj/hash.c
new file mode 100644
index 00000000..01bfaa6b
--- /dev/null
+++ b/pjlib/src/pj/hash.c
@@ -0,0 +1,252 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/hash.c 8 10/14/05 12:26a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/hash.c $
+ *
+ * 8 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 7 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ */
+#include <pj/hash.h>
+#include <pj/log.h>
+#include <pj/string.h>
+#include <pj/pool.h>
+#include <pj/os.h>
+
+/**
+ * The hash multiplier used to calculate hash value.
+ */
+#define PJ_HASH_MULTIPLIER 33
+
+
+struct pj_hash_entry
+{
+ struct pj_hash_entry *next;
+ const void *key;
+ pj_uint32_t hash;
+ pj_uint32_t keylen;
+ void *value;
+};
+
+
+struct pj_hash_table_t
+{
+ pj_hash_entry **table;
+ unsigned count, rows;
+ pj_hash_iterator_t iterator;
+};
+
+
+
+PJ_DEF(pj_uint32_t) pj_hash_calc(pj_uint32_t hash, const void *key, unsigned keylen)
+{
+ PJ_CHECK_STACK();
+
+ if (keylen==PJ_HASH_KEY_STRING) {
+ const unsigned char *p = key;
+ for ( ; *p; ++p ) {
+ hash = hash * PJ_HASH_MULTIPLIER + *p;
+ }
+ keylen = p - (const unsigned char*)key;
+ } else {
+ const unsigned char *p = key,
+ *end = p + keylen;
+ for ( ; p!=end; ++p) {
+ hash = hash * PJ_HASH_MULTIPLIER + *p;
+ }
+ }
+ return hash;
+}
+
+
+PJ_DEF(pj_hash_table_t*) pj_hash_create(pj_pool_t *pool, unsigned size)
+{
+ pj_hash_table_t *h;
+ unsigned table_size;
+
+ h = pj_pool_alloc(pool, sizeof(pj_hash_table_t));
+ h->count = 0;
+
+ PJ_LOG( 5, ("hashtbl", "hash table %p created from pool %s", h, pj_pool_getobjname(pool)));
+
+ /* The table size must be 2^n - 1.
+ Round the requested size up to this rule, except when size is exactly
+ 2^n, in which case it is rounded down to 2^n - 1.
+ */
+ table_size = 8;
+ do {
+ table_size <<= 1;
+ } while (table_size < size);
+ table_size -= 1;
+
+ h->rows = table_size;
+ h->table = pj_pool_calloc(pool, table_size+1, sizeof(pj_hash_entry*));
+ return h;
+}
+
+static pj_hash_entry **find_entry( pj_pool_t *pool, pj_hash_table_t *ht,
+ const void *key, unsigned keylen,
+ void *val)
+{
+ pj_uint32_t hash;
+ pj_hash_entry **p_entry, *entry;
+
+ hash=0;
+ if (keylen==PJ_HASH_KEY_STRING) {
+ const unsigned char *p = key;
+ for ( ; *p; ++p ) {
+ hash = hash * PJ_HASH_MULTIPLIER + *p;
+ }
+ keylen = p - (const unsigned char*)key;
+ } else {
+ const unsigned char *p = key,
+ *end = p + keylen;
+ for ( ; p!=end; ++p) {
+ hash = hash * PJ_HASH_MULTIPLIER + *p;
+ }
+ }
+
+ /* scan the linked list */
+ for (p_entry = &ht->table[hash & ht->rows], entry=*p_entry;
+ entry;
+ p_entry = &entry->next, entry = *p_entry)
+ {
+ if (entry->hash==hash && entry->keylen==keylen &&
+ memcmp(entry->key, key, keylen)==0)
+ {
+ break;
+ }
+ }
+
+ if (entry || val==NULL)
+ return p_entry;
+
+ /* create a new entry */
+ entry = pj_pool_alloc(pool, sizeof(pj_hash_entry));
+ PJ_LOG(5, ("hashtbl", "%p: New p_entry %p created, pool used=%u, cap=%u", ht, entry,
+ pj_pool_get_used_size(pool), pj_pool_get_capacity(pool)));
+ entry->next = NULL;
+ entry->hash = hash;
+ entry->key = key;
+ entry->keylen = keylen;
+ entry->value = val;
+ *p_entry = entry;
+
+ ++ht->count;
+
+ return p_entry;
+}
+
+PJ_DEF(void *) pj_hash_get( pj_hash_table_t *ht,
+ const void *key, unsigned keylen )
+{
+ pj_hash_entry *entry;
+ entry = *find_entry( NULL, ht, key, keylen, NULL);
+ return entry ? entry->value : NULL;
+}
+
+PJ_DEF(void) pj_hash_set( pj_pool_t *pool, pj_hash_table_t *ht,
+ const void *key, unsigned keylen,
+ void *value )
+{
+ pj_hash_entry **p_entry;
+
+ p_entry = find_entry( pool, ht, key, keylen, value );
+ if (*p_entry) {
+ if (value == NULL) {
+ /* delete entry */
+ PJ_LOG(5, ("hashtbl", "%p: p_entry %p deleted", ht, *p_entry));
+ *p_entry = (*p_entry)->next;
+ --ht->count;
+
+ } else {
+ /* overwrite */
+ (*p_entry)->value = value;
+ PJ_LOG(5, ("hashtbl", "%p: p_entry %p value set to %p", ht, *p_entry, value));
+ }
+ }
+}
+
+PJ_DEF(unsigned) pj_hash_count( pj_hash_table_t *ht )
+{
+ return ht->count;
+}
+
+PJ_DEF(pj_hash_iterator_t*) pj_hash_first( pj_hash_table_t *ht,
+ pj_hash_iterator_t *it )
+{
+ it->index = 0;
+ it->entry = NULL;
+
+ for (; it->index < ht->rows; ++it->index) {
+ it->entry = ht->table[it->index];
+ if (it->entry) {
+ break;
+ }
+ }
+
+ return it->entry ? it : NULL;
+}
+
+PJ_DEF(pj_hash_iterator_t*) pj_hash_next( pj_hash_table_t *ht,
+ pj_hash_iterator_t *it )
+{
+ it->entry = it->entry->next;
+ if (it->entry) {
+ return it;
+ }
+
+ for (++it->index; it->index < ht->rows; ++it->index) {
+ it->entry = ht->table[it->index];
+ if (it->entry) {
+ break;
+ }
+ }
+
+ return it->entry ? it : NULL;
+}
+
+PJ_DEF(void*) pj_hash_this( pj_hash_table_t *ht, pj_hash_iterator_t *it )
+{
+ PJ_CHECK_STACK();
+ PJ_UNUSED_ARG(ht);
+ return it->entry->value;
+}
+
+#if 0
+void pj_hash_dump_collision( pj_hash_table_t *ht )
+{
+ unsigned min=0xFFFFFFFF, max=0;
+ unsigned i;
+ char line[120];
+ int len, totlen = 0;
+
+ for (i=0; i<ht->rows; ++i) {
+ unsigned count = 0;
+ pj_hash_entry *entry = ht->table[i];
+ while (entry) {
+ ++count;
+ entry = entry->next;
+ }
+ if (count < min)
+ min = count;
+ if (count > max)
+ max = count;
+ len = pj_snprintf( line+totlen, sizeof(line)-totlen, "%3d:%3d ", i, count);
+ if (len < 1)
+ break;
+ totlen += len;
+
+ if ((i+1) % 10 == 0) {
+ line[totlen] = '\0';
+ PJ_LOG(4,(__FILE__, line));
+ }
+ }
+
+ PJ_LOG(4,(__FILE__,"Count: %d, min: %d, max: %d\n", ht->count, min, max));
+}
+#endif
+
+
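
To show how the table above is intended to be used (a sketch, not part of the patch; hash_demo() is a hypothetical name and the pool is assumed to be created elsewhere): insert a value keyed by a string, look it up, iterate, then delete it by setting the value to NULL.

#include <pj/hash.h>
#include <pj/pool.h>

static void hash_demo(pj_pool_t *pool)
{
    pj_hash_table_t *ht = pj_hash_create(pool, 31);
    pj_hash_iterator_t it_buf, *it;
    static const char key[] = "alice";  /* key storage must outlive the entry */
    int value = 42;

    /* NULL-terminated string key; the table computes the length itself. */
    pj_hash_set(pool, ht, key, PJ_HASH_KEY_STRING, &value);

    if (pj_hash_get(ht, key, PJ_HASH_KEY_STRING) == &value) {
        /* found it */
    }

    for (it=pj_hash_first(ht, &it_buf); it!=NULL; it=pj_hash_next(ht, it)) {
        void *entry_value = pj_hash_this(ht, it);
        /* ... use entry_value ... */
        PJ_UNUSED_ARG(entry_value);
    }

    /* Setting the value to NULL removes the entry. */
    pj_hash_set(NULL, ht, key, PJ_HASH_KEY_STRING, NULL);
}
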
diff --git a/pjlib/src/pj/ioqueue_dummy.c b/pjlib/src/pj/ioqueue_dummy.c
new file mode 100644
index 00000000..63abc15b
--- /dev/null
+++ b/pjlib/src/pj/ioqueue_dummy.c
@@ -0,0 +1,186 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/ioqueue_dummy.c 2 10/29/05 11:31a Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pj/ioqueue_dummy.c $
+ *
+ * 2 10/29/05 11:31a Bennylp
+ * Changed accept and lock.
+ *
+ * 1 10/23/05 12:53p Bennylp
+ * Created.
+ *
+ */
+#include <pj/ioqueue.h>
+#include <pj/os.h>
+#include <pj/log.h>
+#include <pj/list.h>
+#include <pj/pool.h>
+#include <pj/string.h>
+#include <pj/assert.h>
+#include <pj/sock.h>
+#include <pj/errno.h>
+
+#define THIS_FILE "ioqueue"
+
+#define PJ_IOQUEUE_IS_READ_OP(op) \
+ ((op & PJ_IOQUEUE_OP_READ) || (op & PJ_IOQUEUE_OP_RECV_FROM))
+#define PJ_IOQUEUE_IS_WRITE_OP(op) \
+ ((op & PJ_IOQUEUE_OP_WRITE) || (op & PJ_IOQUEUE_OP_SEND_TO))
+
+
+#if PJ_HAS_TCP
+# define PJ_IOQUEUE_IS_ACCEPT_OP(op) (op & PJ_IOQUEUE_OP_ACCEPT)
+# define PJ_IOQUEUE_IS_CONNECT_OP(op) (op & PJ_IOQUEUE_OP_CONNECT)
+#else
+# define PJ_IOQUEUE_IS_ACCEPT_OP(op) 0
+# define PJ_IOQUEUE_IS_CONNECT_OP(op) 0
+#endif
+
+#if defined(PJ_DEBUG) && PJ_DEBUG != 0
+# define VALIDATE_FD_SET 1
+#else
+# define VALIDATE_FD_SET 0
+#endif
+
+struct pj_ioqueue_key_t
+{
+ PJ_DECL_LIST_MEMBER(struct pj_ioqueue_key_t)
+ pj_sock_t fd;
+ pj_ioqueue_operation_e op;
+ void *user_data;
+ pj_ioqueue_callback cb;
+};
+
+struct pj_ioqueue_t
+{
+};
+
+PJ_DEF(pj_status_t) pj_ioqueue_create( pj_pool_t *pool,
+ pj_size_t max_fd,
+ int max_threads,
+ pj_ioqueue_t **ptr_ioqueue)
+{
+ return PJ_ENOTSUP;
+}
+
+PJ_DEF(pj_status_t) pj_ioqueue_destroy(pj_ioqueue_t *ioque)
+{
+ return PJ_ENOTSUP;
+}
+
+PJ_DEF(pj_status_t) pj_ioqueue_set_lock( pj_ioqueue_t *ioque,
+ pj_lock_t *lock,
+ pj_bool_t auto_delete )
+{
+ return PJ_ENOTSUP;
+}
+
+PJ_DEF(pj_status_t) pj_ioqueue_register_sock( pj_pool_t *pool,
+ pj_ioqueue_t *ioque,
+ pj_sock_t sock,
+ void *user_data,
+ const pj_ioqueue_callback *cb,
+ pj_ioqueue_key_t **ptr_key)
+{
+ return PJ_ENOTSUP;
+}
+
+PJ_DEF(pj_status_t) pj_ioqueue_unregister( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key)
+{
+ return PJ_ENOTSUP;
+}
+
+PJ_DEF(void*) pj_ioqueue_get_user_data( pj_ioqueue_key_t *key )
+{
+ return NULL;
+}
+
+
+PJ_DEF(int) pj_ioqueue_poll( pj_ioqueue_t *ioque, const pj_time_val *timeout)
+{
+ return -1;
+}
+
+PJ_DEF(pj_status_t) pj_ioqueue_read( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key,
+ void *buffer,
+ pj_size_t buflen)
+{
+ return -1;
+}
+
+PJ_DEF(pj_status_t) pj_ioqueue_recv( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key,
+ void *buffer,
+ pj_size_t buflen,
+ unsigned flags)
+{
+ return -1;
+}
+
+PJ_DEF(pj_status_t) pj_ioqueue_recvfrom( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key,
+ void *buffer,
+ pj_size_t buflen,
+ unsigned flags,
+ pj_sockaddr_t *addr,
+ int *addrlen)
+{
+ return -1;
+}
+
+PJ_DEF(pj_status_t) pj_ioqueue_write( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key,
+ const void *data,
+ pj_size_t datalen)
+{
+ return -1;
+}
+
+PJ_DEF(pj_status_t) pj_ioqueue_send( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key,
+ const void *data,
+ pj_size_t datalen,
+ unsigned flags)
+{
+ return -1;
+}
+
+PJ_DEF(pj_status_t) pj_ioqueue_sendto( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key,
+ const void *data,
+ pj_size_t datalen,
+ unsigned flags,
+ const pj_sockaddr_t *addr,
+ int addrlen)
+{
+ return -1;
+}
+
+#if PJ_HAS_TCP
+/*
+ * Initiate overlapped accept() operation.
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_accept( pj_ioqueue_t *ioqueue,
+ pj_ioqueue_key_t *key,
+ pj_sock_t *new_sock,
+ pj_sockaddr_t *local,
+ pj_sockaddr_t *remote,
+ int *addrlen)
+{
+ return -1;
+}
+
+/*
+ * Initiate overlapped connect() operation (well, it's non-blocking actually,
+ * since there's no overlapped version of connect()).
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_connect( pj_ioqueue_t *ioqueue,
+ pj_ioqueue_key_t *key,
+ const pj_sockaddr_t *addr,
+ int addrlen )
+{
+ return -1;
+}
+#endif /* PJ_HAS_TCP */
+
diff --git a/pjlib/src/pj/ioqueue_epoll.c b/pjlib/src/pj/ioqueue_epoll.c
new file mode 100644
index 00000000..7bbfe135
--- /dev/null
+++ b/pjlib/src/pj/ioqueue_epoll.c
@@ -0,0 +1,852 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/ioqueue_epoll.c 4 10/29/05 10:27p Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pj/ioqueue_epoll.c $
+ *
+ * 4 10/29/05 10:27p Bennylp
+ * Fixed misc warnings.
+ *
+ * 3 10/29/05 11:49a Bennylp
+ * Fixed warnings.
+ *
+ * 2 10/29/05 11:31a Bennylp
+ * Changed accept and lock.
+ *
+ * 1 10/17/05 10:49p Bennylp
+ * Created.
+ *
+ */
+
+/*
+ * ioqueue_epoll.c
+ *
+ * This is the implementation of the IOQueue framework using the /dev/epoll
+ * API in _both_ Linux user-mode and kernel-mode.
+ */
+
+#include <pj/ioqueue.h>
+#include <pj/os.h>
+#include <pj/lock.h>
+#include <pj/log.h>
+#include <pj/list.h>
+#include <pj/pool.h>
+#include <pj/string.h>
+#include <pj/assert.h>
+#include <pj/errno.h>
+#include <pj/sock.h>
+#include <pj/compat/socket.h>
+
+#if !defined(PJ_LINUX_KERNEL) || PJ_LINUX_KERNEL==0
+ /*
+ * Linux user mode
+ */
+# include <sys/epoll.h>
+# include <errno.h>
+# include <unistd.h>
+
+# define epoll_data data.ptr
+# define epoll_data_type void*
+# define ioctl_val_type unsigned long*
+# define getsockopt_val_ptr int*
+# define os_getsockopt getsockopt
+# define os_ioctl ioctl
+# define os_read read
+# define os_close close
+# define os_epoll_create epoll_create
+# define os_epoll_ctl epoll_ctl
+# define os_epoll_wait epoll_wait
+#else
+ /*
+ * Linux kernel mode.
+ */
+# include <linux/config.h>
+# include <linux/version.h>
+# if defined(MODVERSIONS)
+# include <linux/modversions.h>
+# endif
+# include <linux/kernel.h>
+# include <linux/poll.h>
+# include <linux/eventpoll.h>
+# include <linux/syscalls.h>
+# include <linux/errno.h>
+# include <linux/unistd.h>
+# include <asm/ioctls.h>
+ enum EPOLL_EVENTS
+ {
+ EPOLLIN = 0x001,
+ EPOLLOUT = 0x004,
+ EPOLLERR = 0x008,
+ };
+# define os_epoll_create sys_epoll_create
+ static int os_epoll_ctl(int epfd, int op, int fd, struct epoll_event *event)
+ {
+ long rc;
+ mm_segment_t oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ rc = sys_epoll_ctl(epfd, op, fd, event);
+ set_fs(oldfs);
+ if (rc) {
+ errno = -rc;
+ return -1;
+ } else {
+ return 0;
+ }
+ }
+ static int os_epoll_wait(int epfd, struct epoll_event *events,
+ int maxevents, int timeout)
+ {
+ int count;
+ mm_segment_t oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ count = sys_epoll_wait(epfd, events, maxevents, timeout);
+ set_fs(oldfs);
+ return count;
+ }
+# define os_close sys_close
+# define os_getsockopt pj_sock_getsockopt
+ static int os_read(int fd, void *buf, size_t len)
+ {
+ long rc;
+ mm_segment_t oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ rc = sys_read(fd, buf, len);
+ set_fs(oldfs);
+ if (rc < 0) {
+ errno = -rc;
+ return -1;
+ } else {
+ return rc;
+ }
+ }
+# define socklen_t unsigned
+# define ioctl_val_type unsigned long
+ int ioctl(int fd, int opt, ioctl_val_type value);
+ static int os_ioctl(int fd, int opt, ioctl_val_type value)
+ {
+ int rc;
+ mm_segment_t oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ rc = ioctl(fd, opt, value);
+ set_fs(oldfs);
+ if (rc < 0) {
+ errno = -rc;
+ return rc;
+ } else
+ return rc;
+ }
+# define getsockopt_val_ptr char*
+
+# define epoll_data data
+# define epoll_data_type __u32
+#endif
+
+#define THIS_FILE "ioq_epoll"
+
+#define PJ_IOQUEUE_IS_READ_OP(op) ((op & PJ_IOQUEUE_OP_READ) || \
+ (op & PJ_IOQUEUE_OP_RECV) || \
+ (op & PJ_IOQUEUE_OP_RECV_FROM))
+#define PJ_IOQUEUE_IS_WRITE_OP(op) ((op & PJ_IOQUEUE_OP_WRITE) || \
+ (op & PJ_IOQUEUE_OP_SEND) || \
+ (op & PJ_IOQUEUE_OP_SEND_TO))
+
+
+#if PJ_HAS_TCP
+# define PJ_IOQUEUE_IS_ACCEPT_OP(op) (op & PJ_IOQUEUE_OP_ACCEPT)
+# define PJ_IOQUEUE_IS_CONNECT_OP(op) (op & PJ_IOQUEUE_OP_CONNECT)
+#else
+# define PJ_IOQUEUE_IS_ACCEPT_OP(op) 0
+# define PJ_IOQUEUE_IS_CONNECT_OP(op) 0
+#endif
+
+
+//#define TRACE_(expr) PJ_LOG(3,expr)
+#define TRACE_(expr)
+
+
+/*
+ * This describes each key.
+ */
+struct pj_ioqueue_key_t
+{
+ PJ_DECL_LIST_MEMBER(struct pj_ioqueue_key_t)
+ pj_sock_t fd;
+ pj_ioqueue_operation_e op;
+ void *user_data;
+ pj_ioqueue_callback cb;
+
+ void *rd_buf;
+ unsigned rd_flags;
+ pj_size_t rd_buflen;
+ void *wr_buf;
+ pj_size_t wr_buflen;
+
+ pj_sockaddr_t *rmt_addr;
+ int *rmt_addrlen;
+
+ pj_sockaddr_t *local_addr;
+ int *local_addrlen;
+
+ pj_sock_t *accept_fd;
+};
+
+/*
+ * This describes the I/O queue.
+ */
+struct pj_ioqueue_t
+{
+ pj_lock_t *lock;
+ pj_bool_t auto_delete_lock;
+ unsigned max, count;
+ pj_ioqueue_key_t hlist;
+ int epfd;
+};
+
+/*
+ * pj_ioqueue_create()
+ *
+ * Create epoll ioqueue.
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_create( pj_pool_t *pool,
+ pj_size_t max_fd,
+ int max_threads,
+ pj_ioqueue_t **p_ioqueue)
+{
+ pj_ioqueue_t *ioque;
+ pj_status_t rc;
+
+ PJ_UNUSED_ARG(max_threads);
+
+ if (max_fd > PJ_IOQUEUE_MAX_HANDLES) {
+ pj_assert(!"max_fd too large");
+ return PJ_EINVAL;
+ }
+
+ ioque = pj_pool_alloc(pool, sizeof(pj_ioqueue_t));
+ ioque->max = max_fd;
+ ioque->count = 0;
+ pj_list_init(&ioque->hlist);
+
+ rc = pj_lock_create_recursive_mutex(pool, "ioq%p", &ioque->lock);
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ ioque->auto_delete_lock = PJ_TRUE;
+ ioque->epfd = os_epoll_create(max_fd);
+ if (ioque->epfd < 0) {
+ return PJ_RETURN_OS_ERROR(pj_get_native_os_error());
+ }
+
+ PJ_LOG(4, ("pjlib", "epoll I/O Queue created (%p)", ioque));
+
+ *p_ioqueue = ioque;
+ return PJ_SUCCESS;
+}
+
+/*
+ * pj_ioqueue_destroy()
+ *
+ * Destroy ioqueue.
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_destroy(pj_ioqueue_t *ioque)
+{
+ PJ_ASSERT_RETURN(ioque, PJ_EINVAL);
+ PJ_ASSERT_RETURN(ioque->epfd > 0, PJ_EINVALIDOP);
+
+ pj_lock_acquire(ioque->lock);
+ os_close(ioque->epfd);
+ ioque->epfd = 0;
+ if (ioque->auto_delete_lock)
+ pj_lock_destroy(ioque->lock);
+
+ return PJ_SUCCESS;
+}
+
+/*
+ * pj_ioqueue_set_lock()
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_set_lock( pj_ioqueue_t *ioque,
+ pj_lock_t *lock,
+ pj_bool_t auto_delete )
+{
+ PJ_ASSERT_RETURN(ioque && lock, PJ_EINVAL);
+
+ if (ioque->auto_delete_lock) {
+ pj_lock_destroy(ioque->lock);
+ }
+
+ ioque->lock = lock;
+ ioque->auto_delete_lock = auto_delete;
+
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * pj_ioqueue_register_sock()
+ *
+ * Register a socket to ioqueue.
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_register_sock( pj_pool_t *pool,
+ pj_ioqueue_t *ioque,
+ pj_sock_t sock,
+ void *user_data,
+ const pj_ioqueue_callback *cb,
+ pj_ioqueue_key_t **p_key)
+{
+ pj_ioqueue_key_t *key = NULL;
+ pj_uint32_t value;
+ struct epoll_event ev;
+ int status;
+ pj_status_t rc = PJ_SUCCESS;
+
+ PJ_ASSERT_RETURN(pool && ioque && sock != PJ_INVALID_SOCKET &&
+ cb && p_key, PJ_EINVAL);
+
+ pj_lock_acquire(ioque->lock);
+
+ if (ioque->count >= ioque->max) {
+ rc = PJ_ETOOMANY;
+ TRACE_((THIS_FILE, "pj_ioqueue_register_sock error: too many files"));
+ goto on_return;
+ }
+
+ /* Set socket to nonblocking. */
+ value = 1;
+ if ((rc=os_ioctl(sock, FIONBIO, (ioctl_val_type)&value))) {
+ TRACE_((THIS_FILE, "pj_ioqueue_register_sock error: ioctl rc=%d",
+ rc));
+ rc = pj_get_netos_error();
+ goto on_return;
+ }
+
+ /* Create key. */
+ key = (pj_ioqueue_key_t*)pj_pool_zalloc(pool, sizeof(pj_ioqueue_key_t));
+ key->fd = sock;
+ key->user_data = user_data;
+ pj_memcpy(&key->cb, cb, sizeof(pj_ioqueue_callback));
+
+ /* os_epoll_ctl. */
+ ev.events = EPOLLIN | EPOLLOUT | EPOLLERR;
+ ev.epoll_data = (epoll_data_type)key;
+ status = os_epoll_ctl(ioque->epfd, EPOLL_CTL_ADD, sock, &ev);
+ if (status < 0) {
+ rc = pj_get_os_error();
+ TRACE_((THIS_FILE,
+ "pj_ioqueue_register_sock error: os_epoll_ctl rc=%d",
+ status));
+ goto on_return;
+ }
+
+ /* Register */
+ pj_list_insert_before(&ioque->hlist, key);
+ ++ioque->count;
+
+on_return:
+ *p_key = key;
+ pj_lock_release(ioque->lock);
+
+ return rc;
+}
+
+/*
+ * pj_ioqueue_unregister()
+ *
+ * Unregister handle from ioqueue.
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_unregister( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key)
+{
+ struct epoll_event ev;
+ int status;
+
+ PJ_ASSERT_RETURN(ioque && key, PJ_EINVAL);
+
+ pj_lock_acquire(ioque->lock);
+
+ pj_assert(ioque->count > 0);
+ --ioque->count;
+ pj_list_erase(key);
+
+ ev.events = 0;
+ ev.epoll_data = (epoll_data_type)key;
+ status = os_epoll_ctl( ioque->epfd, EPOLL_CTL_DEL, key->fd, &ev);
+ if (status != 0) {
+ pj_status_t rc = pj_get_os_error();
+ pj_lock_release(ioque->lock);
+ return rc;
+ }
+
+ pj_lock_release(ioque->lock);
+ return PJ_SUCCESS;
+}
+
+/*
+ * pj_ioqueue_get_user_data()
+ *
+ * Obtain value associated with a key.
+ */
+PJ_DEF(void*) pj_ioqueue_get_user_data( pj_ioqueue_key_t *key )
+{
+ PJ_ASSERT_RETURN(key != NULL, NULL);
+ return key->user_data;
+}
+
+
+/*
+ * pj_ioqueue_poll()
+ *
+ */
+PJ_DEF(int) pj_ioqueue_poll( pj_ioqueue_t *ioque, const pj_time_val *timeout)
+{
+ int i, count, processed;
+ struct epoll_event events[16];
+ int msec;
+
+ PJ_CHECK_STACK();
+
+ msec = timeout ? PJ_TIME_VAL_MSEC(*timeout) : 9000;
+
+ count = os_epoll_wait( ioque->epfd, events, PJ_ARRAY_SIZE(events), msec);
+ if (count <= 0)
+ return count;
+
+ /* Lock ioqueue. */
+ pj_lock_acquire(ioque->lock);
+
+ processed = 0;
+
+ for (i=0; i<count; ++i) {
+ pj_ioqueue_key_t *h = (pj_ioqueue_key_t*)(epoll_data_type)
+ events[i].epoll_data;
+ pj_status_t rc;
+
+ /*
+ * Check for completion of read operations.
+ */
+ if ((events[i].events & EPOLLIN) && (PJ_IOQUEUE_IS_READ_OP(h->op))) {
+ pj_ssize_t bytes_read = h->rd_buflen;
+
+ if ((h->op & PJ_IOQUEUE_OP_RECV_FROM)) {
+ rc = pj_sock_recvfrom( h->fd, h->rd_buf, &bytes_read, 0,
+ h->rmt_addr, h->rmt_addrlen);
+ } else if ((h->op & PJ_IOQUEUE_OP_RECV)) {
+ rc = pj_sock_recv(h->fd, h->rd_buf, &bytes_read, 0);
+ } else {
+ bytes_read = os_read( h->fd, h->rd_buf, bytes_read);
+ rc = (bytes_read >= 0) ? PJ_SUCCESS : pj_get_os_error();
+ }
+
+ if (rc != PJ_SUCCESS) {
+ bytes_read = -rc;
+ }
+
+ h->op &= ~(PJ_IOQUEUE_OP_READ | PJ_IOQUEUE_OP_RECV |
+ PJ_IOQUEUE_OP_RECV_FROM);
+
+ /* Call callback. */
+ (*h->cb.on_read_complete)(h, bytes_read);
+
+ ++processed;
+ }
+ /*
+ * Check for completion of accept() operation.
+ */
+ else if ((events[i].events & EPOLLIN) &&
+ (h->op & PJ_IOQUEUE_OP_ACCEPT))
+ {
+ /* accept() must be the only operation specified on
+ * server socket
+ */
+ pj_assert( h->op == PJ_IOQUEUE_OP_ACCEPT);
+
+ rc = pj_sock_accept( h->fd, h->accept_fd,
+ h->rmt_addr, h->rmt_addrlen);
+ if (rc==PJ_SUCCESS && h->local_addr) {
+ rc = pj_sock_getsockname(*h->accept_fd, h->local_addr,
+ h->local_addrlen);
+ }
+
+ h->op &= ~(PJ_IOQUEUE_OP_ACCEPT);
+
+ /* Call callback. */
+ (*h->cb.on_accept_complete)(h, *h->accept_fd, rc);
+
+ ++processed;
+ }
+
+ /*
+ * Check for completion of write operations.
+ */
+ if ((events[i].events & EPOLLOUT) && PJ_IOQUEUE_IS_WRITE_OP(h->op)) {
+ /* Completion of write(), send(), or sendto() operation. */
+
+ /* Clear operation. */
+ h->op &= ~(PJ_IOQUEUE_OP_WRITE | PJ_IOQUEUE_OP_SEND |
+ PJ_IOQUEUE_OP_SEND_TO);
+
+ /* Call callback. */
+ /* All data must have been sent? */
+ (*h->cb.on_write_complete)(h, h->wr_buflen);
+
+ ++processed;
+ }
+#if PJ_HAS_TCP
+ /*
+ * Check for completion of connect() operation.
+ */
+ else if ((events[i].events & EPOLLOUT) &&
+ (h->op & PJ_IOQUEUE_OP_CONNECT))
+ {
+ /* Completion of connect() operation */
+ pj_ssize_t bytes_transfered;
+
+ /* from connect(2):
+ * On Linux, use getsockopt to read the SO_ERROR option at
+ * level SOL_SOCKET to determine whether connect() completed
+ * successfully (if SO_ERROR is zero).
+ */
+ int value;
+ socklen_t vallen = sizeof(value);
+ int gs_rc = os_getsockopt(h->fd, SOL_SOCKET, SO_ERROR,
+ (getsockopt_val_ptr)&value, &vallen);
+ if (gs_rc != 0) {
+ /* Argh!! What to do now???
+ * Just indicate that the socket is connected. The
+ * application will get error as soon as it tries to use
+ * the socket to send/receive.
+ */
+ bytes_transfered = 0;
+ } else {
+ bytes_transfered = value;
+ }
+
+ /* Clear operation. */
+ h->op &= (~PJ_IOQUEUE_OP_CONNECT);
+
+ /* Call callback. */
+ (*h->cb.on_connect_complete)(h, bytes_transfered);
+
+ ++processed;
+ }
+#endif /* PJ_HAS_TCP */
+
+ /*
+ * Check for error condition.
+ */
+ if (events[i].events & EPOLLERR) {
+ if (h->op & PJ_IOQUEUE_OP_CONNECT) {
+ h->op &= ~PJ_IOQUEUE_OP_CONNECT;
+
+ /* Call callback. */
+ (*h->cb.on_connect_complete)(h, -1);
+
+ ++processed;
+ }
+ }
+ }
+
+ pj_lock_release(ioque->lock);
+
+ return processed;
+}
+
+/*
+ * pj_ioqueue_read()
+ *
+ * Start asynchronous read from the descriptor.
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_read( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key,
+ void *buffer,
+ pj_size_t buflen)
+{
+ PJ_ASSERT_RETURN(ioque && key && buffer, PJ_EINVAL);
+ PJ_CHECK_STACK();
+
+ /* For consistency with the other ioqueue implementations, reject the
+ * operation if the descriptor already has a pending read operation.
+ */
+ PJ_ASSERT_RETURN(((key->op & PJ_IOQUEUE_OP_READ) == 0 &&
+ (key->op & PJ_IOQUEUE_OP_RECV) == 0 &&
+ (key->op & PJ_IOQUEUE_OP_RECV_FROM) == 0),
+ PJ_EBUSY);
+
+ pj_lock_acquire(ioque->lock);
+
+ key->op |= PJ_IOQUEUE_OP_READ;
+ key->rd_flags = 0;
+ key->rd_buf = buffer;
+ key->rd_buflen = buflen;
+
+ pj_lock_release(ioque->lock);
+ return PJ_EPENDING;
+}
+
+
+/*
+ * pj_ioqueue_recv()
+ *
+ * Start asynchronous recv() from the socket.
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_recv( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key,
+ void *buffer,
+ pj_size_t buflen,
+ unsigned flags )
+{
+ PJ_ASSERT_RETURN(ioque && key && buffer, PJ_EINVAL);
+ PJ_CHECK_STACK();
+
+ /* For consistency with the other ioqueue implementations, reject the
+ * operation if the descriptor already has a pending read operation.
+ */
+ PJ_ASSERT_RETURN(((key->op & PJ_IOQUEUE_OP_READ) == 0 &&
+ (key->op & PJ_IOQUEUE_OP_RECV) == 0 &&
+ (key->op & PJ_IOQUEUE_OP_RECV_FROM) == 0),
+ PJ_EBUSY);
+
+ pj_lock_acquire(ioque->lock);
+
+ key->op |= PJ_IOQUEUE_OP_RECV;
+ key->rd_buf = buffer;
+ key->rd_buflen = buflen;
+ key->rd_flags = flags;
+
+ pj_lock_release(ioque->lock);
+ return PJ_EPENDING;
+}
+
+/*
+ * pj_ioqueue_recvfrom()
+ *
+ * Start asynchronous recvfrom() from the socket.
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_recvfrom( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key,
+ void *buffer,
+ pj_size_t buflen,
+ unsigned flags,
+ pj_sockaddr_t *addr,
+ int *addrlen)
+{
+ PJ_ASSERT_RETURN(ioque && key && buffer, PJ_EINVAL);
+ PJ_CHECK_STACK();
+
+ /* For consistency with the other ioqueue implementations, reject the
+ * operation if the descriptor already has a pending read operation.
+ */
+ PJ_ASSERT_RETURN(((key->op & PJ_IOQUEUE_OP_READ) == 0 &&
+ (key->op & PJ_IOQUEUE_OP_RECV) == 0 &&
+ (key->op & PJ_IOQUEUE_OP_RECV_FROM) == 0),
+ PJ_EBUSY);
+
+ pj_lock_acquire(ioque->lock);
+
+ key->op |= PJ_IOQUEUE_OP_RECV_FROM;
+ key->rd_buf = buffer;
+ key->rd_buflen = buflen;
+ key->rd_flags = flags;
+ key->rmt_addr = addr;
+ key->rmt_addrlen = addrlen;
+
+ pj_lock_release(ioque->lock);
+ return PJ_EPENDING;
+}
+
+/*
+ * pj_ioqueue_write()
+ *
+ * Start asynchronous write() to the descriptor.
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_write( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key,
+ const void *data,
+ pj_size_t datalen)
+{
+ pj_status_t rc;
+ pj_ssize_t sent;
+
+ PJ_ASSERT_RETURN(ioque && key && data, PJ_EINVAL);
+ PJ_CHECK_STACK();
+
+ /* For consistency with the other ioqueue implementations, reject the
+ * operation if the descriptor already has a pending write operation.
+ */
+ PJ_ASSERT_RETURN(((key->op & PJ_IOQUEUE_OP_WRITE) == 0 &&
+ (key->op & PJ_IOQUEUE_OP_SEND) == 0 &&
+ (key->op & PJ_IOQUEUE_OP_SEND_TO) == 0),
+ PJ_EBUSY);
+
+ sent = datalen;
+ /* sent would be -1 after pj_sock_send() if it returns an error. */
+ rc = pj_sock_send(key->fd, data, &sent, 0);
+ if (rc != PJ_SUCCESS && rc != PJ_STATUS_FROM_OS(OSERR_EWOULDBLOCK)) {
+ return rc;
+ }
+
+ pj_lock_acquire(ioque->lock);
+
+ key->op |= PJ_IOQUEUE_OP_WRITE;
+ key->wr_buf = NULL;
+ key->wr_buflen = datalen;
+
+ pj_lock_release(ioque->lock);
+
+ return PJ_EPENDING;
+}
+
+/*
+ * pj_ioqueue_send()
+ *
+ * Start asynchronous send() to the descriptor.
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_send( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key,
+ const void *data,
+ pj_size_t datalen,
+ unsigned flags)
+{
+ pj_status_t rc;
+ pj_ssize_t sent;
+
+ PJ_ASSERT_RETURN(ioque && key && data, PJ_EINVAL);
+ PJ_CHECK_STACK();
+
+ /* For consistency with the other ioqueue implementations, reject the
+ * operation if the descriptor already has a pending write operation.
+ */
+ PJ_ASSERT_RETURN(((key->op & PJ_IOQUEUE_OP_WRITE) == 0 &&
+ (key->op & PJ_IOQUEUE_OP_SEND) == 0 &&
+ (key->op & PJ_IOQUEUE_OP_SEND_TO) == 0),
+ PJ_EBUSY);
+
+ sent = datalen;
+ /* sent would be -1 after pj_sock_send() if it returns an error. */
+ rc = pj_sock_send(key->fd, data, &sent, flags);
+ if (rc != PJ_SUCCESS && rc != PJ_STATUS_FROM_OS(OSERR_EWOULDBLOCK)) {
+ return rc;
+ }
+
+ pj_lock_acquire(ioque->lock);
+
+ key->op |= PJ_IOQUEUE_OP_SEND;
+ key->wr_buf = NULL;
+ key->wr_buflen = datalen;
+
+ pj_lock_release(ioque->lock);
+
+ return PJ_EPENDING;
+}
+
+
+/*
+ * pj_ioqueue_sendto()
+ *
+ * Start asynchronous write() to the descriptor.
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_sendto( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key,
+ const void *data,
+ pj_size_t datalen,
+ unsigned flags,
+ const pj_sockaddr_t *addr,
+ int addrlen)
+{
+ pj_status_t rc;
+ pj_ssize_t sent;
+
+ PJ_ASSERT_RETURN(ioque && key && data, PJ_EINVAL);
+ PJ_CHECK_STACK();
+
+ /* For consistency with the other ioqueue implementations, reject the
+ * operation if the descriptor already has a pending write operation.
+ */
+ PJ_ASSERT_RETURN(((key->op & PJ_IOQUEUE_OP_WRITE) == 0 &&
+ (key->op & PJ_IOQUEUE_OP_SEND) == 0 &&
+ (key->op & PJ_IOQUEUE_OP_SEND_TO) == 0),
+ PJ_EBUSY);
+
+ sent = datalen;
+ /* sent would be -1 after pj_sock_sendto() if it returns an error. */
+ rc = pj_sock_sendto(key->fd, data, &sent, flags, addr, addrlen);
+ if (rc != PJ_SUCCESS && rc != PJ_STATUS_FROM_OS(OSERR_EWOULDBLOCK)) {
+ return rc;
+ }
+
+ pj_lock_acquire(ioque->lock);
+
+ key->op |= PJ_IOQUEUE_OP_SEND_TO;
+ key->wr_buf = NULL;
+ key->wr_buflen = datalen;
+
+ pj_lock_release(ioque->lock);
+ return PJ_EPENDING;
+}
+
+#if PJ_HAS_TCP
+/*
+ * Initiate overlapped accept() operation.
+ */
+PJ_DEF(int) pj_ioqueue_accept( pj_ioqueue_t *ioqueue,
+ pj_ioqueue_key_t *key,
+ pj_sock_t *new_sock,
+ pj_sockaddr_t *local,
+ pj_sockaddr_t *remote,
+ int *addrlen)
+{
+ /* check parameters. All must be specified! */
+ pj_assert(ioqueue && key && new_sock);
+
+ /* Server socket must have no other operation! */
+ pj_assert(key->op == 0);
+
+ pj_lock_acquire(ioqueue->lock);
+
+ key->op = PJ_IOQUEUE_OP_ACCEPT;
+ key->accept_fd = new_sock;
+ key->rmt_addr = remote;
+ key->rmt_addrlen = addrlen;
+ key->local_addr = local;
+ key->local_addrlen = addrlen; /* use same addr. as rmt_addrlen */
+
+ pj_lock_release(ioqueue->lock);
+ return PJ_EPENDING;
+}
+
+/*
+ * Initiate overlapped connect() operation (well, it's non-blocking actually,
+ * since there's no overlapped version of connect()).
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_connect( pj_ioqueue_t *ioqueue,
+ pj_ioqueue_key_t *key,
+ const pj_sockaddr_t *addr,
+ int addrlen )
+{
+ pj_status_t rc;
+
+ /* check parameters. All must be specified! */
+ PJ_ASSERT_RETURN(ioqueue && key && addr && addrlen, PJ_EINVAL);
+
+ /* Connecting socket must have no other operation! */
+ PJ_ASSERT_RETURN(key->op == 0, PJ_EBUSY);
+
+ rc = pj_sock_connect(key->fd, addr, addrlen);
+ if (rc == PJ_SUCCESS) {
+ /* Connected! */
+ return PJ_SUCCESS;
+ } else {
+ if (rc == PJ_STATUS_FROM_OS(OSERR_EINPROGRESS) ||
+ rc == PJ_STATUS_FROM_OS(OSERR_EWOULDBLOCK))
+ {
+ /* Pending! */
+ pj_lock_acquire(ioqueue->lock);
+ key->op = PJ_IOQUEUE_OP_CONNECT;
+ pj_lock_release(ioqueue->lock);
+ return PJ_EPENDING;
+ } else {
+ /* Error! */
+ return rc;
+ }
+ }
+}
+#endif /* PJ_HAS_TCP */
+
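
Putting the epoll ioqueue above together, a typical UDP receiver registers its socket with an on_read_complete callback, arms a recvfrom(), and drives completions from a polling loop. This is a hypothetical sketch, not part of the patch; run(), the global names and the buffer size are illustrative, and error handling and socket creation are omitted.

#include <pj/ioqueue.h>
#include <pj/sock.h>
#include <pj/pool.h>
#include <pj/string.h>
#include <pj/errno.h>

static char              rx_buf[512];
static pj_sockaddr_in    rx_addr;
static int               rx_addrlen = sizeof(rx_addr);
static pj_ioqueue_t     *g_ioqueue;
static pj_ioqueue_key_t *g_key;

/* Called from pj_ioqueue_poll() when the pending recvfrom() completes. */
static void on_read_complete(pj_ioqueue_key_t *key, pj_ssize_t bytes_read)
{
    if (bytes_read > 0) {
        /* ... process rx_buf ... */
    }
    /* Re-arm the read for the next datagram. */
    rx_addrlen = sizeof(rx_addr);
    pj_ioqueue_recvfrom(g_ioqueue, key, rx_buf, sizeof(rx_buf), 0,
                        &rx_addr, &rx_addrlen);
}

static void run(pj_pool_t *pool, pj_sock_t udp_sock)
{
    pj_ioqueue_callback cb;
    pj_time_val timeout = { 0, 100 };

    pj_memset(&cb, 0, sizeof(cb));
    cb.on_read_complete = &on_read_complete;

    pj_ioqueue_create(pool, PJ_IOQUEUE_MAX_HANDLES, 0, &g_ioqueue);
    pj_ioqueue_register_sock(pool, g_ioqueue, udp_sock, NULL, &cb, &g_key);

    pj_ioqueue_recvfrom(g_ioqueue, g_key, rx_buf, sizeof(rx_buf), 0,
                        &rx_addr, &rx_addrlen);

    for (;;)
        pj_ioqueue_poll(g_ioqueue, &timeout);
}
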
diff --git a/pjlib/src/pj/ioqueue_linux_kernel.c b/pjlib/src/pj/ioqueue_linux_kernel.c
new file mode 100644
index 00000000..b8338118
--- /dev/null
+++ b/pjlib/src/pj/ioqueue_linux_kernel.c
@@ -0,0 +1,150 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/ioqueue_linux_kernel.c 1 10/05/05 4:42p Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/ioqueue_linux_kernel.c $
+ *
+ * 1 10/05/05 4:42p Bennylp
+ * Created.
+ *
+ */
+#include <pj/ioqueue.h>
+#include <pj/os.h>
+#include <pj/log.h>
+#include <pj/list.h>
+#include <pj/pool.h>
+#include <pj/string.h>
+#include <pj/assert.h>
+#include <pj/sock.h>
+
+#define THIS_FILE "ioqueue"
+
+#define PJ_IOQUEUE_IS_READ_OP(op) \
+ ((op & PJ_IOQUEUE_OP_READ) || (op & PJ_IOQUEUE_OP_RECV_FROM))
+#define PJ_IOQUEUE_IS_WRITE_OP(op) \
+ ((op & PJ_IOQUEUE_OP_WRITE) || (op & PJ_IOQUEUE_OP_SEND_TO))
+
+
+#if PJ_HAS_TCP
+# define PJ_IOQUEUE_IS_ACCEPT_OP(op) (op & PJ_IOQUEUE_OP_ACCEPT)
+# define PJ_IOQUEUE_IS_CONNECT_OP(op) (op & PJ_IOQUEUE_OP_CONNECT)
+#else
+# define PJ_IOQUEUE_IS_ACCEPT_OP(op) 0
+# define PJ_IOQUEUE_IS_CONNECT_OP(op) 0
+#endif
+
+#if defined(PJ_DEBUG) && PJ_DEBUG != 0
+# define VALIDATE_FD_SET 1
+#else
+# define VALIDATE_FD_SET 0
+#endif
+
+struct pj_ioqueue_key_t
+{
+ PJ_DECL_LIST_MEMBER(struct pj_ioqueue_key_t)
+ pj_sock_t fd;
+ pj_ioqueue_operation_e op;
+ void *user_data;
+ pj_ioqueue_callback cb;
+};
+
+struct pj_ioqueue_t
+{
+};
+
+PJ_DEF(pj_ioqueue_t*) pj_ioqueue_create(pj_pool_t *pool, pj_size_t max_fd)
+{
+ return NULL;
+}
+
+PJ_DEF(pj_status_t) pj_ioqueue_destroy(pj_ioqueue_t *ioque)
+{
+ return 0;
+}
+
+PJ_DEF(pj_ioqueue_key_t*) pj_ioqueue_register( pj_pool_t *pool,
+ pj_ioqueue_t *ioque,
+ pj_oshandle_t sock,
+ void *user_data,
+ const pj_ioqueue_callback *cb)
+{
+ return NULL;
+}
+
+PJ_DEF(pj_status_t) pj_ioqueue_unregister( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key)
+{
+ return -1;
+}
+
+PJ_DEF(void*) pj_ioqueue_get_user_data( pj_ioqueue_key_t *key )
+{
+ return NULL;
+}
+
+
+PJ_DEF(int) pj_ioqueue_poll( pj_ioqueue_t *ioque, const pj_time_val *timeout)
+{
+ return -1;
+}
+
+PJ_DEF(int) pj_ioqueue_read( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key,
+ void *buffer,
+ pj_size_t buflen)
+{
+ return -1;
+}
+
+PJ_DEF(int) pj_ioqueue_recvfrom( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key,
+ void *buffer,
+ pj_size_t buflen,
+ pj_sockaddr_t *addr,
+ int *addrlen)
+{
+ return -1;
+}
+
+PJ_DEF(int) pj_ioqueue_write( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key,
+ const void *data,
+ pj_size_t datalen)
+{
+ return -1;
+}
+
+PJ_DEF(int) pj_ioqueue_sendto( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key,
+ const void *data,
+ pj_size_t datalen,
+ const pj_sockaddr_t *addr,
+ int addrlen)
+{
+ return -1;
+}
+
+#if PJ_HAS_TCP
+/*
+ * Initiate overlapped accept() operation.
+ */
+PJ_DEF(int) pj_ioqueue_accept( pj_ioqueue_t *ioqueue,
+ pj_ioqueue_key_t *key,
+ pj_sock_t *new_sock,
+ pj_sockaddr_t *local,
+ pj_sockaddr_t *remote,
+ int *addrlen)
+{
+ return -1;
+}
+
+/*
+ * Initiate overlapped connect() operation (well, it's non-blocking actually,
+ * since there's no overlapped version of connect()).
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_connect( pj_ioqueue_t *ioqueue,
+ pj_ioqueue_key_t *key,
+ const pj_sockaddr_t *addr,
+ int addrlen )
+{
+ return -1;
+}
+#endif /* PJ_HAS_TCP */
+
diff --git a/pjlib/src/pj/ioqueue_select.c b/pjlib/src/pj/ioqueue_select.c
new file mode 100644
index 00000000..615c758e
--- /dev/null
+++ b/pjlib/src/pj/ioqueue_select.c
@@ -0,0 +1,947 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/ioqueue_select.c 15 10/29/05 10:27p Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/ioqueue_select.c $
+ *
+ * 15 10/29/05 10:27p Bennylp
+ * Fixed misc warnings.
+ *
+ * 14 10/29/05 11:31a Bennylp
+ * Changed accept and lock.
+ *
+ * 13 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 12 9/21/05 1:39p Bennylp
+ * Periodic checkin for backup.
+ *
+ * 11 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ */
+
+/*
+ * ioqueue_select.c
+ *
+ * This is the implementation of the IOQueue framework using pj_sock_select().
+ * It runs wherever pj_sock_select() is available (currently
+ * Win32, Linux, Linux kernel, etc.).
+ */
+
+#include <pj/ioqueue.h>
+#include <pj/os.h>
+#include <pj/lock.h>
+#include <pj/log.h>
+#include <pj/list.h>
+#include <pj/pool.h>
+#include <pj/string.h>
+#include <pj/assert.h>
+#include <pj/sock.h>
+#include <pj/compat/socket.h>
+#include <pj/sock_select.h>
+#include <pj/errno.h>
+
+/*
+ * ISSUES with ioqueue_select()
+ *
+ * EAGAIN/EWOULDBLOCK error in recv():
+ * - when multiple threads are working with the ioqueue, the application
+ * may receive EAGAIN or EWOULDBLOCK in the receive callback.
+ * This error happens because more than one thread is watching for
+ * the same descriptor set, so when all of them call recv() or recvfrom()
+ * simultaneously, only one will succeed and the rest will get the error.
+ *
+ */
+#define THIS_FILE "ioq_select"
+
+#define PJ_IOQUEUE_IS_READ_OP(op) ((op & PJ_IOQUEUE_OP_READ) || \
+ (op & PJ_IOQUEUE_OP_RECV) || \
+ (op & PJ_IOQUEUE_OP_RECV_FROM))
+#define PJ_IOQUEUE_IS_WRITE_OP(op) ((op & PJ_IOQUEUE_OP_WRITE) || \
+ (op & PJ_IOQUEUE_OP_SEND) || \
+ (op & PJ_IOQUEUE_OP_SEND_TO))
+
+
+#if PJ_HAS_TCP
+# define PJ_IOQUEUE_IS_ACCEPT_OP(op) (op & PJ_IOQUEUE_OP_ACCEPT)
+# define PJ_IOQUEUE_IS_CONNECT_OP(op) (op & PJ_IOQUEUE_OP_CONNECT)
+#else
+# define PJ_IOQUEUE_IS_ACCEPT_OP(op) 0
+# define PJ_IOQUEUE_IS_CONNECT_OP(op) 0
+#endif
+
+/*
+ * During debugging build, VALIDATE_FD_SET is set.
+ * This will check the validity of the fd_sets.
+ */
+#if defined(PJ_DEBUG) && PJ_DEBUG != 0
+# define VALIDATE_FD_SET 1
+#else
+# define VALIDATE_FD_SET 0
+#endif
+
+/*
+ * This describes each key.
+ */
+struct pj_ioqueue_key_t
+{
+ PJ_DECL_LIST_MEMBER(struct pj_ioqueue_key_t)
+ pj_sock_t fd;
+ pj_ioqueue_operation_e op;
+ void *user_data;
+ pj_ioqueue_callback cb;
+
+ void *rd_buf;
+ unsigned rd_flags;
+ pj_size_t rd_buflen;
+ void *wr_buf;
+ pj_size_t wr_buflen;
+
+ pj_sockaddr_t *rmt_addr;
+ int *rmt_addrlen;
+
+ pj_sockaddr_t *local_addr;
+ int *local_addrlen;
+
+ pj_sock_t *accept_fd;
+};
+
+/*
+ * This describes the I/O queue itself.
+ */
+struct pj_ioqueue_t
+{
+ pj_lock_t *lock;
+ pj_bool_t auto_delete_lock;
+ unsigned max, count;
+ pj_ioqueue_key_t hlist;
+ pj_fd_set_t rfdset;
+ pj_fd_set_t wfdset;
+#if PJ_HAS_TCP
+ pj_fd_set_t xfdset;
+#endif
+};
+
+/*
+ * pj_ioqueue_create()
+ *
+ * Create select ioqueue.
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_create( pj_pool_t *pool,
+ pj_size_t max_fd,
+ int max_threads,
+ pj_ioqueue_t **p_ioqueue)
+{
+ pj_ioqueue_t *ioque;
+ pj_status_t rc;
+
+ PJ_UNUSED_ARG(max_threads);
+
+ if (max_fd > PJ_IOQUEUE_MAX_HANDLES) {
+ pj_assert(!"max_fd too large");
+ return PJ_EINVAL;
+ }
+
+ ioque = pj_pool_alloc(pool, sizeof(pj_ioqueue_t));
+ ioque->max = max_fd;
+ ioque->count = 0;
+ PJ_FD_ZERO(&ioque->rfdset);
+ PJ_FD_ZERO(&ioque->wfdset);
+#if PJ_HAS_TCP
+ PJ_FD_ZERO(&ioque->xfdset);
+#endif
+ pj_list_init(&ioque->hlist);
+
+ rc = pj_lock_create_recursive_mutex(pool, "ioq%p", &ioque->lock);
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ ioque->auto_delete_lock = PJ_TRUE;
+
+ PJ_LOG(4, ("pjlib", "select() I/O Queue created (%p)", ioque));
+
+ *p_ioqueue = ioque;
+ return PJ_SUCCESS;
+}
+
+/*
+ * pj_ioqueue_destroy()
+ *
+ * Destroy ioqueue.
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_destroy(pj_ioqueue_t *ioque)
+{
+ pj_status_t rc = PJ_SUCCESS;
+
+ PJ_ASSERT_RETURN(ioque, PJ_EINVAL);
+
+ if (ioque->auto_delete_lock)
+ rc = pj_lock_destroy(ioque->lock);
+
+ return rc;
+}
+
+
+/*
+ * pj_ioqueue_set_lock()
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_set_lock( pj_ioqueue_t *ioque,
+ pj_lock_t *lock,
+ pj_bool_t auto_delete )
+{
+ PJ_ASSERT_RETURN(ioque && lock, PJ_EINVAL);
+
+ if (ioque->auto_delete_lock) {
+ pj_lock_destroy(ioque->lock);
+ }
+
+ ioque->lock = lock;
+ ioque->auto_delete_lock = auto_delete;
+
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * pj_ioqueue_register_sock()
+ *
+ * Register a handle to ioqueue.
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_register_sock( pj_pool_t *pool,
+ pj_ioqueue_t *ioque,
+ pj_sock_t sock,
+ void *user_data,
+ const pj_ioqueue_callback *cb,
+ pj_ioqueue_key_t **p_key)
+{
+ pj_ioqueue_key_t *key = NULL;
+ pj_uint32_t value;
+ pj_status_t rc = PJ_SUCCESS;
+
+ PJ_ASSERT_RETURN(pool && ioque && sock != PJ_INVALID_SOCKET &&
+ cb && p_key, PJ_EINVAL);
+
+ pj_lock_acquire(ioque->lock);
+
+ if (ioque->count >= ioque->max) {
+ rc = PJ_ETOOMANY;
+ goto on_return;
+ }
+
+ /* Set socket to nonblocking. */
+ value = 1;
+#ifdef PJ_WIN32
+ if (ioctlsocket(sock, FIONBIO, (unsigned long*)&value)) {
+#else
+ if (ioctl(sock, FIONBIO, &value)) {
+#endif
+ rc = pj_get_netos_error();
+ goto on_return;
+ }
+
+ /* Create key. */
+ key = (pj_ioqueue_key_t*)pj_pool_zalloc(pool, sizeof(pj_ioqueue_key_t));
+ key->fd = sock;
+ key->user_data = user_data;
+
+ /* Save callback. */
+ pj_memcpy(&key->cb, cb, sizeof(pj_ioqueue_callback));
+
+ /* Register */
+ pj_list_insert_before(&ioque->hlist, key);
+ ++ioque->count;
+
+on_return:
+ *p_key = key;
+ pj_lock_release(ioque->lock);
+
+ return rc;
+}
+
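As a usage sketch of the registration path above: register a UDP socket with a callback and start the first asynchronous receive. The pool, socket, file-scope buffers and the on_udp_read body are illustrative assumptions; the callback field names and the pj_ioqueue_* signatures are taken from this file.

    static char           rx_buf[1500];     /* hypothetical receive buffer */
    static pj_sockaddr_in rx_src;
    static int            rx_src_len;

    static void on_udp_read(pj_ioqueue_key_t *key, pj_ssize_t size)
    {
        /* ... process rx_buf, then resubmit pj_ioqueue_recvfrom() ... */
    }

    static pj_status_t start_udp_rx(pj_pool_t *pool, pj_ioqueue_t *ioque,
                                    pj_sock_t udp_sock, void *app_data)
    {
        pj_ioqueue_callback cb;
        pj_ioqueue_key_t *key;
        pj_status_t rc;

        pj_memset(&cb, 0, sizeof(cb));
        cb.on_read_complete = &on_udp_read;

        rc = pj_ioqueue_register_sock(pool, ioque, udp_sock, app_data,
                                      &cb, &key);
        if (rc != PJ_SUCCESS)
            return rc;

        /* Kick off the first asynchronous receive; the buffer and address
         * must stay valid until on_udp_read() is called from
         * pj_ioqueue_poll(). Normally PJ_EPENDING is returned here.
         */
        rx_src_len = sizeof(rx_src);
        return pj_ioqueue_recvfrom(ioque, key, rx_buf, sizeof(rx_buf), 0,
                                   &rx_src, &rx_src_len);
    }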
+/*
+ * pj_ioqueue_unregister()
+ *
+ * Unregister handle from ioqueue.
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_unregister( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key)
+{
+ PJ_ASSERT_RETURN(ioque && key, PJ_EINVAL);
+
+ pj_lock_acquire(ioque->lock);
+
+ pj_assert(ioque->count > 0);
+ --ioque->count;
+ pj_list_erase(key);
+ PJ_FD_CLR(key->fd, &ioque->rfdset);
+ PJ_FD_CLR(key->fd, &ioque->wfdset);
+#if PJ_HAS_TCP
+ PJ_FD_CLR(key->fd, &ioque->xfdset);
+#endif
+
+ pj_lock_release(ioque->lock);
+ return PJ_SUCCESS;
+}
+
+/*
+ * pj_ioqueue_get_user_data()
+ *
+ * Obtain value associated with a key.
+ */
+PJ_DEF(void*) pj_ioqueue_get_user_data( pj_ioqueue_key_t *key )
+{
+ PJ_ASSERT_RETURN(key != NULL, NULL);
+ return key->user_data;
+}
+
+
+/* This is supposed to check whether the fd_set values are consistent
+ * with the operation currently set in each key.
+ */
+#if VALIDATE_FD_SET
+static void validate_sets(const pj_ioqueue_t *ioque,
+ const pj_fd_set_t *rfdset,
+ const pj_fd_set_t *wfdset,
+ const pj_fd_set_t *xfdset)
+{
+ pj_ioqueue_key_t *key;
+
+ key = ioque->hlist.next;
+ while (key != &ioque->hlist) {
+ if ((key->op & PJ_IOQUEUE_OP_READ)
+ || (key->op & PJ_IOQUEUE_OP_RECV)
+ || (key->op & PJ_IOQUEUE_OP_RECV_FROM)
+#if defined(PJ_HAS_TCP) && PJ_HAS_TCP != 0
+ || (key->op & PJ_IOQUEUE_OP_ACCEPT)
+#endif
+ )
+ {
+ pj_assert(PJ_FD_ISSET(key->fd, rfdset));
+ }
+ else {
+ pj_assert(PJ_FD_ISSET(key->fd, rfdset) == 0);
+ }
+ if ((key->op & PJ_IOQUEUE_OP_WRITE)
+ || (key->op & PJ_IOQUEUE_OP_SEND)
+ || (key->op & PJ_IOQUEUE_OP_SEND_TO)
+#if defined(PJ_HAS_TCP) && PJ_HAS_TCP != 0
+ || (key->op & PJ_IOQUEUE_OP_CONNECT)
+#endif
+ )
+ {
+ pj_assert(PJ_FD_ISSET(key->fd, wfdset));
+ }
+ else {
+ pj_assert(PJ_FD_ISSET(key->fd, wfdset) == 0);
+ }
+#if defined(PJ_HAS_TCP) && PJ_HAS_TCP != 0
+ if (key->op & PJ_IOQUEUE_OP_CONNECT)
+ {
+ pj_assert(PJ_FD_ISSET(key->fd, xfdset));
+ }
+ else {
+ pj_assert(PJ_FD_ISSET(key->fd, xfdset) == 0);
+ }
+#endif /* PJ_HAS_TCP */
+
+ key = key->next;
+ }
+}
+#endif /* VALIDATE_FD_SET */
+
+
+/*
+ * pj_ioqueue_poll()
+ *
+ * A few things worth noting:
+ *
+ * - we used to call only one callback per poll, but that didn't work
+ * very well. The reason is that in some situations the write
+ * callback gets called all the time and never gives the read
+ * callback a chance to run. This happens, for example, when the user
+ * submits a write operation inside the write callback.
+ * As a result, we changed the behaviour so that multiple callbacks
+ * are now called in a single poll. It should be fast too; we just
+ * need to be careful with the ioqueue data structs.
+ *
+ * - to guarantee preemptiveness etc., the poll function must strictly
+ * work on a copy of the ioqueue's fd_sets (not the originals).
+ *
+ * A usage sketch of a typical polling loop follows this function.
+ */
+PJ_DEF(int) pj_ioqueue_poll( pj_ioqueue_t *ioque, const pj_time_val *timeout)
+{
+ pj_fd_set_t rfdset, wfdset, xfdset;
+ int count;
+ pj_ioqueue_key_t *h;
+
+ /* Lock ioqueue before making fd_set copies */
+ pj_lock_acquire(ioque->lock);
+
+ if (PJ_FD_COUNT(&ioque->rfdset)==0 &&
+ PJ_FD_COUNT(&ioque->wfdset)==0 &&
+ PJ_FD_COUNT(&ioque->xfdset)==0)
+ {
+ pj_lock_release(ioque->lock);
+ if (timeout)
+ pj_thread_sleep(PJ_TIME_VAL_MSEC(*timeout));
+ return 0;
+ }
+
+ /* Copy ioqueue's pj_fd_set_t to local variables. */
+ pj_memcpy(&rfdset, &ioque->rfdset, sizeof(pj_fd_set_t));
+ pj_memcpy(&wfdset, &ioque->wfdset, sizeof(pj_fd_set_t));
+#if PJ_HAS_TCP
+ pj_memcpy(&xfdset, &ioque->xfdset, sizeof(pj_fd_set_t));
+#else
+ PJ_FD_ZERO(&xfdset);
+#endif
+
+#if VALIDATE_FD_SET
+ validate_sets(ioque, &rfdset, &wfdset, &xfdset);
+#endif
+
+ /* Unlock ioqueue before select(). */
+ pj_lock_release(ioque->lock);
+
+ count = pj_sock_select(FD_SETSIZE, &rfdset, &wfdset, &xfdset, timeout);
+
+ if (count <= 0)
+ return count;
+
+ /* Lock ioqueue again before scanning for signalled sockets. */
+ pj_lock_acquire(ioque->lock);
+
+#if PJ_HAS_TCP
+ /* Scan for exception socket */
+ h = ioque->hlist.next;
+do_except_scan:
+ for ( ; h!=&ioque->hlist; h = h->next) {
+ if ((h->op & PJ_IOQUEUE_OP_CONNECT) && PJ_FD_ISSET(h->fd, &xfdset))
+ break;
+ }
+ if (h != &ioque->hlist) {
+ /* 'connect()' should be the only operation. */
+ pj_assert((h->op == PJ_IOQUEUE_OP_CONNECT));
+
+ /* Clear operation. */
+ h->op &= ~(PJ_IOQUEUE_OP_CONNECT);
+ PJ_FD_CLR(h->fd, &ioque->wfdset);
+ PJ_FD_CLR(h->fd, &ioque->xfdset);
+ PJ_FD_CLR(h->fd, &wfdset);
+ PJ_FD_CLR(h->fd, &xfdset);
+
+ /* Call callback. */
+ if (h->cb.on_connect_complete)
+ (*h->cb.on_connect_complete)(h, -1);
+
+ /* Re-scan exception list. */
+ goto do_except_scan;
+ }
+#endif /* PJ_HAS_TCP */
+
+ /* Scan for readable socket. */
+ h = ioque->hlist.next;
+do_readable_scan:
+ for ( ; h!=&ioque->hlist; h = h->next) {
+ if ((PJ_IOQUEUE_IS_READ_OP(h->op) || PJ_IOQUEUE_IS_ACCEPT_OP(h->op)) &&
+ PJ_FD_ISSET(h->fd, &rfdset))
+ {
+ break;
+ }
+ }
+ if (h != &ioque->hlist) {
+ pj_status_t rc;
+
+ pj_assert(PJ_IOQUEUE_IS_READ_OP(h->op) ||
+ PJ_IOQUEUE_IS_ACCEPT_OP(h->op));
+
+# if PJ_HAS_TCP
+ if ((h->op & PJ_IOQUEUE_OP_ACCEPT)) {
+ /* accept() must be the only operation specified on server socket */
+ pj_assert(h->op == PJ_IOQUEUE_OP_ACCEPT);
+
+ rc=pj_sock_accept(h->fd, h->accept_fd, h->rmt_addr, h->rmt_addrlen);
+ if (rc==0 && h->local_addr) {
+ rc = pj_sock_getsockname(*h->accept_fd, h->local_addr,
+ h->local_addrlen);
+ }
+
+ h->op &= ~(PJ_IOQUEUE_OP_ACCEPT);
+ PJ_FD_CLR(h->fd, &ioque->rfdset);
+
+ /* Call callback. */
+ if (h->cb.on_accept_complete)
+ (*h->cb.on_accept_complete)(h, *h->accept_fd, rc);
+
+ /* Re-scan readable sockets. */
+ goto do_readable_scan;
+ }
+ else {
+# endif
+ pj_ssize_t bytes_read = h->rd_buflen;
+
+ if ((h->op & PJ_IOQUEUE_OP_RECV_FROM)) {
+ rc = pj_sock_recvfrom(h->fd, h->rd_buf, &bytes_read, 0,
+ h->rmt_addr, h->rmt_addrlen);
+ } else if ((h->op & PJ_IOQUEUE_OP_RECV)) {
+ rc = pj_sock_recv(h->fd, h->rd_buf, &bytes_read, 0);
+ } else {
+ /*
+ * The user has specified pj_ioqueue_read().
+ * On Win32, we should do ReadFile(). But since we got
+ * here because of select() anyway, the user must have put a
+ * socket descriptor in h->fd, in which case we can
+ * just call pj_sock_recv() instead of ReadFile().
+ * On Unix, the user may put a file in h->fd, so we'll have
+ * to call read() here.
+ * This may not compile on systems which don't have
+ * read(). That's why we only specify PJ_LINUX here, so
+ * that the error is easier to catch.
+ */
+# if defined(PJ_WIN32) && PJ_WIN32 != 0
+ rc = pj_sock_recv(h->fd, h->rd_buf, &bytes_read, 0);
+# elif defined(PJ_LINUX) && PJ_LINUX != 0
+ bytes_read = read(h->fd, h->rd_buf, bytes_read);
+ rc = (bytes_read >= 0) ? PJ_SUCCESS : pj_get_os_error();
+# elif defined(PJ_LINUX_KERNEL) && PJ_LINUX_KERNEL != 0
+ bytes_read = sys_read(h->fd, h->rd_buf, bytes_read);
+ rc = (bytes_read >= 0) ? PJ_SUCCESS : -bytes_read;
+# else
+# error "Check this man!"
+# endif
+ }
+
+ if (rc != PJ_SUCCESS) {
+# if defined(PJ_WIN32) && PJ_WIN32 != 0
+ /* On Win32, for UDP, WSAECONNRESET on the receive side
+ * indicates that a previous send has triggered an ICMP Port
+ * Unreachable message.
+ * But at this point we wouldn't know which of the previous
+ * operations triggered the error, since the UDP socket can
+ * be shared!
+ * So we'll just ignore it!
+ */
+
+ if (rc == PJ_STATUS_FROM_OS(WSAECONNRESET)) {
+ PJ_LOG(4,(THIS_FILE,
+ "Ignored ICMP port unreach. on key=%p", h));
+ }
+# endif
+
+ /* In any case we would report this to caller. */
+ bytes_read = -rc;
+ }
+
+ h->op &= ~(PJ_IOQUEUE_OP_READ | PJ_IOQUEUE_OP_RECV |
+ PJ_IOQUEUE_OP_RECV_FROM);
+ PJ_FD_CLR(h->fd, &ioque->rfdset);
+ PJ_FD_CLR(h->fd, &rfdset);
+
+ /* Call callback. */
+ if (h->cb.on_read_complete)
+ (*h->cb.on_read_complete)(h, bytes_read);
+
+ /* Re-scan readable sockets. */
+ goto do_readable_scan;
+
+ }
+ }
+
+ /* Scan for writable socket */
+ h = ioque->hlist.next;
+do_writable_scan:
+ for ( ; h!=&ioque->hlist; h = h->next) {
+ if ((PJ_IOQUEUE_IS_WRITE_OP(h->op) || PJ_IOQUEUE_IS_CONNECT_OP(h->op))
+ && PJ_FD_ISSET(h->fd, &wfdset))
+ {
+ break;
+ }
+ }
+ if (h != &ioque->hlist) {
+ pj_assert(PJ_IOQUEUE_IS_WRITE_OP(h->op) ||
+ PJ_IOQUEUE_IS_CONNECT_OP(h->op));
+
+#if PJ_HAS_TCP
+ if ((h->op & PJ_IOQUEUE_OP_CONNECT)) {
+ /* Completion of connect() operation */
+ pj_ssize_t bytes_transfered;
+
+#if defined(PJ_LINUX) || defined(PJ_LINUX_KERNEL)
+ /* from connect(2):
+ * On Linux, use getsockopt to read the SO_ERROR option at
+ * level SOL_SOCKET to determine whether connect() completed
+ * successfully (if SO_ERROR is zero).
+ */
+ int value;
+ socklen_t vallen = sizeof(value);
+ int gs_rc = getsockopt(h->fd, SOL_SOCKET, SO_ERROR,
+ &value, &vallen);
+ if (gs_rc != 0) {
+ /* Argh!! What to do now???
+ * Just indicate that the socket is connected. The
+ * application will get error as soon as it tries to use
+ * the socket to send/receive.
+ */
+ bytes_transfered = 0;
+ } else {
+ bytes_transfered = value;
+ }
+#elif defined(PJ_WIN32)
+ bytes_transfered = 0; /* success */
+#else
+# error "Got to check this one!"
+#endif
+
+ /* Clear operation. */
+ h->op &= (~PJ_IOQUEUE_OP_CONNECT);
+ PJ_FD_CLR(h->fd, &ioque->wfdset);
+ PJ_FD_CLR(h->fd, &ioque->xfdset);
+
+ /* Call callback. */
+ if (h->cb.on_connect_complete)
+ (*h->cb.on_connect_complete)(h, bytes_transfered);
+
+ /* Re-scan writable sockets. */
+ goto do_writable_scan;
+
+ } else
+#endif /* PJ_HAS_TCP */
+ {
+ /* Completion of write(), send(), or sendto() operation. */
+
+ /* Clear operation. */
+ h->op &= ~(PJ_IOQUEUE_OP_WRITE | PJ_IOQUEUE_OP_SEND |
+ PJ_IOQUEUE_OP_SEND_TO);
+ PJ_FD_CLR(h->fd, &ioque->wfdset);
+ PJ_FD_CLR(h->fd, &wfdset);
+
+ /* Call callback. */
+ /* All data must have been sent? */
+ if (h->cb.on_write_complete)
+ (*h->cb.on_write_complete)(h, h->wr_buflen);
+
+ /* Re-scan writable sockets. */
+ goto do_writable_scan;
+ }
+ }
+
+ /* Shouldn't happen. */
+ /* For some strange reason, on WinXP select() can return 1 while no
+ * pj_fd_set_t is signaled. */
+ /* pj_assert(0); */
+
+ //count = 0;
+
+ pj_lock_release(ioque->lock);
+ return count;
+}
+
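A usage sketch of the polling loop referred to above. The worker_thread entry point and quit_flag are assumptions for illustration; pj_ioqueue_poll() and pj_time_val are used as they appear in this file.

    static volatile int quit_flag;           /* hypothetical shutdown flag */

    static int worker_thread(void *arg)
    {
        pj_ioqueue_t *ioque = (pj_ioqueue_t*) arg;
        pj_time_val timeout = { 0, 100 };    /* poll in 100 msec slices */

        while (!quit_flag) {
            /* Dispatches completion callbacks for signalled sockets;
             * returns 0 if nothing happened, negative on error.
             */
            int count = pj_ioqueue_poll(ioque, &timeout);
            if (count < 0) {
                /* e.g. select() error; log and keep going */
            }
        }
        return 0;
    }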
+/*
+ * pj_ioqueue_read()
+ *
+ * Start asynchronous read from the descriptor.
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_read( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key,
+ void *buffer,
+ pj_size_t buflen)
+{
+ PJ_ASSERT_RETURN(ioque && key && buffer, PJ_EINVAL);
+ PJ_CHECK_STACK();
+
+ /* For consistency with other ioqueue implementations, we reject the
+ * request if the descriptor has already been submitted for reading.
+ */
+ PJ_ASSERT_RETURN(((key->op & PJ_IOQUEUE_OP_READ) == 0 &&
+ (key->op & PJ_IOQUEUE_OP_RECV) == 0 &&
+ (key->op & PJ_IOQUEUE_OP_RECV_FROM) == 0),
+ PJ_EBUSY);
+
+ pj_lock_acquire(ioque->lock);
+
+ key->op |= PJ_IOQUEUE_OP_READ;
+ key->rd_flags = 0;
+ key->rd_buf = buffer;
+ key->rd_buflen = buflen;
+ PJ_FD_SET(key->fd, &ioque->rfdset);
+
+ pj_lock_release(ioque->lock);
+ return PJ_EPENDING;
+}
+
+
+/*
+ * pj_ioqueue_recv()
+ *
+ * Start asynchronous recv() from the socket.
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_recv( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key,
+ void *buffer,
+ pj_size_t buflen,
+ unsigned flags )
+{
+ PJ_ASSERT_RETURN(ioque && key && buffer, PJ_EINVAL);
+ PJ_CHECK_STACK();
+
+ /* For consistency with other ioqueue implementations, we reject the
+ * request if the descriptor has already been submitted for reading.
+ */
+ PJ_ASSERT_RETURN(((key->op & PJ_IOQUEUE_OP_READ) == 0 &&
+ (key->op & PJ_IOQUEUE_OP_RECV) == 0 &&
+ (key->op & PJ_IOQUEUE_OP_RECV_FROM) == 0),
+ PJ_EBUSY);
+
+ pj_lock_acquire(ioque->lock);
+
+ key->op |= PJ_IOQUEUE_OP_RECV;
+ key->rd_buf = buffer;
+ key->rd_buflen = buflen;
+ key->rd_flags = flags;
+ PJ_FD_SET(key->fd, &ioque->rfdset);
+
+ pj_lock_release(ioque->lock);
+ return PJ_EPENDING;
+}
+
+/*
+ * pj_ioqueue_recvfrom()
+ *
+ * Start asynchronous recvfrom() from the socket.
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_recvfrom( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key,
+ void *buffer,
+ pj_size_t buflen,
+ unsigned flags,
+ pj_sockaddr_t *addr,
+ int *addrlen)
+{
+ PJ_ASSERT_RETURN(ioque && key && buffer, PJ_EINVAL);
+ PJ_CHECK_STACK();
+
+ /* For consistency with other ioqueue implementations, we reject the
+ * request if the descriptor has already been submitted for reading.
+ */
+ PJ_ASSERT_RETURN(((key->op & PJ_IOQUEUE_OP_READ) == 0 &&
+ (key->op & PJ_IOQUEUE_OP_RECV) == 0 &&
+ (key->op & PJ_IOQUEUE_OP_RECV_FROM) == 0),
+ PJ_EBUSY);
+
+ pj_lock_acquire(ioque->lock);
+
+ key->op |= PJ_IOQUEUE_OP_RECV_FROM;
+ key->rd_buf = buffer;
+ key->rd_buflen = buflen;
+ key->rd_flags = flags;
+ key->rmt_addr = addr;
+ key->rmt_addrlen = addrlen;
+ PJ_FD_SET(key->fd, &ioque->rfdset);
+
+ pj_lock_release(ioque->lock);
+ return PJ_EPENDING;
+}
+
+/*
+ * pj_ioqueue_write()
+ *
+ * Start asynchronous write() to the descriptor.
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_write( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key,
+ const void *data,
+ pj_size_t datalen)
+{
+ pj_status_t rc;
+ pj_ssize_t sent;
+
+ PJ_ASSERT_RETURN(ioque && key && data, PJ_EINVAL);
+ PJ_CHECK_STACK();
+
+ /* For consistency with other ioqueue implementations, we reject the
+ * request if the descriptor has already been submitted for writing.
+ */
+ PJ_ASSERT_RETURN(((key->op & PJ_IOQUEUE_OP_WRITE) == 0 &&
+ (key->op & PJ_IOQUEUE_OP_SEND) == 0 &&
+ (key->op & PJ_IOQUEUE_OP_SEND_TO) == 0),
+ PJ_EBUSY);
+
+ sent = datalen;
+ /* sent would be -1 after pj_sock_send() if it returns error. */
+ rc = pj_sock_send(key->fd, data, &sent, 0);
+ if (rc != PJ_SUCCESS && rc != PJ_STATUS_FROM_OS(OSERR_EWOULDBLOCK)) {
+ return rc;
+ }
+
+ pj_lock_acquire(ioque->lock);
+
+ key->op |= PJ_IOQUEUE_OP_WRITE;
+ key->wr_buf = NULL;
+ key->wr_buflen = datalen;
+ PJ_FD_SET(key->fd, &ioque->wfdset);
+
+ pj_lock_release(ioque->lock);
+
+ return PJ_EPENDING;
+}
+
+/*
+ * pj_ioqueue_send()
+ *
+ * Start asynchronous send() to the descriptor.
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_send( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key,
+ const void *data,
+ pj_size_t datalen,
+ unsigned flags)
+{
+ pj_status_t rc;
+ pj_ssize_t sent;
+
+ PJ_ASSERT_RETURN(ioque && key && data, PJ_EINVAL);
+ PJ_CHECK_STACK();
+
+ /* For consistency with other ioqueue implementations, we reject the
+ * request if the descriptor has already been submitted for writing.
+ */
+ PJ_ASSERT_RETURN(((key->op & PJ_IOQUEUE_OP_WRITE) == 0 &&
+ (key->op & PJ_IOQUEUE_OP_SEND) == 0 &&
+ (key->op & PJ_IOQUEUE_OP_SEND_TO) == 0),
+ PJ_EBUSY);
+
+ sent = datalen;
+ /* sent would be -1 after pj_sock_send() if it returns error. */
+ rc = pj_sock_send(key->fd, data, &sent, flags);
+ if (rc != PJ_SUCCESS && rc != PJ_STATUS_FROM_OS(OSERR_EWOULDBLOCK)) {
+ return rc;
+ }
+
+ pj_lock_acquire(ioque->lock);
+
+ key->op |= PJ_IOQUEUE_OP_SEND;
+ key->wr_buf = NULL;
+ key->wr_buflen = datalen;
+ PJ_FD_SET(key->fd, &ioque->wfdset);
+
+ pj_lock_release(ioque->lock);
+
+ return PJ_EPENDING;
+}
+
+
+/*
+ * pj_ioqueue_sendto()
+ *
+ * Start asynchronous sendto() on the descriptor.
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_sendto( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key,
+ const void *data,
+ pj_size_t datalen,
+ unsigned flags,
+ const pj_sockaddr_t *addr,
+ int addrlen)
+{
+ pj_status_t rc;
+ pj_ssize_t sent;
+
+ PJ_ASSERT_RETURN(ioque && key && data, PJ_EINVAL);
+ PJ_CHECK_STACK();
+
+ /* For consistency with other ioqueue implementations, we reject the
+ * request if the descriptor has already been submitted for writing.
+ */
+ PJ_ASSERT_RETURN(((key->op & PJ_IOQUEUE_OP_WRITE) == 0 &&
+ (key->op & PJ_IOQUEUE_OP_SEND) == 0 &&
+ (key->op & PJ_IOQUEUE_OP_SEND_TO) == 0),
+ PJ_EBUSY);
+
+ sent = datalen;
+ /* sent would be -1 after pj_sock_sendto() if it returns error. */
+ rc = pj_sock_sendto(key->fd, data, &sent, flags, addr, addrlen);
+ if (rc != PJ_SUCCESS && rc != PJ_STATUS_FROM_OS(OSERR_EWOULDBLOCK)) {
+ return rc;
+ }
+
+ pj_lock_acquire(ioque->lock);
+
+ key->op |= PJ_IOQUEUE_OP_SEND_TO;
+ key->wr_buf = NULL;
+ key->wr_buflen = datalen;
+ PJ_FD_SET(key->fd, &ioque->wfdset);
+
+ pj_lock_release(ioque->lock);
+ return PJ_EPENDING;
+}
+
+#if PJ_HAS_TCP
+/*
+ * Initiate overlapped accept() operation.
+ */
+PJ_DEF(int) pj_ioqueue_accept( pj_ioqueue_t *ioqueue,
+ pj_ioqueue_key_t *key,
+ pj_sock_t *new_sock,
+ pj_sockaddr_t *local,
+ pj_sockaddr_t *remote,
+ int *addrlen)
+{
+ /* check parameters. All must be specified! */
+ pj_assert(ioqueue && key && new_sock);
+
+ /* Server socket must have no other operation! */
+ pj_assert(key->op == 0);
+
+ pj_lock_acquire(ioqueue->lock);
+
+ key->op = PJ_IOQUEUE_OP_ACCEPT;
+ key->accept_fd = new_sock;
+ key->rmt_addr = remote;
+ key->rmt_addrlen = addrlen;
+ key->local_addr = local;
+ key->local_addrlen = addrlen; /* uses the same addrlen var. as rmt_addrlen */
+
+ PJ_FD_SET(key->fd, &ioqueue->rfdset);
+
+ pj_lock_release(ioqueue->lock);
+ return PJ_EPENDING;
+}
+
+/*
+ * Initiate overlapped connect() operation (well, it's non-blocking actually,
+ * since there's no overlapped version of connect()).
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_connect( pj_ioqueue_t *ioqueue,
+ pj_ioqueue_key_t *key,
+ const pj_sockaddr_t *addr,
+ int addrlen )
+{
+ pj_status_t rc;
+
+ /* check parameters. All must be specified! */
+ PJ_ASSERT_RETURN(ioqueue && key && addr && addrlen, PJ_EINVAL);
+
+ /* Connecting socket must have no other operation! */
+ PJ_ASSERT_RETURN(key->op == 0, PJ_EBUSY);
+
+ rc = pj_sock_connect(key->fd, addr, addrlen);
+ if (rc == PJ_SUCCESS) {
+ /* Connected! */
+ return PJ_SUCCESS;
+ } else {
+ if (rc == PJ_STATUS_FROM_OS(OSERR_EINPROGRESS) ||
+ rc == PJ_STATUS_FROM_OS(OSERR_EWOULDBLOCK))
+ {
+ /* Pending! */
+ pj_lock_acquire(ioqueue->lock);
+ key->op = PJ_IOQUEUE_OP_CONNECT;
+ PJ_FD_SET(key->fd, &ioqueue->wfdset);
+ PJ_FD_SET(key->fd, &ioqueue->xfdset);
+ pj_lock_release(ioqueue->lock);
+ return PJ_EPENDING;
+ } else {
+ /* Error! */
+ return rc;
+ }
+ }
+}
+#endif /* PJ_HAS_TCP */
+
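A sketch of initiating an asynchronous connect() through this backend. The on_connect_done handler, its assumed signature, and the address argument are illustrative; the pj_sock_* and pj_ioqueue_* calls are the ones defined in this library.

    static void on_connect_done(pj_ioqueue_key_t *key, int status)
    {
        /* status 0 means connected; non-zero carries the error */
    }

    static pj_status_t start_connect(pj_pool_t *pool, pj_ioqueue_t *ioque,
                                     const pj_sockaddr_in *addr)
    {
        pj_ioqueue_callback cb;
        pj_ioqueue_key_t *key;
        pj_sock_t sock;
        pj_status_t rc;

        rc = pj_sock_socket(PJ_AF_INET, PJ_SOCK_STREAM, 0, &sock);
        if (rc != PJ_SUCCESS)
            return rc;

        pj_memset(&cb, 0, sizeof(cb));
        cb.on_connect_complete = &on_connect_done;

        rc = pj_ioqueue_register_sock(pool, ioque, sock, NULL, &cb, &key);
        if (rc != PJ_SUCCESS)
            return rc;

        rc = pj_ioqueue_connect(ioque, key, addr, sizeof(*addr));
        if (rc == PJ_SUCCESS) {
            /* Connected immediately; this backend makes no callback. */
        } else if (rc == PJ_EPENDING) {
            /* on_connect_done() will be called from pj_ioqueue_poll(). */
        }
        return rc;
    }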
diff --git a/pjlib/src/pj/ioqueue_winnt.c b/pjlib/src/pj/ioqueue_winnt.c
new file mode 100644
index 00000000..93116c9d
--- /dev/null
+++ b/pjlib/src/pj/ioqueue_winnt.c
@@ -0,0 +1,852 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/ioqueue_winnt.c 11 10/29/05 11:31a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/ioqueue_winnt.c $
+ *
+ * 11 10/29/05 11:31a Bennylp
+ * Changed accept and lock.
+ *
+ * 10 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 9 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ */
+#include <pj/ioqueue.h>
+#include <pj/os.h>
+#include <pj/lock.h>
+#include <pj/pool.h>
+#include <pj/string.h>
+#include <pj/sock.h>
+#include <pj/array.h>
+#include <pj/log.h>
+#include <pj/assert.h>
+#include <pj/errno.h>
+
+
+#if defined(PJ_HAS_WINSOCK2_H) && PJ_HAS_WINSOCK2_H != 0
+# include <winsock2.h>
+#elif defined(PJ_HAS_WINSOCK_H) && PJ_HAS_WINSOCK_H != 0
+# include <winsock.h>
+#endif
+
+#if defined(PJ_HAS_MSWSOCK_H) && PJ_HAS_MSWSOCK_H != 0
+# include <mswsock.h>
+#endif
+
+
+#define ACCEPT_ADDR_LEN (sizeof(pj_sockaddr_in)+20)
+
+/*
+ * OVERLAP structure for send and receive.
+ */
+typedef struct ioqueue_overlapped
+{
+ WSAOVERLAPPED overlapped;
+ pj_ioqueue_operation_e operation;
+ WSABUF wsabuf;
+} ioqueue_overlapped;
+
+#if PJ_HAS_TCP
+/*
+ * OVERLAP structure for accept.
+ */
+typedef struct ioqueue_accept_rec
+{
+ WSAOVERLAPPED overlapped;
+ pj_ioqueue_operation_e operation;
+ pj_sock_t newsock;
+ pj_sock_t *newsock_ptr;
+ int *addrlen;
+ void *remote;
+ void *local;
+ char accept_buf[2 * ACCEPT_ADDR_LEN];
+} ioqueue_accept_rec;
+#endif
+
+/*
+ * Structure for individual socket.
+ */
+struct pj_ioqueue_key_t
+{
+ HANDLE hnd;
+ void *user_data;
+ ioqueue_overlapped recv_overlapped;
+ ioqueue_overlapped send_overlapped;
+#if PJ_HAS_TCP
+ int connecting;
+ ioqueue_accept_rec accept_overlapped;
+#endif
+ pj_ioqueue_callback cb;
+};
+
+/*
+ * IO Queue structure.
+ */
+struct pj_ioqueue_t
+{
+ HANDLE iocp;
+ pj_lock_t *lock;
+ pj_bool_t auto_delete_lock;
+ unsigned event_count;
+ HANDLE event_pool[MAXIMUM_WAIT_OBJECTS+1];
+#if PJ_HAS_TCP
+ unsigned connecting_count;
+ HANDLE connecting_handles[MAXIMUM_WAIT_OBJECTS+1];
+ pj_ioqueue_key_t *connecting_keys[MAXIMUM_WAIT_OBJECTS+1];
+#endif
+};
+
+
+#if PJ_HAS_TCP
+/*
+ * Process the socket when the overlapped accept() completed.
+ */
+static void ioqueue_on_accept_complete(ioqueue_accept_rec *accept_overlapped)
+{
+ struct sockaddr *local;
+ struct sockaddr *remote;
+ int locallen, remotelen;
+
+ PJ_CHECK_STACK();
+
+ /* Operation complete immediately. */
+ GetAcceptExSockaddrs( accept_overlapped->accept_buf,
+ 0,
+ ACCEPT_ADDR_LEN,
+ ACCEPT_ADDR_LEN,
+ &local,
+ &locallen,
+ &remote,
+ &remotelen);
+ pj_memcpy(accept_overlapped->local, local, locallen);
+ pj_memcpy(accept_overlapped->remote, remote, remotelen);
+ *accept_overlapped->addrlen = locallen;
+ if (accept_overlapped->newsock_ptr)
+ *accept_overlapped->newsock_ptr = accept_overlapped->newsock;
+ accept_overlapped->operation = 0;
+ accept_overlapped->newsock = PJ_INVALID_SOCKET;
+}
+
+static void erase_connecting_socket( pj_ioqueue_t *ioqueue, unsigned pos)
+{
+ pj_ioqueue_key_t *key = ioqueue->connecting_keys[pos];
+ HANDLE hEvent = ioqueue->connecting_handles[pos];
+ unsigned long optval;
+
+ /* Remove key from array of connecting handles. */
+ pj_array_erase(ioqueue->connecting_keys, sizeof(key),
+ ioqueue->connecting_count, pos);
+ pj_array_erase(ioqueue->connecting_handles, sizeof(HANDLE),
+ ioqueue->connecting_count, pos);
+ --ioqueue->connecting_count;
+
+ /* Disassociate the socket from the event. */
+ WSAEventSelect((pj_sock_t)key->hnd, hEvent, 0);
+
+ /* Put event object to pool. */
+ if (ioqueue->event_count < MAXIMUM_WAIT_OBJECTS) {
+ ioqueue->event_pool[ioqueue->event_count++] = hEvent;
+ } else {
+ /* Shouldn't happen. There should be no more pending connections
+ * than max.
+ */
+ pj_assert(0);
+ CloseHandle(hEvent);
+ }
+
+ /* Set socket to blocking again. */
+ optval = 0;
+ if (ioctlsocket((pj_sock_t)key->hnd, FIONBIO, &optval) != 0) {
+ DWORD dwStatus;
+ dwStatus = WSAGetLastError();
+ }
+}
+
+/*
+ * Poll for the completion of non-blocking connect().
+ * If there's a completion, the function returns the key of the completed
+ * socket, and the 'connect_err' argument contains the connect() result. If
+ * connect() succeeded, 'connect_err' will be zero, otherwise it will hold
+ * the error code.
+ */
+static pj_ioqueue_key_t *check_connecting( pj_ioqueue_t *ioqueue,
+ pj_ssize_t *connect_err )
+{
+ pj_ioqueue_key_t *key = NULL;
+
+ if (ioqueue->connecting_count) {
+ DWORD result;
+
+ pj_lock_acquire(ioqueue->lock);
+ result = WaitForMultipleObjects(ioqueue->connecting_count,
+ ioqueue->connecting_handles,
+ FALSE, 0);
+ if (result >= WAIT_OBJECT_0 &&
+ result < WAIT_OBJECT_0+ioqueue->connecting_count)
+ {
+ WSANETWORKEVENTS net_events;
+
+ /* Got completed connect(). */
+ unsigned pos = result - WAIT_OBJECT_0;
+ key = ioqueue->connecting_keys[pos];
+
+ /* See whether connect has succeeded. */
+ WSAEnumNetworkEvents((pj_sock_t)key->hnd,
+ ioqueue->connecting_handles[pos],
+ &net_events);
+ *connect_err = net_events.iErrorCode[FD_CONNECT_BIT];
+
+ /* Erase socket from pending connect. */
+ erase_connecting_socket(ioqueue, pos);
+ }
+ pj_lock_release(ioqueue->lock);
+ }
+ return key;
+}
+#endif
+
+
+PJ_DEF(pj_status_t) pj_ioqueue_create( pj_pool_t *pool,
+ pj_size_t max_fd,
+ int max_threads,
+ pj_ioqueue_t **ioqueue)
+{
+ pj_ioqueue_t *ioq;
+ pj_status_t rc;
+
+ PJ_UNUSED_ARG(max_fd);
+ PJ_ASSERT_RETURN(pool && ioqueue, PJ_EINVAL);
+
+ ioq = pj_pool_zalloc(pool, sizeof(*ioq));
+ ioq->iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, max_threads);
+ if (ioq->iocp == NULL)
+ return PJ_RETURN_OS_ERROR(GetLastError());
+
+ rc = pj_lock_create_simple_mutex(pool, NULL, &ioq->lock);
+ if (rc != PJ_SUCCESS) {
+ CloseHandle(ioq->iocp);
+ return rc;
+ }
+
+ ioq->auto_delete_lock = PJ_TRUE;
+
+ *ioqueue = ioq;
+
+ PJ_LOG(4, ("pjlib", "WinNT IOCP I/O Queue created (%p)", ioq));
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(pj_status_t) pj_ioqueue_destroy( pj_ioqueue_t *ioque )
+{
+ unsigned i;
+
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(ioque, PJ_EINVAL);
+
+ /* Destroy events in the pool */
+ for (i=0; i<ioque->event_count; ++i) {
+ CloseHandle(ioque->event_pool[i]);
+ }
+ ioque->event_count = 0;
+
+ if (ioque->auto_delete_lock)
+ pj_lock_destroy(ioque->lock);
+
+ if (CloseHandle(ioque->iocp) == TRUE)
+ return PJ_SUCCESS;
+ else
+ return PJ_RETURN_OS_ERROR(GetLastError());
+}
+
+PJ_DEF(pj_status_t) pj_ioqueue_set_lock( pj_ioqueue_t *ioque,
+ pj_lock_t *lock,
+ pj_bool_t auto_delete )
+{
+ PJ_ASSERT_RETURN(ioque && lock, PJ_EINVAL);
+
+ if (ioque->auto_delete_lock) {
+ pj_lock_destroy(ioque->lock);
+ }
+
+ ioque->lock = lock;
+ ioque->auto_delete_lock = auto_delete;
+
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(pj_status_t) pj_ioqueue_register_sock( pj_pool_t *pool,
+ pj_ioqueue_t *ioque,
+ pj_sock_t hnd,
+ void *user_data,
+ const pj_ioqueue_callback *cb,
+ pj_ioqueue_key_t **key )
+{
+ HANDLE hioq;
+ pj_ioqueue_key_t *rec;
+
+ PJ_ASSERT_RETURN(pool && ioque && cb && key, PJ_EINVAL);
+
+ rec = pj_pool_zalloc(pool, sizeof(pj_ioqueue_key_t));
+ rec->hnd = (HANDLE)hnd;
+ rec->user_data = user_data;
+ pj_memcpy(&rec->cb, cb, sizeof(pj_ioqueue_callback));
+#if PJ_HAS_TCP
+ rec->accept_overlapped.newsock = PJ_INVALID_SOCKET;
+#endif
+ hioq = CreateIoCompletionPort((HANDLE)hnd, ioque->iocp, (DWORD)rec, 0);
+ if (!hioq) {
+ return PJ_RETURN_OS_ERROR(GetLastError());
+ }
+
+ *key = rec;
+ return PJ_SUCCESS;
+}
+
+
+
+PJ_DEF(pj_status_t) pj_ioqueue_unregister( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key )
+{
+ PJ_ASSERT_RETURN(ioque && key, PJ_EINVAL);
+
+#if PJ_HAS_TCP
+ if (key->connecting) {
+ unsigned pos;
+
+ /* Erase from connecting_handles */
+ pj_lock_acquire(ioque->lock);
+ for (pos=0; pos < ioque->connecting_count; ++pos) {
+ if (ioque->connecting_keys[pos] == key) {
+ erase_connecting_socket(ioque, pos);
+ if (key->accept_overlapped.newsock_ptr) {
+ /* ??? shouldn't it be newsock instead of newsock_ptr??? */
+ closesocket(*key->accept_overlapped.newsock_ptr);
+ }
+ break;
+ }
+ }
+ pj_lock_release(ioque->lock);
+ key->connecting = 0;
+ }
+#endif
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(void*) pj_ioqueue_get_user_data( pj_ioqueue_key_t *key )
+{
+ PJ_ASSERT_RETURN(key, NULL);
+ return key->user_data;
+}
+
+/*
+ * Poll for events.
+ */
+PJ_DEF(int) pj_ioqueue_poll( pj_ioqueue_t *ioque, const pj_time_val *timeout)
+{
+ DWORD dwMsec, dwBytesTransfered, dwKey;
+ ioqueue_overlapped *ov;
+ pj_ioqueue_key_t *key;
+ pj_ssize_t size_status;
+ BOOL rc;
+
+ PJ_ASSERT_RETURN(ioque, -PJ_EINVAL);
+
+ /* Check the connecting array. */
+#if PJ_HAS_TCP
+ key = check_connecting(ioque, &size_status);
+ if (key != NULL) {
+ key->cb.on_connect_complete(key, (int)size_status);
+ return 1;
+ }
+#endif
+
+ /* Calculate milliseconds timeout for GetQueuedCompletionStatus */
+ dwMsec = timeout ? timeout->sec*1000 + timeout->msec : INFINITE;
+
+ /* Poll for completion status. */
+ rc = GetQueuedCompletionStatus(ioque->iocp, &dwBytesTransfered, &dwKey,
+ (OVERLAPPED**)&ov, dwMsec);
+
+ /* The return value is:
+ * - nonzero if event was dequeued.
+ * - zero and ov==NULL if no event was dequeued.
+ * - zero and ov!=NULL if event for failed I/O was dequeued.
+ */
+ if (ov) {
+ /* Event was dequeued for either successful or failed I/O */
+ key = (pj_ioqueue_key_t*)dwKey;
+ size_status = dwBytesTransfered;
+ switch (ov->operation) {
+ case PJ_IOQUEUE_OP_READ:
+ case PJ_IOQUEUE_OP_RECV:
+ case PJ_IOQUEUE_OP_RECV_FROM:
+ key->recv_overlapped.operation = 0;
+ if (key->cb.on_read_complete)
+ key->cb.on_read_complete(key, size_status);
+ break;
+ case PJ_IOQUEUE_OP_WRITE:
+ case PJ_IOQUEUE_OP_SEND:
+ case PJ_IOQUEUE_OP_SEND_TO:
+ key->send_overlapped.operation = 0;
+ if (key->cb.on_write_complete)
+ key->cb.on_write_complete(key, size_status);
+ break;
+#if PJ_HAS_TCP
+ case PJ_IOQUEUE_OP_ACCEPT:
+ /* special case for accept. */
+ ioqueue_on_accept_complete((ioqueue_accept_rec*)ov);
+ if (key->cb.on_accept_complete)
+ key->cb.on_accept_complete(key, key->accept_overlapped.newsock,
+ 0);
+ break;
+ case PJ_IOQUEUE_OP_CONNECT:
+#endif
+ case PJ_IOQUEUE_OP_NONE:
+ pj_assert(0);
+ break;
+ }
+ return 1;
+ }
+
+ if (GetLastError()==WAIT_TIMEOUT) {
+ /* Check the connecting array. */
+#if PJ_HAS_TCP
+ key = check_connecting(ioque, &size_status);
+ if (key != NULL) {
+ key->cb.on_connect_complete(key, (int)size_status);
+ return 1;
+ }
+#endif
+ return 0;
+ }
+ return -1;
+}
+
+/*
+ * pj_ioqueue_read()
+ *
+ * Initiate overlapped ReadFile operation.
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_read( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key,
+ void *buffer,
+ pj_size_t buflen)
+{
+ BOOL rc;
+ DWORD bytesRead;
+
+ PJ_CHECK_STACK();
+ PJ_UNUSED_ARG(ioque);
+
+ if (key->recv_overlapped.operation != PJ_IOQUEUE_OP_NONE) {
+ pj_assert(!"Operation already pending for this descriptor");
+ return PJ_EBUSY;
+ }
+
+ pj_memset(&key->recv_overlapped, 0, sizeof(key->recv_overlapped));
+ key->recv_overlapped.operation = PJ_IOQUEUE_OP_READ;
+
+ rc = ReadFile(key->hnd, buffer, buflen, &bytesRead,
+ &key->recv_overlapped.overlapped);
+ if (rc == FALSE) {
+ DWORD dwStatus = GetLastError();
+ if (dwStatus==ERROR_IO_PENDING)
+ return PJ_EPENDING;
+ else
+ return PJ_STATUS_FROM_OS(dwStatus);
+ } else {
+ /*
+ * This is a workaround for a probable bug in Win2000 (probably NT too).
+ * Even if 'rc' is TRUE, which indicates the operation has completed,
+ * GetQueuedCompletionStatus will still return the key.
+ * So as a workaround, we always return PJ_EPENDING here.
+ */
+ return PJ_EPENDING;
+ }
+}
+
+/*
+ * pj_ioqueue_recv()
+ *
+ * Initiate overlapped WSARecv() operation.
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_recv( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key,
+ void *buffer,
+ pj_size_t buflen,
+ unsigned flags )
+{
+ int rc;
+ DWORD bytesRead;
+ DWORD dwFlags = 0;
+
+ PJ_CHECK_STACK();
+ PJ_UNUSED_ARG(ioque);
+
+ if (key->recv_overlapped.operation != PJ_IOQUEUE_OP_NONE) {
+ pj_assert(!"Operation already pending for this socket");
+ return PJ_EBUSY;
+ }
+
+ pj_memset(&key->recv_overlapped, 0, sizeof(key->recv_overlapped));
+ key->recv_overlapped.operation = PJ_IOQUEUE_OP_READ;
+
+ key->recv_overlapped.wsabuf.buf = buffer;
+ key->recv_overlapped.wsabuf.len = buflen;
+
+ dwFlags = flags;
+
+ rc = WSARecv((SOCKET)key->hnd, &key->recv_overlapped.wsabuf, 1,
+ &bytesRead, &dwFlags,
+ &key->recv_overlapped.overlapped, NULL);
+ if (rc == SOCKET_ERROR) {
+ DWORD dwStatus = WSAGetLastError();
+ if (dwStatus==WSA_IO_PENDING)
+ return PJ_EPENDING;
+ else
+ return PJ_STATUS_FROM_OS(dwStatus);
+ } else {
+ /* Must always return pending status.
+ * See comments on pj_ioqueue_read
+ * return bytesRead;
+ */
+ return PJ_EPENDING;
+ }
+}
+
+/*
+ * pj_ioqueue_recvfrom()
+ *
+ * Initiate overlapped RecvFrom() operation.
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_recvfrom( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key,
+ void *buffer,
+ pj_size_t buflen,
+ unsigned flags,
+ pj_sockaddr_t *addr,
+ int *addrlen)
+{
+ BOOL rc;
+ DWORD bytesRead;
+ DWORD dwFlags;
+
+ PJ_CHECK_STACK();
+ PJ_UNUSED_ARG(ioque);
+
+ if (key->recv_overlapped.operation != PJ_IOQUEUE_OP_NONE) {
+ pj_assert(!"Operation already pending for this socket");
+ return PJ_EBUSY;
+ }
+
+ pj_memset(&key->recv_overlapped, 0, sizeof(key->recv_overlapped));
+ key->recv_overlapped.operation = PJ_IOQUEUE_OP_RECV_FROM;
+ key->recv_overlapped.wsabuf.buf = buffer;
+ key->recv_overlapped.wsabuf.len = buflen;
+ dwFlags = flags;
+ rc = WSARecvFrom((SOCKET)key->hnd, &key->recv_overlapped.wsabuf, 1,
+ &bytesRead, &dwFlags,
+ addr, addrlen,
+ &key->recv_overlapped.overlapped, NULL);
+ if (rc == SOCKET_ERROR) {
+ DWORD dwStatus = WSAGetLastError();
+ if (dwStatus==WSA_IO_PENDING)
+ return PJ_EPENDING;
+ else
+ return PJ_STATUS_FROM_OS(dwStatus);
+ } else {
+ /* Must always return pending status.
+ * See comments on pj_ioqueue_read
+ * return bytesRead;
+ */
+ return PJ_EPENDING;
+ }
+}
+
+/*
+ * pj_ioqueue_write()
+ *
+ * Initiate overlapped WriteFile() operation.
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_write( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key,
+ const void *data,
+ pj_size_t datalen)
+{
+ BOOL rc;
+ DWORD bytesWritten;
+
+ PJ_CHECK_STACK();
+ PJ_UNUSED_ARG(ioque);
+
+ if (key->send_overlapped.operation != PJ_IOQUEUE_OP_NONE) {
+ pj_assert(!"Operation already pending for this descriptor");
+ return PJ_EBUSY;
+ }
+
+ pj_memset(&key->send_overlapped, 0, sizeof(key->send_overlapped));
+ key->send_overlapped.operation = PJ_IOQUEUE_OP_WRITE;
+ rc = WriteFile(key->hnd, data, datalen, &bytesWritten,
+ &key->send_overlapped.overlapped);
+
+ if (rc == FALSE) {
+ DWORD dwStatus = GetLastError();
+ if (dwStatus==ERROR_IO_PENDING)
+ return PJ_EPENDING;
+ else
+ return PJ_STATUS_FROM_OS(dwStatus);
+ } else {
+ /* Must always return pending status.
+ * See comments on pj_ioqueue_read
+ * return bytesWritten;
+ */
+ return PJ_EPENDING;
+ }
+}
+
+
+/*
+ * pj_ioqueue_send()
+ *
+ * Initiate overlapped Send operation.
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_send( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key,
+ const void *data,
+ pj_size_t datalen,
+ unsigned flags )
+{
+ int rc;
+ DWORD bytesWritten;
+ DWORD dwFlags;
+
+ PJ_CHECK_STACK();
+ PJ_UNUSED_ARG(ioque);
+
+ if (key->send_overlapped.operation != PJ_IOQUEUE_OP_NONE) {
+ pj_assert(!"Operation already pending for this socket");
+ return PJ_EBUSY;
+ }
+
+ pj_memset(&key->send_overlapped, 0, sizeof(key->send_overlapped));
+ key->send_overlapped.operation = PJ_IOQUEUE_OP_WRITE;
+ key->send_overlapped.wsabuf.buf = (void*)data;
+ key->send_overlapped.wsabuf.len = datalen;
+ dwFlags = flags;
+ rc = WSASend((SOCKET)key->hnd, &key->send_overlapped.wsabuf, 1,
+ &bytesWritten, dwFlags,
+ &key->send_overlapped.overlapped, NULL);
+ if (rc == SOCKET_ERROR) {
+ DWORD dwStatus = WSAGetLastError();
+ if (dwStatus==WSA_IO_PENDING)
+ return PJ_EPENDING;
+ else
+ return PJ_STATUS_FROM_OS(dwStatus);
+ } else {
+ /* Must always return pending status.
+ * See comments on pj_ioqueue_read
+ * return bytesRead;
+ */
+ return PJ_EPENDING;
+ }
+}
+
+
+/*
+ * pj_ioqueue_sendto()
+ *
+ * Initiate overlapped SendTo operation.
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_sendto( pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *key,
+ const void *data,
+ pj_size_t datalen,
+ unsigned flags,
+ const pj_sockaddr_t *addr,
+ int addrlen)
+{
+ BOOL rc;
+ DWORD bytesSent;
+ DWORD dwFlags;
+
+ PJ_CHECK_STACK();
+ PJ_UNUSED_ARG(ioque);
+
+ if (key->send_overlapped.operation != PJ_IOQUEUE_OP_NONE) {
+ pj_assert(!"Operation already pending for this socket");
+ return PJ_EBUSY;
+ }
+
+ pj_memset(&key->send_overlapped, 0, sizeof(key->send_overlapped));
+ key->send_overlapped.operation = PJ_IOQUEUE_OP_SEND_TO;
+ key->send_overlapped.wsabuf.buf = (char*)data;
+ key->send_overlapped.wsabuf.len = datalen;
+ dwFlags = flags;
+ rc = WSASendTo((SOCKET)key->hnd, &key->send_overlapped.wsabuf, 1,
+ &bytesSent, dwFlags, addr,
+ addrlen, &key->send_overlapped.overlapped, NULL);
+ if (rc == SOCKET_ERROR) {
+ DWORD dwStatus = WSAGetLastError();
+ if (dwStatus==WSA_IO_PENDING)
+ return PJ_EPENDING;
+ else
+ return PJ_STATUS_FROM_OS(dwStatus);
+ } else {
+ // Must always return pending status.
+ // See comments on pj_ioqueue_read
+ // return bytesSent;
+ return PJ_EPENDING;
+ }
+}
+
+#if PJ_HAS_TCP
+
+/*
+ * pj_ioqueue_accept()
+ *
+ * Initiate overlapped accept() operation.
+ */
+PJ_DEF(int) pj_ioqueue_accept( pj_ioqueue_t *ioqueue,
+ pj_ioqueue_key_t *key,
+ pj_sock_t *new_sock,
+ pj_sockaddr_t *local,
+ pj_sockaddr_t *remote,
+ int *addrlen)
+{
+ BOOL rc;
+ DWORD bytesReceived;
+ pj_status_t status;
+
+ PJ_CHECK_STACK();
+ PJ_UNUSED_ARG(ioqueue);
+
+ if (key->accept_overlapped.operation != PJ_IOQUEUE_OP_NONE) {
+ pj_assert(!"Operation already pending for this socket");
+ return PJ_EBUSY;
+ }
+
+ if (key->accept_overlapped.newsock == PJ_INVALID_SOCKET) {
+ pj_sock_t sock;
+ status = pj_sock_socket(PJ_AF_INET, PJ_SOCK_STREAM, 0, &sock);
+ if (status != PJ_SUCCESS)
+ return status;
+
+ key->accept_overlapped.newsock = sock;
+ }
+ key->accept_overlapped.operation = PJ_IOQUEUE_OP_ACCEPT;
+ key->accept_overlapped.addrlen = addrlen;
+ key->accept_overlapped.local = local;
+ key->accept_overlapped.remote = remote;
+ key->accept_overlapped.newsock_ptr = new_sock;
+ pj_memset(&key->accept_overlapped.overlapped, 0,
+ sizeof(key->accept_overlapped.overlapped));
+
+ rc = AcceptEx( (SOCKET)key->hnd, (SOCKET)key->accept_overlapped.newsock,
+ key->accept_overlapped.accept_buf,
+ 0, ACCEPT_ADDR_LEN, ACCEPT_ADDR_LEN,
+ &bytesReceived,
+ &key->accept_overlapped.overlapped);
+
+ if (rc == TRUE) {
+ ioqueue_on_accept_complete(&key->accept_overlapped);
+ if (key->cb.on_accept_complete)
+ key->cb.on_accept_complete(key, key->accept_overlapped.newsock, 0);
+ return PJ_SUCCESS;
+ } else {
+ DWORD dwStatus = WSAGetLastError();
+ if (dwStatus==WSA_IO_PENDING)
+ return PJ_EPENDING;
+ else
+ return PJ_STATUS_FROM_OS(dwStatus);
+ }
+}
+
+
+/*
+ * pj_ioqueue_connect()
+ *
+ * Initiate overlapped connect() operation (well, it's non-blocking actually,
+ * since there's no overlapped version of connect()).
+ */
+PJ_DEF(pj_status_t) pj_ioqueue_connect( pj_ioqueue_t *ioqueue,
+ pj_ioqueue_key_t *key,
+ const pj_sockaddr_t *addr,
+ int addrlen )
+{
+ unsigned long optval = 1;
+ HANDLE hEvent;
+
+ PJ_CHECK_STACK();
+
+ /* Set socket to non-blocking. */
+ if (ioctlsocket((pj_sock_t)key->hnd, FIONBIO, &optval) != 0) {
+ return PJ_RETURN_OS_ERROR(WSAGetLastError());
+ }
+
+ /* Initiate connect() */
+ if (connect((pj_sock_t)key->hnd, addr, addrlen) != 0) {
+ DWORD dwStatus;
+ dwStatus = WSAGetLastError();
+ if (dwStatus != WSAEWOULDBLOCK) {
+ /* Permanent error */
+ return PJ_RETURN_OS_ERROR(dwStatus);
+ } else {
+ /* Pending operation. This is what we're looking for. */
+ }
+ } else {
+ /* Connect has completed immediately! */
+ /* Restore to blocking mode. */
+ optval = 0;
+ if (ioctlsocket((pj_sock_t)key->hnd, FIONBIO, &optval) != 0) {
+ return PJ_RETURN_OS_ERROR(WSAGetLastError());
+ }
+
+ key->cb.on_connect_complete(key, 0);
+ return PJ_SUCCESS;
+ }
+
+ /* Add to the array of connecting socket to be polled */
+ pj_lock_acquire(ioqueue->lock);
+
+ if (ioqueue->connecting_count >= MAXIMUM_WAIT_OBJECTS) {
+ pj_lock_release(ioqueue->lock);
+ return PJ_ETOOMANYCONN;
+ }
+
+ /* Get or create event object. */
+ if (ioqueue->event_count) {
+ hEvent = ioqueue->event_pool[ioqueue->event_count - 1];
+ --ioqueue->event_count;
+ } else {
+ hEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
+ if (hEvent == NULL) {
+ DWORD dwStatus = GetLastError();
+ pj_lock_release(ioqueue->lock);
+ return PJ_STATUS_FROM_OS(dwStatus);
+ }
+ }
+
+ /* Mark key as connecting.
+ * We can't use array index since key can be removed dynamically.
+ */
+ key->connecting = 1;
+
+ /* Associate socket events to the event object. */
+ if (WSAEventSelect((pj_sock_t)key->hnd, hEvent, FD_CONNECT) != 0) {
+ CloseHandle(hEvent);
+ pj_lock_release(ioqueue->lock);
+ return PJ_RETURN_OS_ERROR(WSAGetLastError());
+ }
+
+ /* Add to array. */
+ ioqueue->connecting_keys[ ioqueue->connecting_count ] = key;
+ ioqueue->connecting_handles[ ioqueue->connecting_count ] = hEvent;
+ ioqueue->connecting_count++;
+
+ pj_lock_release(ioqueue->lock);
+
+ return PJ_EPENDING;
+}
+#endif /* #if PJ_HAS_TCP */
+
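A sketch of the create/destroy lifecycle for this backend. The sizes and the run_ioqueue wrapper are illustrative; note that in this implementation max_threads is handed to CreateIoCompletionPort() as the IOCP concurrency value, so it should match the number of threads that will call pj_ioqueue_poll().

    static pj_status_t run_ioqueue(pj_pool_t *pool)
    {
        enum { MAX_HANDLES = 64, WORKER_THREADS = 2 };  /* illustrative */
        pj_ioqueue_t *ioque;
        pj_status_t rc;

        rc = pj_ioqueue_create(pool, MAX_HANDLES, WORKER_THREADS, &ioque);
        if (rc != PJ_SUCCESS)
            return rc;

        /* ... spawn WORKER_THREADS threads that loop on pj_ioqueue_poll(),
         * register sockets, run the application ...
         */

        return pj_ioqueue_destroy(ioque);
    }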
diff --git a/pjlib/src/pj/list.c b/pjlib/src/pj/list.c
new file mode 100644
index 00000000..82b9e83a
--- /dev/null
+++ b/pjlib/src/pj/list.c
@@ -0,0 +1,18 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/list.c 5 10/14/05 12:26a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/list.c $
+ *
+ * 5 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 4 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ */
+#include <pj/list.h>
+
+#if !PJ_FUNCTIONS_ARE_INLINED
+# include <pj/list_i.h>
+#endif
+
+
diff --git a/pjlib/src/pj/lock.c b/pjlib/src/pj/lock.c
new file mode 100644
index 00000000..10b967a8
--- /dev/null
+++ b/pjlib/src/pj/lock.c
@@ -0,0 +1,190 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/lock.c 3 10/29/05 11:51a Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pj/lock.c $
+ *
+ * 3 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/05/05 6:35p Bennylp
+ * Created.
+ */
+#include <pj/lock.h>
+#include <pj/os.h>
+#include <pj/assert.h>
+#include <pj/pool.h>
+#include <pj/string.h>
+#include <pj/errno.h>
+
+
+typedef void LOCK_OBJ;
+
+/*
+ * Lock structure.
+ */
+struct pj_lock_t
+{
+ LOCK_OBJ *lock_object;
+
+ pj_status_t (*acquire) (LOCK_OBJ*);
+ pj_status_t (*tryacquire) (LOCK_OBJ*);
+ pj_status_t (*release) (LOCK_OBJ*);
+ pj_status_t (*destroy) (LOCK_OBJ*);
+};
+
+typedef pj_status_t (*FPTR)(LOCK_OBJ*);
+
+/******************************************************************************
+ * Implementation of lock object with mutex.
+ */
+static pj_lock_t mutex_lock_template =
+{
+ NULL,
+ (FPTR) &pj_mutex_lock,
+ (FPTR) &pj_mutex_trylock,
+ (FPTR) &pj_mutex_unlock,
+ (FPTR) &pj_mutex_destroy
+};
+
+static pj_status_t create_mutex_lock( pj_pool_t *pool,
+ const char *name,
+ int type,
+ pj_lock_t **lock )
+{
+ pj_lock_t *p_lock;
+ pj_status_t rc;
+
+ PJ_ASSERT_RETURN(pool && lock, PJ_EINVAL);
+
+ p_lock = pj_pool_alloc(pool, sizeof(pj_lock_t));
+ if (!p_lock)
+ return PJ_ENOMEM;
+
+ pj_memcpy(p_lock, &mutex_lock_template, sizeof(pj_lock_t));
+ rc = pj_mutex_create(pool, name, type, (pj_mutex_t**)&p_lock->lock_object);
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ *lock = p_lock;
+ return PJ_SUCCESS;
+}
+
+
+PJ_DEF(pj_status_t) pj_lock_create_simple_mutex( pj_pool_t *pool,
+ const char *name,
+ pj_lock_t **lock )
+{
+ return create_mutex_lock(pool, name, PJ_MUTEX_SIMPLE, lock);
+}
+
+PJ_DEF(pj_status_t) pj_lock_create_recursive_mutex( pj_pool_t *pool,
+ const char *name,
+ pj_lock_t **lock )
+{
+ return create_mutex_lock(pool, name, PJ_MUTEX_RECURSE, lock);
+}
+
+
+/******************************************************************************
+ * Implementation of NULL lock object.
+ */
+static pj_status_t null_op(void *arg)
+{
+ PJ_UNUSED_ARG(arg);
+ return PJ_SUCCESS;
+}
+
+static pj_lock_t null_lock_template =
+{
+ NULL,
+ &null_op,
+ &null_op,
+ &null_op,
+ &null_op
+};
+
+PJ_DEF(pj_status_t) pj_lock_create_null_mutex( pj_pool_t *pool,
+ const char *name,
+ pj_lock_t **lock )
+{
+ PJ_UNUSED_ARG(name);
+ PJ_UNUSED_ARG(pool);
+
+ PJ_ASSERT_RETURN(lock, PJ_EINVAL);
+
+ *lock = &null_lock_template;
+ return PJ_SUCCESS;
+}
+
+
+/******************************************************************************
+ * Implementation of semaphore lock object.
+ */
+#if defined(PJ_HAS_SEMAPHORE) && PJ_HAS_SEMAPHORE != 0
+
+static pj_lock_t sem_lock_template =
+{
+ NULL,
+ (FPTR) &pj_sem_wait,
+ (FPTR) &pj_sem_trywait,
+ (FPTR) &pj_sem_post,
+ (FPTR) &pj_sem_destroy
+};
+
+PJ_DEF(pj_status_t) pj_lock_create_semaphore( pj_pool_t *pool,
+ const char *name,
+ unsigned initial,
+ unsigned max,
+ pj_lock_t **lock )
+{
+ pj_lock_t *p_lock;
+ pj_status_t rc;
+
+ PJ_ASSERT_RETURN(pool && lock, PJ_EINVAL);
+
+ p_lock = pj_pool_alloc(pool, sizeof(pj_lock_t));
+ if (!p_lock)
+ return PJ_ENOMEM;
+
+ pj_memcpy(p_lock, &sem_lock_template, sizeof(pj_lock_t));
+ rc = pj_sem_create( pool, name, initial, max,
+ (pj_sem_t**)&p_lock->lock_object);
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ *lock = p_lock;
+
+ return PJ_SUCCESS;
+}
+
+
+#endif /* PJ_HAS_SEMAPHORE */
+
+
+PJ_DEF(pj_status_t) pj_lock_acquire( pj_lock_t *lock )
+{
+ PJ_ASSERT_RETURN(lock != NULL, PJ_EINVAL);
+ return (*lock->acquire)(lock->lock_object);
+}
+
+PJ_DEF(pj_status_t) pj_lock_tryacquire( pj_lock_t *lock )
+{
+ PJ_ASSERT_RETURN(lock != NULL, PJ_EINVAL);
+ return (*lock->tryacquire)(lock->lock_object);
+}
+
+PJ_DEF(pj_status_t) pj_lock_release( pj_lock_t *lock )
+{
+ PJ_ASSERT_RETURN(lock != NULL, PJ_EINVAL);
+ return (*lock->release)(lock->lock_object);
+}
+
+PJ_DEF(pj_status_t) pj_lock_destroy( pj_lock_t *lock )
+{
+ PJ_ASSERT_RETURN(lock != NULL, PJ_EINVAL);
+ return (*lock->destroy)(lock->lock_object);
+}
+
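A short usage sketch of the lock abstraction defined above, assuming a pool obtained elsewhere; the recursive variant allows the same thread to re-acquire a lock it already holds.

    static pj_status_t lock_demo(pj_pool_t *pool)
    {
        pj_lock_t *lock;
        pj_status_t rc;

        rc = pj_lock_create_recursive_mutex(pool, "demo", &lock);
        if (rc != PJ_SUCCESS)
            return rc;

        pj_lock_acquire(lock);
        /* ... touch shared state; a nested pj_lock_acquire() is allowed
         * as long as it is balanced by a matching pj_lock_release() ...
         */
        pj_lock_release(lock);

        return pj_lock_destroy(lock);
    }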
diff --git a/pjlib/src/pj/log.c b/pjlib/src/pj/log.c
new file mode 100644
index 00000000..7f79e55c
--- /dev/null
+++ b/pjlib/src/pj/log.c
@@ -0,0 +1,217 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/log.c 7 10/14/05 12:26a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/log.c $
+ *
+ * 7 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 6 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ */
+#include <pj/types.h>
+#include <pj/log.h>
+#include <pj/string.h>
+#include <pj/os.h>
+#include <pj/compat/vsprintf.h>
+#include <pj/compat/stdarg.h>
+
+#if PJ_LOG_MAX_LEVEL >= 1
+
+static int log_max_level = PJ_LOG_MAX_LEVEL;
+static pj_log_func *log_writer = &pj_log_write;
+static unsigned log_decor = PJ_LOG_HAS_TIME | PJ_LOG_HAS_MICRO_SEC |
+ PJ_LOG_HAS_SENDER | PJ_LOG_HAS_NEWLINE;
+
+#if PJ_LOG_USE_STACK_BUFFER==0
+static char log_buffer[PJ_LOG_MAX_SIZE];
+#endif
+
+PJ_DEF(void) pj_log_set_decor(unsigned decor)
+{
+ log_decor = decor;
+}
+
+PJ_DEF(unsigned) pj_log_get_decor(void)
+{
+ return log_decor;
+}
+
+PJ_DEF(void) pj_log_set_level(int level)
+{
+ log_max_level = level;
+}
+
+PJ_DEF(int) pj_log_get_level(void)
+{
+ return log_max_level;
+}
+
+PJ_DEF(void) pj_log_set_log_func( pj_log_func *func )
+{
+ log_writer = func;
+}
+
+PJ_DEF(pj_log_func*) pj_log_get_log_func(void)
+{
+ return log_writer;
+}
+
+static void pj_log(const char *sender, int level,
+ const char *format, va_list marker)
+{
+ pj_time_val now;
+ pj_parsed_time ptime;
+ char *pre;
+#if PJ_LOG_USE_STACK_BUFFER
+ char log_buffer[PJ_LOG_MAX_SIZE];
+#endif
+ int len;
+
+ PJ_CHECK_STACK();
+
+ if (level > log_max_level)
+ return;
+
+ /* Get current date/time. */
+ pj_gettimeofday(&now);
+ pj_time_decode(&now, &ptime);
+
+ pre = log_buffer;
+ if (log_decor & PJ_LOG_HAS_DAY_NAME) {
+ static const char *wdays[] = { "Sun", "Mon", "Tue", "Wed",
+ "Thu", "Fri", "Sat"};
+ strcpy(pre, wdays[ptime.wday]);
+ pre += 3;
+ }
+ if (log_decor & PJ_LOG_HAS_YEAR) {
+ *pre++ = ' ';
+ pre += pj_utoa(ptime.year, pre);
+ }
+ if (log_decor & PJ_LOG_HAS_MONTH) {
+ *pre++ = '-';
+ pre += pj_utoa_pad(ptime.mon, pre, 2, '0');
+ }
+ if (log_decor & PJ_LOG_HAS_DAY_OF_MON) {
+ *pre++ = ' ';
+ pre += pj_utoa_pad(ptime.day, pre, 2, '0');
+ }
+ if (log_decor & PJ_LOG_HAS_TIME) {
+ *pre++ = ' ';
+ pre += pj_utoa_pad(ptime.hour, pre, 2, '0');
+ *pre++ = ':';
+ pre += pj_utoa_pad(ptime.min, pre, 2, '0');
+ *pre++ = ':';
+ pre += pj_utoa_pad(ptime.sec, pre, 2, '0');
+ }
+ if (log_decor & PJ_LOG_HAS_MICRO_SEC) {
+ *pre++ = '.';
+ pre += pj_utoa_pad(ptime.msec, pre, 3, '0');
+ }
+ if (log_decor & PJ_LOG_HAS_SENDER) {
+ enum { SENDER_WIDTH = 12 };
+ int sender_len = strlen(sender);
+ *pre++ = ' ';
+ if (sender_len <= SENDER_WIDTH) {
+ while (sender_len < SENDER_WIDTH)
+ *pre++ = ' ', ++sender_len;
+ while (*sender)
+ *pre++ = *sender++;
+ } else {
+ int i;
+ for (i=0; i<SENDER_WIDTH; ++i)
+ *pre++ = *sender++;
+ }
+ }
+
+ if (log_decor != 0 && log_decor != PJ_LOG_HAS_NEWLINE)
+ *pre++ = ' ';
+
+ len = pre - log_buffer;
+
+ /* Print the whole message to the string log_buffer. */
+ len = len + vsnprintf(pre, sizeof(log_buffer)-len, format, marker);
+ if (len > 0 && len < sizeof(log_buffer)-1) {
+ if (log_decor & PJ_LOG_HAS_NEWLINE) {
+ log_buffer[len++] = '\n';
+ }
+ log_buffer[len++] = '\0';
+ } else {
+ len = sizeof(log_buffer)-1;
+ if (log_decor & PJ_LOG_HAS_NEWLINE) {
+ log_buffer[sizeof(log_buffer)-2] = '\n';
+ }
+ log_buffer[sizeof(log_buffer)-1] = '\0';
+ }
+
+ if (log_writer)
+ (*log_writer)(level, log_buffer, len);
+}
+
+PJ_DEF(void) pj_log_0(const char *obj, const char *format, ...)
+{
+ va_list arg;
+ va_start(arg, format);
+ pj_log(obj, 0, format, arg);
+ va_end(arg);
+}
+
+PJ_DEF(void) pj_log_1(const char *obj, const char *format, ...)
+{
+ va_list arg;
+ va_start(arg, format);
+ pj_log(obj, 1, format, arg);
+ va_end(arg);
+}
+#endif /* PJ_LOG_MAX_LEVEL >= 1 */
+
+#if PJ_LOG_MAX_LEVEL >= 2
+PJ_DEF(void) pj_log_2(const char *obj, const char *format, ...)
+{
+ va_list arg;
+ va_start(arg, format);
+ pj_log(obj, 2, format, arg);
+ va_end(arg);
+}
+#endif
+
+#if PJ_LOG_MAX_LEVEL >= 3
+PJ_DEF(void) pj_log_3(const char *obj, const char *format, ...)
+{
+ va_list arg;
+ va_start(arg, format);
+ pj_log(obj, 3, format, arg);
+ va_end(arg);
+}
+#endif
+
+#if PJ_LOG_MAX_LEVEL >= 4
+PJ_DEF(void) pj_log_4(const char *obj, const char *format, ...)
+{
+ va_list arg;
+ va_start(arg, format);
+ pj_log(obj, 4, format, arg);
+ va_end(arg);
+}
+#endif
+
+#if PJ_LOG_MAX_LEVEL >= 5
+PJ_DEF(void) pj_log_5(const char *obj, const char *format, ...)
+{
+ va_list arg;
+ va_start(arg, format);
+ pj_log(obj, 5, format, arg);
+ va_end(arg);
+}
+#endif
+
+#if PJ_LOG_MAX_LEVEL >= 6
+PJ_DEF(void) pj_log_6(const char *obj, const char *format, ...)
+{
+ va_list arg;
+ va_start(arg, format);
+ pj_log(obj, 6, format, arg);
+ va_end(arg);
+}
+#endif
+
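A configuration sketch for the logging front end above (the "myapp" sender and the init_logging wrapper are illustrative): pick a maximum level, choose the decorations to prepend, then emit messages through the PJ_LOG macro used elsewhere in this library.

    static void init_logging(void)
    {
        pj_log_set_level(4);    /* show messages up to level 4 */
        pj_log_set_decor(PJ_LOG_HAS_TIME | PJ_LOG_HAS_MICRO_SEC |
                         PJ_LOG_HAS_SENDER | PJ_LOG_HAS_NEWLINE);

        PJ_LOG(3, ("myapp", "logging ready, max level=%d",
                   pj_log_get_level()));
    }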
diff --git a/pjlib/src/pj/log_writer_printk.c b/pjlib/src/pj/log_writer_printk.c
new file mode 100644
index 00000000..b18a3027
--- /dev/null
+++ b/pjlib/src/pj/log_writer_printk.c
@@ -0,0 +1,20 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/log_writer_printk.c 2 10/14/05 12:26a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/log_writer_printk.c $
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 9/22/05 10:37a Bennylp
+ * Created.
+ *
+ */
+#include <pj/log.h>
+#include <pj/os.h>
+
+PJ_DEF(void) pj_log_write(int level, const char *buffer, int len)
+{
+ PJ_CHECK_STACK();
+ printk(KERN_INFO "%s", buffer);
+}
+
diff --git a/pjlib/src/pj/log_writer_stdout.c b/pjlib/src/pj/log_writer_stdout.c
new file mode 100644
index 00000000..30a7a6f1
--- /dev/null
+++ b/pjlib/src/pj/log_writer_stdout.c
@@ -0,0 +1,66 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/log_writer_stdout.c 5 10/14/05 12:26a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/log_writer_stdout.c $
+ *
+ * 5 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 4 9/21/05 1:39p Bennylp
+ * Periodic checkin for backup.
+ *
+ * 3 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ */
+#include <pj/log.h>
+#include <pj/os.h>
+#include <pj/compat/stdfileio.h>
+
+#define CLR_FATAL (PJ_TERM_COLOR_BRIGHT | PJ_TERM_COLOR_R)
+#define CLR_WARNING (PJ_TERM_COLOR_BRIGHT | PJ_TERM_COLOR_R | PJ_TERM_COLOR_G)
+#define CLR_INFO (PJ_TERM_COLOR_BRIGHT | PJ_TERM_COLOR_R | PJ_TERM_COLOR_G | \
+ PJ_TERM_COLOR_B)
+#define CLR_DEFAULT (PJ_TERM_COLOR_R | PJ_TERM_COLOR_G | PJ_TERM_COLOR_B)
+
+static void term_set_color(int level)
+{
+#if defined(PJ_TERM_HAS_COLOR) && PJ_TERM_HAS_COLOR != 0
+ unsigned attr = 0;
+ switch (level) {
+ case 0:
+ case 1: attr = CLR_FATAL;
+ break;
+ case 2: attr = CLR_WARNING;
+ break;
+ case 3: attr = CLR_INFO;
+ break;
+ default:
+ attr = CLR_DEFAULT;
+ break;
+ }
+
+ pj_term_set_color(attr);
+#endif
+}
+
+static void term_restore_color(void)
+{
+#if defined(PJ_TERM_HAS_COLOR) && PJ_TERM_HAS_COLOR != 0
+ pj_term_set_color(CLR_DEFAULT);
+#endif
+}
+
+
+PJ_DEF(void) pj_log_write(int level, const char *buffer, int len)
+{
+ PJ_CHECK_STACK();
+ PJ_UNUSED_ARG(len);
+
+ /* Copy to terminal/file. */
+ term_set_color(level);
+ fputs(buffer, stdout);
+ term_restore_color();
+
+ fflush(stdout);
+}
+
diff --git a/pjlib/src/pj/md5.c b/pjlib/src/pj/md5.c
new file mode 100644
index 00000000..3c42b4ed
--- /dev/null
+++ b/pjlib/src/pj/md5.c
@@ -0,0 +1,404 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/md5.c 5 10/14/05 12:26a Bennylp $ */
+/*
+ Copyright (C) 1999, 2000, 2002 Aladdin Enterprises. All rights reserved.
+
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the authors be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
+
+ L. Peter Deutsch
+ ghost@aladdin.com
+
+ */
+/* $Id: md5.c,v 1.6 2002/04/13 19:20:28 lpd Exp $ */
+/*
+ Independent implementation of MD5 (RFC 1321).
+
+ This code implements the MD5 Algorithm defined in RFC 1321, whose
+ text is available at
+ http://www.ietf.org/rfc/rfc1321.txt
+ The code is derived from the text of the RFC, including the test suite
+ (section A.5) but excluding the rest of Appendix A. It does not include
+ any code or documentation that is identified in the RFC as being
+ copyrighted.
+
+ The original and principal author of md5.c is L. Peter Deutsch
+ <ghost@aladdin.com>. Other authors are noted in the change history
+ that follows (in reverse chronological order):
+
+ 2002-04-13 lpd Clarified derivation from RFC 1321; now handles byte order
+ either statically or dynamically; added missing #include <string.h>
+ in library.
+ 2002-03-11 lpd Corrected argument list for main(), and added int return
+ type, in test program and T value program.
+ 2002-02-21 lpd Added missing #include <stdio.h> in test program.
+ 2000-07-03 lpd Patched to eliminate warnings about "constant is
+ unsigned in ANSI C, signed in traditional"; made test program
+ self-checking.
+ 1999-11-04 lpd Edited comments slightly for automatic TOC extraction.
+ 1999-10-18 lpd Fixed typo in header comment (ansi2knr rather than md5).
+ 1999-05-03 lpd Original version.
+ */
+
+#include <pj/md5.h>
+#include <pj/string.h>
+#include <pj/os.h>
+
+#undef BYTE_ORDER /* 1 = big-endian, -1 = little-endian, 0 = unknown */
+
+/*
+#ifdef ARCH_IS_BIG_ENDIAN
+# define BYTE_ORDER (ARCH_IS_BIG_ENDIAN ? 1 : -1)
+#else
+# define BYTE_ORDER 0
+#endif
+*/
+/* pjlib: */
+#include <pj/config.h>
+#if PJ_IS_LITTLE_ENDIAN
+# define BYTE_ORDER -1
+#elif PJ_IS_BIG_ENDIAN
+# define BYTE_ORDER 1
+#else
+# error Endianness is not known!
+#endif
+
+
+#define T_MASK ((md5_word_t)~0)
+#define T1 /* 0xd76aa478 */ (T_MASK ^ 0x28955b87)
+#define T2 /* 0xe8c7b756 */ (T_MASK ^ 0x173848a9)
+#define T3 0x242070db
+#define T4 /* 0xc1bdceee */ (T_MASK ^ 0x3e423111)
+#define T5 /* 0xf57c0faf */ (T_MASK ^ 0x0a83f050)
+#define T6 0x4787c62a
+#define T7 /* 0xa8304613 */ (T_MASK ^ 0x57cfb9ec)
+#define T8 /* 0xfd469501 */ (T_MASK ^ 0x02b96afe)
+#define T9 0x698098d8
+#define T10 /* 0x8b44f7af */ (T_MASK ^ 0x74bb0850)
+#define T11 /* 0xffff5bb1 */ (T_MASK ^ 0x0000a44e)
+#define T12 /* 0x895cd7be */ (T_MASK ^ 0x76a32841)
+#define T13 0x6b901122
+#define T14 /* 0xfd987193 */ (T_MASK ^ 0x02678e6c)
+#define T15 /* 0xa679438e */ (T_MASK ^ 0x5986bc71)
+#define T16 0x49b40821
+#define T17 /* 0xf61e2562 */ (T_MASK ^ 0x09e1da9d)
+#define T18 /* 0xc040b340 */ (T_MASK ^ 0x3fbf4cbf)
+#define T19 0x265e5a51
+#define T20 /* 0xe9b6c7aa */ (T_MASK ^ 0x16493855)
+#define T21 /* 0xd62f105d */ (T_MASK ^ 0x29d0efa2)
+#define T22 0x02441453
+#define T23 /* 0xd8a1e681 */ (T_MASK ^ 0x275e197e)
+#define T24 /* 0xe7d3fbc8 */ (T_MASK ^ 0x182c0437)
+#define T25 0x21e1cde6
+#define T26 /* 0xc33707d6 */ (T_MASK ^ 0x3cc8f829)
+#define T27 /* 0xf4d50d87 */ (T_MASK ^ 0x0b2af278)
+#define T28 0x455a14ed
+#define T29 /* 0xa9e3e905 */ (T_MASK ^ 0x561c16fa)
+#define T30 /* 0xfcefa3f8 */ (T_MASK ^ 0x03105c07)
+#define T31 0x676f02d9
+#define T32 /* 0x8d2a4c8a */ (T_MASK ^ 0x72d5b375)
+#define T33 /* 0xfffa3942 */ (T_MASK ^ 0x0005c6bd)
+#define T34 /* 0x8771f681 */ (T_MASK ^ 0x788e097e)
+#define T35 0x6d9d6122
+#define T36 /* 0xfde5380c */ (T_MASK ^ 0x021ac7f3)
+#define T37 /* 0xa4beea44 */ (T_MASK ^ 0x5b4115bb)
+#define T38 0x4bdecfa9
+#define T39 /* 0xf6bb4b60 */ (T_MASK ^ 0x0944b49f)
+#define T40 /* 0xbebfbc70 */ (T_MASK ^ 0x4140438f)
+#define T41 0x289b7ec6
+#define T42 /* 0xeaa127fa */ (T_MASK ^ 0x155ed805)
+#define T43 /* 0xd4ef3085 */ (T_MASK ^ 0x2b10cf7a)
+#define T44 0x04881d05
+#define T45 /* 0xd9d4d039 */ (T_MASK ^ 0x262b2fc6)
+#define T46 /* 0xe6db99e5 */ (T_MASK ^ 0x1924661a)
+#define T47 0x1fa27cf8
+#define T48 /* 0xc4ac5665 */ (T_MASK ^ 0x3b53a99a)
+#define T49 /* 0xf4292244 */ (T_MASK ^ 0x0bd6ddbb)
+#define T50 0x432aff97
+#define T51 /* 0xab9423a7 */ (T_MASK ^ 0x546bdc58)
+#define T52 /* 0xfc93a039 */ (T_MASK ^ 0x036c5fc6)
+#define T53 0x655b59c3
+#define T54 /* 0x8f0ccc92 */ (T_MASK ^ 0x70f3336d)
+#define T55 /* 0xffeff47d */ (T_MASK ^ 0x00100b82)
+#define T56 /* 0x85845dd1 */ (T_MASK ^ 0x7a7ba22e)
+#define T57 0x6fa87e4f
+#define T58 /* 0xfe2ce6e0 */ (T_MASK ^ 0x01d3191f)
+#define T59 /* 0xa3014314 */ (T_MASK ^ 0x5cfebceb)
+#define T60 0x4e0811a1
+#define T61 /* 0xf7537e82 */ (T_MASK ^ 0x08ac817d)
+#define T62 /* 0xbd3af235 */ (T_MASK ^ 0x42c50dca)
+#define T63 0x2ad7d2bb
+#define T64 /* 0xeb86d391 */ (T_MASK ^ 0x14792c6e)
+
+
+static void
+md5_process(md5_state_t *pms, const md5_byte_t *data /*[64]*/)
+{
+ md5_word_t
+ a = pms->abcd[0], b = pms->abcd[1],
+ c = pms->abcd[2], d = pms->abcd[3];
+ md5_word_t t;
+#if BYTE_ORDER > 0
+ /* Define storage only for big-endian CPUs. */
+ md5_word_t X[16];
+#else
+ /* Define storage for little-endian or both types of CPUs. */
+ md5_word_t xbuf[16];
+ const md5_word_t *X;
+#endif
+
+ PJ_CHECK_STACK();
+
+ {
+#if BYTE_ORDER == 0
+ /*
+ * Determine dynamically whether this is a big-endian or
+ * little-endian machine, since we can use a more efficient
+ * algorithm on the latter.
+ */
+ static const int w = 1;
+
+ if (*((const md5_byte_t *)&w)) /* dynamic little-endian */
+#endif
+#if BYTE_ORDER <= 0 /* little-endian */
+ {
+ /*
+ * On little-endian machines, we can process properly aligned
+ * data without copying it.
+ */
+ if (!((data - (const md5_byte_t *)0) & 3)) {
+ /* data are properly aligned */
+ X = (const md5_word_t *)data;
+ } else {
+ /* not aligned */
+ memcpy(xbuf, data, 64);
+ X = xbuf;
+ }
+ }
+#endif
+#if BYTE_ORDER == 0
+ else /* dynamic big-endian */
+#endif
+#if BYTE_ORDER >= 0 /* big-endian */
+ {
+ /*
+ * On big-endian machines, we must arrange the bytes in the
+ * right order.
+ */
+ const md5_byte_t *xp = data;
+ int i;
+
+# if BYTE_ORDER == 0
+ X = xbuf; /* (dynamic only) */
+# else
+# define xbuf X /* (static only) */
+# endif
+ for (i = 0; i < 16; ++i, xp += 4)
+ xbuf[i] = xp[0] + (xp[1] << 8) + (xp[2] << 16) + (xp[3] << 24);
+ }
+#endif
+ }
+
+#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32 - (n))))
+
+ /* Round 1. */
+ /* Let [abcd k s i] denote the operation
+ a = b + ((a + F(b,c,d) + X[k] + T[i]) <<< s). */
+#define F(x, y, z) (((x) & (y)) | (~(x) & (z)))
+#define SET(a, b, c, d, k, s, Ti)\
+ t = a + F(b,c,d) + X[k] + Ti;\
+ a = ROTATE_LEFT(t, s) + b
+ /* Do the following 16 operations. */
+ SET(a, b, c, d, 0, 7, T1);
+ SET(d, a, b, c, 1, 12, T2);
+ SET(c, d, a, b, 2, 17, T3);
+ SET(b, c, d, a, 3, 22, T4);
+ SET(a, b, c, d, 4, 7, T5);
+ SET(d, a, b, c, 5, 12, T6);
+ SET(c, d, a, b, 6, 17, T7);
+ SET(b, c, d, a, 7, 22, T8);
+ SET(a, b, c, d, 8, 7, T9);
+ SET(d, a, b, c, 9, 12, T10);
+ SET(c, d, a, b, 10, 17, T11);
+ SET(b, c, d, a, 11, 22, T12);
+ SET(a, b, c, d, 12, 7, T13);
+ SET(d, a, b, c, 13, 12, T14);
+ SET(c, d, a, b, 14, 17, T15);
+ SET(b, c, d, a, 15, 22, T16);
+#undef SET
+
+ /* Round 2. */
+ /* Let [abcd k s i] denote the operation
+ a = b + ((a + G(b,c,d) + X[k] + T[i]) <<< s). */
+#define G(x, y, z) (((x) & (z)) | ((y) & ~(z)))
+#define SET(a, b, c, d, k, s, Ti)\
+ t = a + G(b,c,d) + X[k] + Ti;\
+ a = ROTATE_LEFT(t, s) + b
+ /* Do the following 16 operations. */
+ SET(a, b, c, d, 1, 5, T17);
+ SET(d, a, b, c, 6, 9, T18);
+ SET(c, d, a, b, 11, 14, T19);
+ SET(b, c, d, a, 0, 20, T20);
+ SET(a, b, c, d, 5, 5, T21);
+ SET(d, a, b, c, 10, 9, T22);
+ SET(c, d, a, b, 15, 14, T23);
+ SET(b, c, d, a, 4, 20, T24);
+ SET(a, b, c, d, 9, 5, T25);
+ SET(d, a, b, c, 14, 9, T26);
+ SET(c, d, a, b, 3, 14, T27);
+ SET(b, c, d, a, 8, 20, T28);
+ SET(a, b, c, d, 13, 5, T29);
+ SET(d, a, b, c, 2, 9, T30);
+ SET(c, d, a, b, 7, 14, T31);
+ SET(b, c, d, a, 12, 20, T32);
+#undef SET
+
+ /* Round 3. */
+ /* Let [abcd k s t] denote the operation
+ a = b + ((a + H(b,c,d) + X[k] + T[i]) <<< s). */
+#define H(x, y, z) ((x) ^ (y) ^ (z))
+#define SET(a, b, c, d, k, s, Ti)\
+ t = a + H(b,c,d) + X[k] + Ti;\
+ a = ROTATE_LEFT(t, s) + b
+ /* Do the following 16 operations. */
+ SET(a, b, c, d, 5, 4, T33);
+ SET(d, a, b, c, 8, 11, T34);
+ SET(c, d, a, b, 11, 16, T35);
+ SET(b, c, d, a, 14, 23, T36);
+ SET(a, b, c, d, 1, 4, T37);
+ SET(d, a, b, c, 4, 11, T38);
+ SET(c, d, a, b, 7, 16, T39);
+ SET(b, c, d, a, 10, 23, T40);
+ SET(a, b, c, d, 13, 4, T41);
+ SET(d, a, b, c, 0, 11, T42);
+ SET(c, d, a, b, 3, 16, T43);
+ SET(b, c, d, a, 6, 23, T44);
+ SET(a, b, c, d, 9, 4, T45);
+ SET(d, a, b, c, 12, 11, T46);
+ SET(c, d, a, b, 15, 16, T47);
+ SET(b, c, d, a, 2, 23, T48);
+#undef SET
+
+ /* Round 4. */
+ /* Let [abcd k s t] denote the operation
+ a = b + ((a + I(b,c,d) + X[k] + T[i]) <<< s). */
+#define I(x, y, z) ((y) ^ ((x) | ~(z)))
+#define SET(a, b, c, d, k, s, Ti)\
+ t = a + I(b,c,d) + X[k] + Ti;\
+ a = ROTATE_LEFT(t, s) + b
+ /* Do the following 16 operations. */
+ SET(a, b, c, d, 0, 6, T49);
+ SET(d, a, b, c, 7, 10, T50);
+ SET(c, d, a, b, 14, 15, T51);
+ SET(b, c, d, a, 5, 21, T52);
+ SET(a, b, c, d, 12, 6, T53);
+ SET(d, a, b, c, 3, 10, T54);
+ SET(c, d, a, b, 10, 15, T55);
+ SET(b, c, d, a, 1, 21, T56);
+ SET(a, b, c, d, 8, 6, T57);
+ SET(d, a, b, c, 15, 10, T58);
+ SET(c, d, a, b, 6, 15, T59);
+ SET(b, c, d, a, 13, 21, T60);
+ SET(a, b, c, d, 4, 6, T61);
+ SET(d, a, b, c, 11, 10, T62);
+ SET(c, d, a, b, 2, 15, T63);
+ SET(b, c, d, a, 9, 21, T64);
+#undef SET
+
+ /* Then perform the following additions. (That is increment each
+ of the four registers by the value it had before this block
+ was started.) */
+ pms->abcd[0] += a;
+ pms->abcd[1] += b;
+ pms->abcd[2] += c;
+ pms->abcd[3] += d;
+}
+
+void
+md5_init(md5_state_t *pms)
+{
+ PJ_CHECK_STACK();
+
+ pms->count[0] = pms->count[1] = 0;
+ pms->abcd[0] = 0x67452301;
+ pms->abcd[1] = /*0xefcdab89*/ T_MASK ^ 0x10325476;
+ pms->abcd[2] = /*0x98badcfe*/ T_MASK ^ 0x67452301;
+ pms->abcd[3] = 0x10325476;
+}
+
+void
+md5_append(md5_state_t *pms, const md5_byte_t *data, int nbytes)
+{
+ const md5_byte_t *p = data;
+ int left = nbytes;
+ int offset = (pms->count[0] >> 3) & 63;
+ md5_word_t nbits = (md5_word_t)(nbytes << 3);
+
+ PJ_CHECK_STACK();
+
+ if (nbytes <= 0)
+ return;
+
+ /* Update the message length. */
+ pms->count[1] += nbytes >> 29;
+ pms->count[0] += nbits;
+ if (pms->count[0] < nbits)
+ pms->count[1]++;
+
+ /* Process an initial partial block. */
+ if (offset) {
+ int copy = (offset + nbytes > 64 ? 64 - offset : nbytes);
+
+ memcpy(pms->buf + offset, p, copy);
+ if (offset + copy < 64)
+ return;
+ p += copy;
+ left -= copy;
+ md5_process(pms, pms->buf);
+ }
+
+ /* Process full blocks. */
+ for (; left >= 64; p += 64, left -= 64)
+ md5_process(pms, p);
+
+ /* Process a final partial block. */
+ if (left)
+ memcpy(pms->buf, p, left);
+}
+
+void
+md5_finish(md5_state_t *pms, md5_byte_t digest[16])
+{
+ static const md5_byte_t pad[64] = {
+ 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ };
+ md5_byte_t data[8];
+ int i;
+
+ PJ_CHECK_STACK();
+
+ /* Save the length before padding. */
+ for (i = 0; i < 8; ++i)
+ data[i] = (md5_byte_t)(pms->count[i >> 2] >> ((i & 3) << 3));
+ /* Pad to 56 bytes mod 64. */
+ md5_append(pms, pad, ((55 - (pms->count[0] >> 3)) & 63) + 1);
+ /* Append the length. */
+ md5_append(pms, data, 8);
+ for (i = 0; i < 16; ++i)
+ digest[i] = (md5_byte_t)(pms->abcd[i >> 2] >> ((i & 3) << 3));
+}
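+
+/*
+ * Usage sketch (illustrative only): computing the MD5 digest of a buffer
+ * with the init/append/finish sequence defined above. "buf" and "buf_len"
+ * stand for the caller's data.
+ *
+ *   md5_state_t ctx;
+ *   md5_byte_t digest[16];
+ *
+ *   md5_init(&ctx);
+ *   md5_append(&ctx, (const md5_byte_t*)buf, buf_len);
+ *   md5_finish(&ctx, digest);
+ */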
diff --git a/pjlib/src/pj/os_core_linux_kernel.c b/pjlib/src/pj/os_core_linux_kernel.c
new file mode 100644
index 00000000..82edccb4
--- /dev/null
+++ b/pjlib/src/pj/os_core_linux_kernel.c
@@ -0,0 +1,685 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/os_core_linux_kernel.c 3 10/29/05 11:51a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/os_core_linux_kernel.c $
+ *
+ * 3 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 9/22/05 10:38a Bennylp
+ * Created.
+ *
+ */
+#include <pj/os.h>
+#include <pj/assert.h>
+#include <pj/pool.h>
+#include <pj/log.h>
+#include <pj/except.h>
+#include <pj/errno.h>
+#include <pj/string.h>
+#include <pj/compat/high_precision.h>
+#include <pj/compat/sprintf.h>
+
+#include <linux/config.h>
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/kernel.h>
+#include <linux/sched.h>
+//#include <linux/tqueue.h>
+#include <linux/wait.h>
+#include <linux/signal.h>
+
+#include <asm/atomic.h>
+#include <asm/unistd.h>
+#include <asm/semaphore.h>
+
+#define THIS_FILE "oslinuxkern"
+
+struct pj_thread_t
+{
+ /** Thread's name. */
+ char obj_name[PJ_MAX_OBJ_NAME];
+
+ /** Linux task structure for thread. */
+ struct task_struct *thread;
+
+ /** Flags (specified in pj_thread_create) */
+ unsigned flags;
+
+ /** Task queue needed to launch thread. */
+ //struct tq_struct tq;
+
+ /** Semaphore needed to control thread startup. */
+ struct semaphore startstop_sem;
+
+ /** Semaphore to suspend thread during startup. */
+ struct semaphore suspend_sem;
+
+ /** Queue thread is waiting on. Gets initialized by
+ thread_initialize, can be used by thread itself.
+ */
+ wait_queue_head_t queue;
+
+ /** Flag to tell thread whether to die or not.
+ When the thread receives a signal, it must check
+ the value of terminate and call thread_deinitialize and terminate
+ if set.
+ */
+ int terminate;
+
+ /** Thread's entry. */
+ pj_thread_proc *func;
+
+ /** Argument. */
+ void *arg;
+};
+
+struct pj_atomic_t
+{
+ atomic_t atom;
+};
+
+struct pj_mutex_t
+{
+ struct semaphore sem;
+ pj_bool_t recursive;
+ pj_thread_t *owner;
+ int own_count;
+};
+
+struct pj_sem_t
+{
+ struct semaphore sem;
+};
+
+/*
+ * Static global variables.
+ */
+#define MAX_TLS_ID 32
+static void *tls_values[MAX_TLS_ID];
+static int tls_id;
+static long thread_tls_id;
+static spinlock_t critical_section = SPIN_LOCK_UNLOCKED;
+static unsigned long spinlock_flags;
+static pj_thread_desc main_thread;
+
+/* private functions */
+//#define TRACE_(expr) PJ_LOG(3,expr)
+#define TRACE_(x)
+
+
+/* This must be called in the context of the new thread. */
+static void thread_initialize( pj_thread_t *thread )
+{
+ TRACE_((THIS_FILE, "---new thread initializing..."));
+
+ /* Set TLS */
+ pj_thread_local_set(thread_tls_id, thread);
+
+ /* fill in thread structure */
+ thread->thread = current;
+ pj_assert(thread->thread != NULL);
+
+ /* set signal mask to what we want to respond */
+ siginitsetinv(&current->blocked,
+ sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGTERM));
+
+ /* initialise wait queue */
+ init_waitqueue_head(&thread->queue);
+
+ /* initialise termination flag */
+ thread->terminate = 0;
+
+ /* set name of this process (max 15 chars + 0 !) */
+ thread->obj_name[15] = '\0';
+ sprintf(current->comm, "%s", thread->obj_name);
+
+ /* tell the creator that we are ready and let him continue */
+ up(&thread->startstop_sem);
+}
+
+/* cleanup of thread. Called by the exiting thread. */
+static void thread_deinitialize(pj_thread_t *thread)
+{
+ /* we are terminating */
+
+ /* lock the kernel, the exit will unlock it */
+ thread->thread = NULL;
+ mb();
+
+ /* notify the stop_kthread() routine that we are terminating. */
+ up(&thread->startstop_sem);
+
+ /* the kernel_thread that called clone() does a do_exit here. */
+
+ /* there is no race here between execution of the "killer" and
+ real termination of the thread (race window between up and do_exit),
+ since both the thread and the "killer" function are running with
+ the kernel lock held.
+ The kernel lock will be freed after the thread exited, so the code
+ is really not executed anymore as soon as the unload functions gets
+ the kernel lock back.
+ The init process may not have made the cleanup of the process here,
+ but the cleanup can be done safely with the module unloaded.
+ */
+
+}
+
+static int thread_proc(void *arg)
+{
+ pj_thread_t *thread = arg;
+
+ TRACE_((THIS_FILE, "---new thread starting!"));
+
+ /* Initialize thread. */
+ thread_initialize( thread );
+
+ /* Wait if created suspended. */
+ if (thread->flags & PJ_THREAD_SUSPENDED) {
+ TRACE_((THIS_FILE, "---new thread suspended..."));
+ down(&thread->suspend_sem);
+ }
+
+ TRACE_((THIS_FILE, "---new thread running..."));
+
+ pj_assert(thread->func != NULL);
+
+ /* Call thread's entry. */
+ (*thread->func)(thread->arg);
+
+ TRACE_((THIS_FILE, "---thread exiting..."));
+
+ /* Cleanup thread. */
+ thread_deinitialize(thread);
+
+ return 0;
+}
+
+/* The very task entry. */
+static void kthread_launcher(void *arg)
+{
+ TRACE_((THIS_FILE, "...launching thread!..."));
+ kernel_thread(&thread_proc, arg, 0);
+}
+
+PJ_DEF(pj_status_t) pj_init(void)
+{
+ pj_status_t rc;
+
+ PJ_LOG(5, ("pj_init", "Initializing PJ Library.."));
+
+ rc = pj_thread_init();
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ /* Initialize exception ID for the pool.
+ * Must do so after critical section is configured.
+ */
+ rc = pj_exception_id_alloc("PJLIB/No memory", &PJ_NO_MEMORY_EXCEPTION);
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(pj_uint32_t) pj_getpid(void)
+{
+ return 1;
+}
+
+PJ_DEF(pj_status_t) pj_thread_register ( const char *cstr_thread_name,
+ pj_thread_desc desc,
+ pj_thread_t **ptr_thread)
+{
+ char stack_ptr;
+ pj_thread_t *thread = (pj_thread_t *)desc;
+ pj_str_t thread_name = pj_str((char*)cstr_thread_name);
+
+ /* Size sanity check. */
+ if (sizeof(pj_thread_desc) < sizeof(pj_thread_t)) {
+ pj_assert(!"Not enough pj_thread_desc size!");
+ return PJ_EBUG;
+ }
+
+ /* If a thread descriptor has been registered before, just return it. */
+ if (pj_thread_local_get (thread_tls_id) != 0) {
+ *ptr_thread = (pj_thread_t*)pj_thread_local_get (thread_tls_id);
+ return PJ_SUCCESS;
+ }
+
+ /* Initialize and set the thread entry. */
+ pj_memset(desc, 0, sizeof(pj_thread_desc));
+
+ if(cstr_thread_name && pj_strlen(&thread_name) < sizeof(thread->obj_name)-1)
+ pj_sprintf(thread->obj_name, cstr_thread_name, thread->thread);
+ else
+ pj_sprintf(thread->obj_name, "thr%p", (void*)thread->thread);
+
+ /* Initialize. */
+ thread_initialize(thread);
+
+ /* Eat semaphore. */
+ down(&thread->startstop_sem);
+
+#if defined(PJ_OS_HAS_CHECK_STACK) && PJ_OS_HAS_CHECK_STACK!=0
+ thread->stk_start = &stack_ptr;
+ thread->stk_size = 0xFFFFFFFFUL;
+ thread->stk_max_usage = 0;
+#else
+ stack_ptr = '\0';
+#endif
+
+ *ptr_thread = thread;
+ return PJ_SUCCESS;
+}
+
+
+pj_status_t pj_thread_init(void)
+{
+ pj_status_t rc;
+ pj_thread_t *dummy;
+
+ rc = pj_thread_local_alloc(&thread_tls_id);
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ return pj_thread_register("pjlib-main", (pj_uint8_t*)&main_thread, &dummy);
+}
+
+PJ_DEF(pj_status_t) pj_thread_create( pj_pool_t *pool, const char *thread_name,
+ pj_thread_proc *proc, void *arg,
+ pj_size_t stack_size, unsigned flags,
+ pj_thread_t **ptr_thread)
+{
+ pj_thread_t *thread;
+
+ TRACE_((THIS_FILE, "pj_thread_create()"));
+
+ PJ_ASSERT_RETURN(pool && proc && ptr_thread, PJ_EINVAL);
+
+ thread = pj_pool_zalloc(pool, sizeof(pj_thread_t));
+ if (!thread)
+ return PJ_ENOMEM;
+
+ PJ_UNUSED_ARG(stack_size);
+
+ /* Thread name. */
+ if (!thread_name)
+ thread_name = "thr%p";
+
+ if (strchr(thread_name, '%')) {
+ pj_snprintf(thread->obj_name, PJ_MAX_OBJ_NAME, thread_name, thread);
+ } else {
+ strncpy(thread->obj_name, thread_name, PJ_MAX_OBJ_NAME);
+ thread->obj_name[PJ_MAX_OBJ_NAME-1] = '\0';
+ }
+
+ /* Init thread's semaphore. */
+ TRACE_((THIS_FILE, "...init semaphores..."));
+ init_MUTEX_LOCKED(&thread->startstop_sem);
+ init_MUTEX_LOCKED(&thread->suspend_sem);
+
+ thread->flags = flags;
+
+ if ((flags & PJ_THREAD_SUSPENDED) == 0) {
+ up(&thread->suspend_sem);
+ }
+
+ /* Store the functions and argument. */
+ thread->func = proc;
+ thread->arg = arg;
+
+ /* Save return value. */
+ *ptr_thread = thread;
+
+ /* Create the new thread by running a task through keventd. */
+
+#if 0
+ /* Initialize the task queue struct. */
+ thread->tq.sync = 0;
+ INIT_LIST_HEAD(&thread->tq.list);
+ thread->tq.routine = kthread_launcher;
+ thread->tq.data = thread;
+
+ /* and schedule it for execution. */
+ schedule_task(&thread->tq);
+#endif
+ kthread_launcher(thread);
+
+ /* Wait until thread has reached the setup_thread routine. */
+ TRACE_((THIS_FILE, "...wait for the new thread..."));
+ down(&thread->startstop_sem);
+
+ TRACE_((THIS_FILE, "...main thread resumed..."));
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(const char*) pj_thread_get_name(pj_thread_t *thread)
+{
+ return thread->obj_name;
+}
+
+PJ_DEF(pj_status_t) pj_thread_resume(pj_thread_t *thread)
+{
+ up(&thread->suspend_sem);
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(pj_thread_t*) pj_thread_this(void)
+{
+ return (pj_thread_t*)pj_thread_local_get(thread_tls_id);
+}
+
+PJ_DEF(pj_status_t) pj_thread_join(pj_thread_t *p)
+{
+ TRACE_((THIS_FILE, "pj_thread_join()"));
+ down(&p->startstop_sem);
+ TRACE_((THIS_FILE, " joined!"));
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(pj_status_t) pj_thread_destroy(pj_thread_t *thread)
+{
+ PJ_ASSERT_RETURN(thread != NULL, PJ_EINVALIDOP);
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(pj_status_t) pj_thread_sleep(unsigned msec)
+{
+ pj_highprec_t ticks;
+ pj_thread_t *thread = pj_thread_this();
+
+ PJ_ASSERT_RETURN(thread != NULL, PJ_EBUG);
+
+ /* Use high precision calculation to make sure we don't
+ * crop values:
+ *
+ * ticks = HZ * msec / 1000
+ */
+ ticks = HZ;
+ pj_highprec_mul(ticks, msec);
+ pj_highprec_div(ticks, 1000);
+
+ TRACE_((THIS_FILE, "this thread will sleep for %u ticks", ticks));
+ interruptible_sleep_on_timeout( &thread->queue, ticks);
+ return PJ_SUCCESS;
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+PJ_DEF(pj_status_t) pj_atomic_create( pj_pool_t *pool,
+ pj_atomic_value_t value,
+ pj_atomic_t **ptr_var)
+{
+ pj_atomic_t *t = pj_pool_calloc(pool, 1, sizeof(pj_atomic_t));
+ if (!t) return PJ_ENOMEM;
+
+ atomic_set(&t->atom, value);
+ *ptr_var = t;
+
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(pj_status_t) pj_atomic_destroy( pj_atomic_t *var )
+{
+ return 0;
+}
+
+PJ_DEF(pj_atomic_value_t) pj_atomic_set(pj_atomic_t *var,
+ pj_atomic_value_t value)
+{
+ pj_atomic_value_t oldval = atomic_read(&var->atom);
+ atomic_set(&var->atom, value);
+ return oldval;
+}
+
+PJ_DEF(pj_atomic_value_t) pj_atomic_get(pj_atomic_t *var)
+{
+ return atomic_read(&var->atom);
+}
+
+PJ_DEF(pj_atomic_value_t) pj_atomic_inc(pj_atomic_t *var)
+{
+ atomic_inc(&var->atom);
+ return atomic_read(&var->atom);
+}
+
+PJ_DEF(pj_atomic_value_t) pj_atomic_dec(pj_atomic_t *var)
+{
+ atomic_dec(&var->atom);
+ return atomic_read(&var->atom);
+}
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+PJ_DEF(pj_status_t) pj_thread_local_alloc(long *index)
+{
+ if (tls_id >= MAX_TLS_ID)
+ return PJ_ETOOMANY;
+
+ *index = tls_id++;
+
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(void) pj_thread_local_free(long index)
+{
+ pj_assert(index >= 0 && index < MAX_TLS_ID);
+}
+
+PJ_DEF(void) pj_thread_local_set(long index, void *value)
+{
+ pj_assert(index >= 0 && index < MAX_TLS_ID);
+ tls_values[index] = value;
+}
+
+PJ_DEF(void*) pj_thread_local_get(long index)
+{
+ pj_assert(index >= 0 && index < MAX_TLS_ID);
+ return tls_values[index];
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+PJ_DEF(void) pj_enter_critical_section(void)
+{
+ spin_lock_irqsave(&critical_section, spinlock_flags);
+}
+
+PJ_DEF(void) pj_leave_critical_section(void)
+{
+ spin_unlock_irqrestore(&critical_section, spinlock_flags);
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+PJ_DEF(pj_status_t) pj_mutex_create( pj_pool_t *pool,
+ const char *name,
+ int type,
+ pj_mutex_t **ptr_mutex)
+{
+ pj_mutex_t *mutex;
+
+ PJ_UNUSED_ARG(name);
+
+ mutex = pj_pool_alloc(pool, sizeof(pj_mutex_t));
+ if (!mutex)
+ return PJ_ENOMEM;
+
+ init_MUTEX(&mutex->sem);
+
+ mutex->recursive = (type == PJ_MUTEX_RECURSE);
+ mutex->owner = NULL;
+ mutex->own_count = 0;
+
+ /* Done. */
+ *ptr_mutex = mutex;
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(pj_status_t) pj_mutex_create_simple( pj_pool_t *pool, const char *name,
+ pj_mutex_t **mutex )
+{
+ return pj_mutex_create(pool, name, PJ_MUTEX_SIMPLE, mutex);
+}
+
+PJ_DEF(pj_status_t) pj_mutex_create_recursive( pj_pool_t *pool,
+ const char *name,
+ pj_mutex_t **mutex )
+{
+ return pj_mutex_create( pool, name, PJ_MUTEX_RECURSE, mutex);
+}
+
+PJ_DEF(pj_status_t) pj_mutex_lock(pj_mutex_t *mutex)
+{
+ PJ_ASSERT_RETURN(mutex, PJ_EINVAL);
+
+ if (mutex->recursive) {
+ pj_thread_t *this_thread = pj_thread_this();
+ if (mutex->owner == this_thread) {
+ ++mutex->own_count;
+ } else {
+ down(&mutex->sem);
+ pj_assert(mutex->own_count == 0);
+ mutex->owner = this_thread;
+ mutex->own_count = 1;
+ }
+ } else {
+ down(&mutex->sem);
+ }
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(pj_status_t) pj_mutex_trylock(pj_mutex_t *mutex)
+{
+ long rc;
+
+ PJ_ASSERT_RETURN(mutex, PJ_EINVAL);
+
+ if (mutex->recursive) {
+ pj_thread_t *this_thread = pj_thread_this();
+ if (mutex->owner == this_thread) {
+ ++mutex->own_count;
+ } else {
+ rc = down_interruptible(&mutex->sem);
+ if (rc != 0)
+ return PJ_RETURN_OS_ERROR(-rc);
+ pj_assert(mutex->own_count == 0);
+ mutex->owner = this_thread;
+ mutex->own_count = 1;
+ }
+ } else {
+ int rc = down_trylock(&mutex->sem);
+ if (rc != 0)
+ return PJ_RETURN_OS_ERROR(-rc);
+ }
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(pj_status_t) pj_mutex_unlock(pj_mutex_t *mutex)
+{
+ PJ_ASSERT_RETURN(mutex, PJ_EINVAL);
+
+ if (mutex->recursive) {
+ pj_thread_t *this_thread = pj_thread_this();
+ if (mutex->owner == this_thread) {
+ pj_assert(mutex->own_count > 0);
+ --mutex->own_count;
+ if (mutex->own_count == 0) {
+ mutex->owner = NULL;
+ up(&mutex->sem);
+ }
+ } else {
+ pj_assert(!"Not owner!");
+ return PJ_EINVALIDOP;
+ }
+ } else {
+ up(&mutex->sem);
+ }
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(pj_status_t) pj_mutex_destroy(pj_mutex_t *mutex)
+{
+ PJ_ASSERT_RETURN(mutex != NULL, PJ_EINVAL);
+
+ return PJ_SUCCESS;
+}
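+
+/*
+ * Usage sketch (illustrative only): the owner/own_count bookkeeping above
+ * lets the same thread lock a PJ_MUTEX_RECURSE mutex more than once, as
+ * long as every lock is balanced by an unlock. "pool" is an existing
+ * memory pool owned by the caller.
+ *
+ *   pj_mutex_t *lock;
+ *   pj_mutex_create_recursive(pool, "mylock", &lock);
+ *   pj_mutex_lock(lock);
+ *   pj_mutex_lock(lock);      (same owner: own_count becomes 2)
+ *   pj_mutex_unlock(lock);
+ *   pj_mutex_unlock(lock);    (own_count reaches 0, semaphore released)
+ *   pj_mutex_destroy(lock);
+ */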
+
+#if defined(PJ_DEBUG) && PJ_DEBUG != 0
+PJ_DEF(pj_bool_t) pj_mutex_is_locked(pj_mutex_t *mutex)
+{
+ if (mutex->recursive)
+ return mutex->owner == pj_thread_this();
+ else
+ return 1;
+}
+#endif /* PJ_DEBUG */
+
+
+#if defined(PJ_HAS_SEMAPHORE) && PJ_HAS_SEMAPHORE != 0
+
+PJ_DEF(pj_status_t) pj_sem_create( pj_pool_t *pool,
+ const char *name,
+ unsigned initial,
+ unsigned max,
+ pj_sem_t **ptr_sem)
+{
+ pj_sem_t *sem;
+
+ PJ_UNUSED_ARG(max);
+
+ PJ_ASSERT_RETURN(pool && ptr_sem, PJ_EINVAL);
+
+ sem = pj_pool_alloc(pool, sizeof(pj_sem_t));
+ if (!sem)
+ return PJ_ENOMEM;
+
+ sema_init(&sem->sem, initial);
+
+ *ptr_sem = sem;
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(pj_status_t) pj_sem_wait(pj_sem_t *sem)
+{
+ PJ_ASSERT_RETURN(sem, PJ_EINVAL);
+
+ down(&sem->sem);
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(pj_status_t) pj_sem_trywait(pj_sem_t *sem)
+{
+ int rc;
+
+ PJ_ASSERT_RETURN(sem, PJ_EINVAL);
+
+ rc = down_trylock(&sem->sem);
+ if (rc != 0) {
+ return PJ_RETURN_OS_ERROR(-rc);
+ } else {
+ return PJ_SUCCESS;
+ }
+}
+
+PJ_DEF(pj_status_t) pj_sem_post(pj_sem_t *sem)
+{
+ PJ_ASSERT_RETURN(sem, PJ_EINVAL);
+
+ up(&sem->sem);
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(pj_status_t) pj_sem_destroy(pj_sem_t *sem)
+{
+ PJ_ASSERT_RETURN(sem, PJ_EINVAL);
+
+ return PJ_SUCCESS;
+}
+
+#endif /* PJ_HAS_SEMAPHORE */
+
+
+
+
diff --git a/pjlib/src/pj/os_core_unix.c b/pjlib/src/pj/os_core_unix.c
new file mode 100644
index 00000000..0f4f3a99
--- /dev/null
+++ b/pjlib/src/pj/os_core_unix.c
@@ -0,0 +1,1182 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/os_core_unix.c 11 10/29/05 10:27p Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pj/os_core_unix.c $
+ *
+ * 11 10/29/05 10:27p Bennylp
+ * Fixed misc warnings.
+ *
+ * 10 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 9 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ */
+#include <pj/os.h>
+#include <pj/assert.h>
+#include <pj/pool.h>
+#include <pj/log.h>
+#include <pj/rand.h>
+#include <pj/string.h>
+#include <pj/guid.h>
+#include <pj/compat/sprintf.h>
+#include <pj/except.h>
+#include <pj/errno.h>
+
+#if defined(PJ_HAS_SEMAPHORE) && PJ_HAS_SEMAPHORE != 0
+# include <semaphore.h>
+#endif
+
+#include <unistd.h> // getpid()
+#include <errno.h> // errno
+
+#define __USE_GNU
+#include <pthread.h>
+
+#define THIS_FILE "osunix"
+
+struct pj_thread_t
+{
+ char obj_name[PJ_MAX_OBJ_NAME];
+ pthread_t thread;
+ pj_thread_proc *proc;
+ void *arg;
+
+ pj_mutex_t *suspended_mutex;
+
+#if defined(PJ_OS_HAS_CHECK_STACK) && PJ_OS_HAS_CHECK_STACK!=0
+ pj_uint32_t stk_size;
+ pj_uint32_t stk_max_usage;
+ char *stk_start;
+ const char *caller_file;
+ int caller_line;
+#endif
+};
+
+struct pj_atomic_t
+{
+ pj_mutex_t *mutex;
+ pj_atomic_value_t value;
+};
+
+struct pj_mutex_t
+{
+ pthread_mutex_t mutex;
+ char obj_name[PJ_MAX_OBJ_NAME];
+#if PJ_DEBUG
+ int nesting_level;
+ pj_thread_t *owner;
+#endif
+};
+
+#if defined(PJ_HAS_SEMAPHORE) && PJ_HAS_SEMAPHORE != 0
+struct pj_sem_t
+{
+ sem_t sem;
+ char obj_name[PJ_MAX_OBJ_NAME];
+};
+#endif /* PJ_HAS_SEMAPHORE */
+
+#if defined(PJ_HAS_EVENT_OBJ) && PJ_HAS_EVENT_OBJ != 0
+struct pj_event_t
+{
+ char obj_name[PJ_MAX_OBJ_NAME];
+};
+#endif /* PJ_HAS_EVENT_OBJ */
+
+
+#if PJ_HAS_THREADS
+ static pj_thread_desc main_thread;
+ static long thread_tls_id;
+ static pj_mutex_t critical_section;
+#else
+# define MAX_THREADS 32
+ static int tls_flag[MAX_THREADS];
+ static void *tls[MAX_THREADS];
+#endif
+
+static pj_status_t init_mutex(pj_mutex_t *mutex, const char *name, int type);
+
+/*
+ * pj_init(void).
+ * Init PJLIB!
+ */
+PJ_DEF(pj_status_t) pj_init(void)
+{
+ char dummy_guid[PJ_GUID_MAX_LENGTH];
+ pj_str_t guid;
+ pj_status_t rc;
+
+ PJ_LOG(5, ("pj_init", "Initializing PJ Library.."));
+
+#if PJ_HAS_THREADS
+ /* Init this thread's TLS. */
+ if ((rc=pj_thread_init()) != 0) {
+ return rc;
+ }
+
+ /* Critical section. */
+ if ((rc=init_mutex(&critical_section, "critsec", PJ_MUTEX_SIMPLE)) != 0)
+ return rc;
+
+#endif
+
+ /* Initialize exception ID for the pool.
+ * Must do so after critical section is configured.
+ */
+ rc = pj_exception_id_alloc("PJLIB/No memory", &PJ_NO_MEMORY_EXCEPTION);
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ /* Init random seed. */
+ pj_srand( clock() );
+
+ /* Startup GUID. */
+ guid.ptr = dummy_guid;
+ pj_generate_unique_string( &guid );
+
+ /* Startup timestamp */
+#if defined(PJ_HAS_HIGH_RES_TIMER) && PJ_HAS_HIGH_RES_TIMER != 0
+ {
+ pj_timestamp dummy_ts;
+ if ((rc=pj_get_timestamp(&dummy_ts)) != 0) {
+ return rc;
+ }
+ }
+#endif
+
+ return PJ_SUCCESS;
+}
+
+/*
+ * pj_getpid(void)
+ */
+PJ_DEF(pj_uint32_t) pj_getpid(void)
+{
+ PJ_CHECK_STACK();
+ return getpid();
+}
+
+/*
+ * pj_thread_register(..)
+ */
+PJ_DEF(pj_status_t) pj_thread_register ( const char *cstr_thread_name,
+ pj_thread_desc desc,
+ pj_thread_t **ptr_thread)
+{
+#if PJ_HAS_THREADS
+ char stack_ptr;
+ pj_thread_t *thread = (pj_thread_t *)desc;
+ pj_str_t thread_name = pj_str((char*)cstr_thread_name);
+
+ /* Size sanity check. */
+ if (sizeof(pj_thread_desc) < sizeof(pj_thread_t)) {
+ pj_assert(!"Not enough pj_thread_desc size!");
+ return PJ_EBUG;
+ }
+
+ /* If a thread descriptor has been registered before, just return it. */
+ if (pj_thread_local_get (thread_tls_id) != 0) {
+ *ptr_thread = (pj_thread_t*)pj_thread_local_get (thread_tls_id);
+ return PJ_SUCCESS;
+ }
+
+ /* Initialize and set the thread entry. */
+ pj_memset(desc, 0, sizeof(pj_thread_desc));
+ thread->thread = pthread_self();
+
+ if(cstr_thread_name && pj_strlen(&thread_name) < sizeof(thread->obj_name)-1)
+ pj_sprintf(thread->obj_name, cstr_thread_name, thread->thread);
+ else
+ pj_sprintf(thread->obj_name, "thr%p", (void*)thread->thread);
+
+ pj_thread_local_set(thread_tls_id, thread);
+
+#if defined(PJ_OS_HAS_CHECK_STACK) && PJ_OS_HAS_CHECK_STACK!=0
+ thread->stk_start = &stack_ptr;
+ thread->stk_size = 0xFFFFFFFFUL;
+ thread->stk_max_usage = 0;
+#else
+ stack_ptr = '\0';
+#endif
+
+ *ptr_thread = thread;
+ return PJ_SUCCESS;
+#else
+ pj_thread_t *thread = (pj_thread_t*)desc;
+ *ptr_thread = thread;
+ return PJ_SUCCESS;
+#endif
+}
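+
+/*
+ * Usage sketch (illustrative only): a thread that was not created with
+ * pj_thread_create() (e.g. a thread owned by an external library) must be
+ * registered before it calls other PJLIB functions. The descriptor must
+ * stay valid for as long as the thread uses PJLIB.
+ *
+ *   pj_thread_desc desc;
+ *   pj_thread_t *this_thread;
+ *
+ *   if (pj_thread_register("ext%p", desc, &this_thread) == PJ_SUCCESS) {
+ *       ... PJLIB calls are now safe from this thread ...
+ *   }
+ */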
+
+/*
+ * pj_thread_init(void)
+ */
+pj_status_t pj_thread_init(void)
+{
+#if PJ_HAS_THREADS
+ pj_status_t rc;
+ pj_thread_t *dummy;
+
+ rc = pj_thread_local_alloc(&thread_tls_id );
+ if (rc != PJ_SUCCESS) {
+ return rc;
+ }
+ return pj_thread_register("thr%p", (pj_uint8_t*)&main_thread, &dummy);
+#else
+ PJ_LOG(2,(THIS_FILE, "Thread init error. Threading is not enabled!"));
+ return PJ_EINVALIDOP;
+#endif
+}
+
+#if PJ_HAS_THREADS
+/*
+ * thread_main()
+ *
+ * This is the main entry for all threads.
+ */
+static void *thread_main(void *param)
+{
+ pj_thread_t *rec = param;
+ void *result;
+
+#if defined(PJ_OS_HAS_CHECK_STACK) && PJ_OS_HAS_CHECK_STACK!=0
+ rec->stk_start = (char*)&rec;
+#endif
+
+ /* Set current thread id. */
+ pj_thread_local_set(thread_tls_id, rec);
+
+ /* Check if suspension is required. */
+ if (rec->suspended_mutex)
+ pj_mutex_lock(rec->suspended_mutex);
+
+ PJ_LOG(6,(rec->obj_name, "Thread started"));
+
+ /* Call user's entry! */
+ result = (void*) (*rec->proc)(rec->arg);
+
+ /* Done. */
+ PJ_LOG(6,(rec->obj_name, "Thread quitting"));
+ return result;
+}
+#endif
+
+/*
+ * pj_thread_create(...)
+ */
+PJ_DEF(pj_status_t) pj_thread_create( pj_pool_t *pool,
+ const char *thread_name,
+ pj_thread_proc *proc,
+ void *arg,
+ pj_size_t stack_size,
+ unsigned flags,
+ pj_thread_t **ptr_thread)
+{
+#if PJ_HAS_THREADS
+ pj_thread_t *rec;
+ int rc;
+
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(pool && proc && ptr_thread, PJ_EINVAL);
+
+ /* Create thread record and assign name for the thread */
+ rec = (struct pj_thread_t*) pj_pool_zalloc(pool, sizeof(pj_thread_t));
+ if (!rec)
+ return PJ_ENOMEM;
+
+ /* Set name. */
+ if (!thread_name)
+ thread_name = "thr%p";
+
+ if (strchr(thread_name, '%')) {
+ pj_snprintf(rec->obj_name, PJ_MAX_OBJ_NAME, thread_name, rec);
+ } else {
+ strncpy(rec->obj_name, thread_name, PJ_MAX_OBJ_NAME);
+ rec->obj_name[PJ_MAX_OBJ_NAME-1] = '\0';
+ }
+
+#if defined(PJ_OS_HAS_CHECK_STACK) && PJ_OS_HAS_CHECK_STACK!=0
+ rec->stk_size = stack_size ? stack_size : 0xFFFFFFFFUL;
+ rec->stk_max_usage = 0;
+#endif
+
+ /* Emulate suspended thread with mutex. */
+ if (flags & PJ_THREAD_SUSPENDED) {
+ rc = pj_mutex_create_simple(pool, NULL, &rec->suspended_mutex);
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ pj_mutex_lock(rec->suspended_mutex);
+ } else {
+ pj_assert(rec->suspended_mutex == NULL);
+ }
+
+ PJ_LOG(6, (rec->obj_name, "Thread created"));
+
+ /* Create the thread. */
+ rec->proc = proc;
+ rec->arg = arg;
+ rc = pthread_create( &rec->thread, NULL, thread_main, rec);
+ if (rc != 0)
+ return PJ_RETURN_OS_ERROR(rc);
+
+ *ptr_thread = rec;
+ return PJ_SUCCESS;
+#else
+ pj_assert(!"Threading is disabled!");
+ return PJ_EINVALIDOP;
+#endif
+}
+
+/*
+ * pj_thread_get_name()
+ */
+PJ_DEF(const char*) pj_thread_get_name(pj_thread_t *p)
+{
+#if PJ_HAS_THREADS
+ pj_thread_t *rec = (pj_thread_t*)p;
+
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(p, "");
+
+ return rec->obj_name;
+#else
+ return "";
+#endif
+}
+
+/*
+ * pj_thread_resume()
+ */
+PJ_DEF(pj_status_t) pj_thread_resume(pj_thread_t *p)
+{
+ pj_status_t rc;
+
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(p, PJ_EINVAL);
+
+ rc = pj_mutex_unlock(p->suspended_mutex);
+
+ return rc;
+}
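+
+/*
+ * Usage sketch (illustrative only): PJ_THREAD_SUSPENDED is emulated here
+ * with a locked mutex, so a thread created suspended will not run its
+ * entry function until pj_thread_resume() is called. "pool" and
+ * "worker_proc" (a pj_thread_proc) are assumed to be provided by the
+ * application; stack size 0 leaves the platform default here.
+ *
+ *   pj_thread_t *thr;
+ *   pj_thread_create(pool, "worker%p", &worker_proc, NULL, 0,
+ *                    PJ_THREAD_SUSPENDED, &thr);
+ *   ... finish setting up state shared with the worker ...
+ *   pj_thread_resume(thr);
+ *   pj_thread_join(thr);
+ */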
+
+/*
+ * pj_thread_this()
+ */
+PJ_DEF(pj_thread_t*) pj_thread_this(void)
+{
+#if PJ_HAS_THREADS
+ pj_thread_t *rec = pj_thread_local_get(thread_tls_id);
+ pj_assert(rec != NULL);
+
+ /*
+ * MUST NOT check stack because this function is called
+ * by PJ_CHECK_STACK() itself!!!
+ *
+ */
+
+ return rec;
+#else
+ pj_assert(!"Threading is not enabled!");
+ return NULL;
+#endif
+}
+
+/*
+ * pj_thread_join()
+ */
+PJ_DEF(pj_status_t) pj_thread_join(pj_thread_t *p)
+{
+#if PJ_HAS_THREADS
+ pj_thread_t *rec = (pj_thread_t *)p;
+ void *ret;
+ int result;
+
+ PJ_CHECK_STACK();
+
+ PJ_LOG(6, (pj_thread_this()->obj_name, "Joining thread %s", p->obj_name));
+ result = pthread_join( rec->thread, &ret);
+
+ if (result == 0)
+ return PJ_SUCCESS;
+ else
+ return PJ_RETURN_OS_ERROR(result);
+#else
+ PJ_CHECK_STACK();
+ pj_assert(!"No multithreading support!");
+ return PJ_EINVALIDOP;
+#endif
+}
+
+/*
+ * pj_thread_destroy()
+ */
+PJ_DEF(pj_status_t) pj_thread_destroy(pj_thread_t *p)
+{
+ /* This function is used to destroy thread handle in other platforms.
+ * I suppose there's nothing to do here..
+ */
+ PJ_CHECK_STACK();
+ return PJ_SUCCESS;
+}
+
+/*
+ * pj_thread_sleep()
+ */
+PJ_DEF(pj_status_t) pj_thread_sleep(unsigned msec)
+{
+ PJ_CHECK_STACK();
+ if (usleep(msec * 1000) != 0)
+ return PJ_RETURN_OS_ERROR(pj_get_native_os_error());
+ return PJ_SUCCESS;
+}
+
+#if defined(PJ_OS_HAS_CHECK_STACK) && PJ_OS_HAS_CHECK_STACK!=0
+/*
+ * pj_thread_check_stack()
+ * Implementation for PJ_CHECK_STACK()
+ */
+PJ_DEF(void) pj_thread_check_stack(const char *file, int line)
+{
+ char stk_ptr;
+ pj_uint32_t usage;
+ pj_thread_t *thread = pj_thread_this();
+
+ /* Calculate current usage. */
+ usage = (&stk_ptr > thread->stk_start) ? &stk_ptr - thread->stk_start :
+ thread->stk_start - &stk_ptr;
+
+ /* Assert if stack usage is dangerously high. */
+ pj_assert("STACK OVERFLOW!! " && (usage <= thread->stk_size - 128));
+
+ /* Keep statistic. */
+ if (usage > thread->stk_max_usage) {
+ thread->stk_max_usage = usage;
+ thread->caller_file = file;
+ thread->caller_line = line;
+ }
+}
+
+/*
+ * pj_thread_get_stack_max_usage()
+ */
+PJ_DEF(pj_uint32_t) pj_thread_get_stack_max_usage(pj_thread_t *thread)
+{
+ return thread->stk_max_usage;
+}
+
+/*
+ * pj_thread_get_stack_info()
+ */
+PJ_DEF(pj_status_t) pj_thread_get_stack_info( pj_thread_t *thread,
+ const char **file,
+ int *line )
+{
+ pj_assert(thread);
+
+ *file = thread->caller_file;
+ *line = thread->caller_line;
+ return 0;
+}
+
+#endif /* PJ_OS_HAS_CHECK_STACK */
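+
+/*
+ * Usage sketch (illustrative only): with PJ_OS_HAS_CHECK_STACK enabled,
+ * every PJ_CHECK_STACK() updates the per-thread statistics above, which
+ * the application can read back, e.g. before joining a worker thread
+ * "thr".
+ *
+ *   const char *file;
+ *   int line;
+ *   pj_uint32_t worst = pj_thread_get_stack_max_usage(thr);
+ *   pj_thread_get_stack_info(thr, &file, &line);
+ *   PJ_LOG(4,("app", "max stack usage: %u bytes at %s:%d",
+ *             worst, file, line));
+ */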
+
+///////////////////////////////////////////////////////////////////////////////
+/*
+ * pj_atomic_create()
+ */
+PJ_DEF(pj_status_t) pj_atomic_create( pj_pool_t *pool,
+ pj_atomic_value_t initial,
+ pj_atomic_t **ptr_atomic)
+{
+ pj_status_t rc;
+ pj_atomic_t *atomic_var = pj_pool_calloc(pool, 1, sizeof(pj_atomic_t));
+ if (!atomic_var)
+ return PJ_ENOMEM;
+
+#if PJ_HAS_THREADS
+ rc = pj_mutex_create(pool, "atm%p", PJ_MUTEX_SIMPLE, &atomic_var->mutex);
+ if (rc != PJ_SUCCESS)
+ return rc;
+#endif
+ atomic_var->value = initial;
+
+ *ptr_atomic = atomic_var;
+ return PJ_SUCCESS;
+}
+
+/*
+ * pj_atomic_destroy()
+ */
+PJ_DEF(pj_status_t) pj_atomic_destroy( pj_atomic_t *atomic_var )
+{
+ PJ_ASSERT_RETURN(atomic_var, PJ_EINVAL);
+#if PJ_HAS_THREADS
+ return pj_mutex_destroy( atomic_var->mutex );
+#else
+ return 0;
+#endif
+}
+
+/*
+ * pj_atomic_set()
+ */
+PJ_DEF(pj_atomic_value_t) pj_atomic_set(pj_atomic_t *atomic_var,
+ pj_atomic_value_t value)
+{
+ pj_atomic_value_t oldval;
+
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(atomic_var, 0);
+
+#if PJ_HAS_THREADS
+ pj_mutex_lock( atomic_var->mutex );
+#endif
+ oldval = atomic_var->value;
+ atomic_var->value = value;
+#if PJ_HAS_THREADS
+ pj_mutex_unlock( atomic_var->mutex);
+#endif
+ return oldval;
+}
+
+/*
+ * pj_atomic_get()
+ */
+PJ_DEF(pj_atomic_value_t) pj_atomic_get(pj_atomic_t *atomic_var)
+{
+ pj_atomic_value_t oldval;
+
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(atomic_var, 0);
+
+#if PJ_HAS_THREADS
+ pj_mutex_lock( atomic_var->mutex );
+#endif
+ oldval = atomic_var->value;
+#if PJ_HAS_THREADS
+ pj_mutex_unlock( atomic_var->mutex);
+#endif
+ return oldval;
+}
+
+/*
+ * pj_atomic_inc()
+ */
+PJ_DEF(pj_atomic_value_t) pj_atomic_inc(pj_atomic_t *atomic_var)
+{
+ pj_atomic_value_t newval;
+
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(atomic_var, 0);
+
+#if PJ_HAS_THREADS
+ pj_mutex_lock( atomic_var->mutex );
+#endif
+ newval = ++atomic_var->value;
+#if PJ_HAS_THREADS
+ pj_mutex_unlock( atomic_var->mutex);
+#endif
+ return newval;
+}
+
+/*
+ * pj_atomic_dec()
+ */
+PJ_DEF(pj_atomic_value_t) pj_atomic_dec(pj_atomic_t *atomic_var)
+{
+ pj_atomic_value_t newval;
+
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(atomic_var, 0);
+
+#if PJ_HAS_THREADS
+ pj_mutex_lock( atomic_var->mutex );
+#endif
+ newval = --atomic_var->value;
+#if PJ_HAS_THREADS
+ pj_mutex_unlock( atomic_var->mutex);
+#endif
+ return newval;
+}
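+
+/*
+ * Usage sketch (illustrative only): in this implementation pj_atomic_t is
+ * just a value guarded by a mutex, but the calling pattern is the same as
+ * on platforms with native atomic operations. "pool" is owned by the
+ * caller.
+ *
+ *   pj_atomic_t *cnt;
+ *   pj_atomic_create(pool, 0, &cnt);
+ *   pj_atomic_inc(cnt);
+ *   if (pj_atomic_dec(cnt) == 0)
+ *       pj_atomic_destroy(cnt);
+ */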
+
+
+///////////////////////////////////////////////////////////////////////////////
+/*
+ * pj_thread_local_alloc()
+ */
+PJ_DEF(pj_status_t) pj_thread_local_alloc(long *p_index)
+{
+#if PJ_HAS_THREADS
+ pthread_key_t key;
+ int rc;
+
+ PJ_ASSERT_RETURN(p_index != NULL, PJ_EINVAL);
+
+ pj_assert( sizeof(pthread_key_t) <= sizeof(long));
+ if ((rc=pthread_key_create(&key, NULL)) != 0)
+ return PJ_RETURN_OS_ERROR(rc);
+
+ *p_index = key;
+ return PJ_SUCCESS;
+#else
+ int i;
+ for (i=0; i<MAX_THREADS; ++i) {
+ if (tls_flag[i] == 0)
+ break;
+ }
+ if (i == MAX_THREADS)
+ return PJ_ETOOMANY;
+
+ tls_flag[i] = 1;
+ tls[i] = NULL;
+
+ *p_index = i;
+ return PJ_SUCCESS;
+#endif
+}
+
+/*
+ * pj_thread_local_free()
+ */
+PJ_DEF(void) pj_thread_local_free(long index)
+{
+ PJ_CHECK_STACK();
+#if PJ_HAS_THREADS
+ pthread_key_delete(index);
+#else
+ tls_flag[index] = 0;
+#endif
+}
+
+/*
+ * pj_thread_local_set()
+ */
+PJ_DEF(void) pj_thread_local_set(long index, void *value)
+{
+ //Can't check stack because this function is called in the
+ //beginning before main thread is initialized.
+ //PJ_CHECK_STACK();
+#if PJ_HAS_THREADS
+ pthread_setspecific(index, value);
+#else
+ pj_assert(index >= 0 && index < MAX_THREADS);
+ tls[index] = value;
+#endif
+}
+
+PJ_DEF(void*) pj_thread_local_get(long index)
+{
+ //Can't check stack because this function is called
+ //by PJ_CHECK_STACK() itself!!!
+ //PJ_CHECK_STACK();
+#if PJ_HAS_THREADS
+ return pthread_getspecific(index);
+#else
+ pj_assert(index >= 0 && index < MAX_THREADS);
+ return tls[index];
+#endif
+}
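+
+/*
+ * Usage sketch (illustrative only): a TLS index is allocated once and can
+ * then be read and written independently by each thread. "some_pointer"
+ * stands for whatever per-thread state the application keeps.
+ *
+ *   static long my_tls_id;
+ *
+ *   pj_thread_local_alloc(&my_tls_id);
+ *   pj_thread_local_set(my_tls_id, some_pointer);
+ *   ...
+ *   void *p = pj_thread_local_get(my_tls_id);
+ *   ...
+ *   pj_thread_local_free(my_tls_id);
+ */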
+
+///////////////////////////////////////////////////////////////////////////////
+PJ_DEF(void) pj_enter_critical_section(void)
+{
+#if PJ_HAS_THREADS
+ pj_mutex_lock(&critical_section);
+#endif
+}
+
+PJ_DEF(void) pj_leave_critical_section(void)
+{
+#if PJ_HAS_THREADS
+ pj_mutex_unlock(&critical_section);
+#endif
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+static pj_status_t init_mutex(pj_mutex_t *mutex, const char *name, int type)
+{
+#if PJ_HAS_THREADS
+
+ PJ_CHECK_STACK();
+
+ if (type == PJ_MUTEX_SIMPLE) {
+ pthread_mutex_t the_mutex = PTHREAD_MUTEX_INITIALIZER;
+ mutex->mutex = the_mutex;
+ } else {
+ pthread_mutex_t the_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
+ mutex->mutex = the_mutex;
+ }
+
+#if PJ_DEBUG
+ /* Set owner. */
+ mutex->nesting_level = 0;
+ mutex->owner = NULL;
+#endif
+
+ /* Set name. */
+ if (!name) {
+ name = "mtx%p";
+ }
+ if (strchr(name, '%')) {
+ pj_snprintf(mutex->obj_name, PJ_MAX_OBJ_NAME, name, mutex);
+ } else {
+ strncpy(mutex->obj_name, name, PJ_MAX_OBJ_NAME);
+ mutex->obj_name[PJ_MAX_OBJ_NAME-1] = '\0';
+ }
+
+ PJ_LOG(6, (mutex->obj_name, "Mutex created"));
+ return PJ_SUCCESS;
+#else /* PJ_HAS_THREADS */
+ return PJ_SUCCESS;
+#endif
+}
+
+/*
+ * pj_mutex_create()
+ */
+PJ_DEF(pj_status_t) pj_mutex_create(pj_pool_t *pool,
+ const char *name,
+ int type,
+ pj_mutex_t **ptr_mutex)
+{
+#if PJ_HAS_THREADS
+ pj_status_t rc;
+ pj_mutex_t *mutex;
+
+ PJ_ASSERT_RETURN(pool && ptr_mutex, PJ_EINVAL);
+
+ mutex = pj_pool_alloc(pool, sizeof(*mutex));
+ if (!mutex) return PJ_ENOMEM;
+
+ if ((rc=init_mutex(mutex, name, type)) != PJ_SUCCESS)
+ return rc;
+
+ *ptr_mutex = mutex;
+ return PJ_SUCCESS;
+#else /* PJ_HAS_THREADS */
+ *ptr_mutex = (pj_mutex_t*)1;
+ return PJ_SUCCESS;
+#endif
+}
+
+/*
+ * pj_mutex_create_simple()
+ */
+PJ_DEF(pj_status_t) pj_mutex_create_simple( pj_pool_t *pool,
+ const char *name,
+ pj_mutex_t **mutex )
+{
+ return pj_mutex_create(pool, name, PJ_MUTEX_SIMPLE, mutex);
+}
+
+/*
+ * pj_mutex_create_recursive()
+ */
+PJ_DEF(pj_status_t) pj_mutex_create_recursive( pj_pool_t *pool,
+ const char *name,
+ pj_mutex_t **mutex )
+{
+ return pj_mutex_create(pool, name, PJ_MUTEX_RECURSE, mutex);
+}
+
+/*
+ * pj_mutex_lock()
+ */
+PJ_DEF(pj_status_t) pj_mutex_lock(pj_mutex_t *mutex)
+{
+#if PJ_HAS_THREADS
+ pj_status_t status;
+
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(mutex, PJ_EINVAL);
+
+ PJ_LOG(6,(mutex->obj_name, "Mutex: thread %s is waiting",
+ pj_thread_this()->obj_name));
+
+ status = pthread_mutex_lock( &mutex->mutex );
+
+ PJ_LOG(6,(mutex->obj_name,
+ (status==0 ? "Mutex acquired by thread %s" : "FAILED by %s"),
+ pj_thread_this()->obj_name));
+
+#if PJ_DEBUG
+ if (status == PJ_SUCCESS) {
+ mutex->owner = pj_thread_this();
+ ++mutex->nesting_level;
+ }
+#endif
+
+ if (status == 0)
+ return PJ_SUCCESS;
+ else
+ return PJ_RETURN_OS_ERROR(status);
+#else /* PJ_HAS_THREADS */
+ pj_assert( mutex == (pj_mutex_t*)1 );
+ return PJ_SUCCESS;
+#endif
+}
+
+/*
+ * pj_mutex_unlock()
+ */
+PJ_DEF(pj_status_t) pj_mutex_unlock(pj_mutex_t *mutex)
+{
+#if PJ_HAS_THREADS
+ pj_status_t status;
+
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(mutex, PJ_EINVAL);
+
+#if PJ_DEBUG
+ pj_assert(mutex->owner == pj_thread_this());
+ if (--mutex->nesting_level == 0) {
+ mutex->owner = NULL;
+ }
+#endif
+
+ PJ_LOG(6,(mutex->obj_name, "Mutex released by thread %s",
+ pj_thread_this()->obj_name));
+
+ status = pthread_mutex_unlock( &mutex->mutex );
+ if (status == 0)
+ return PJ_SUCCESS;
+ else
+ return PJ_RETURN_OS_ERROR(status);
+
+#else /* PJ_HAS_THREADS */
+ pj_assert( mutex == (pj_mutex_t*)1 );
+ return PJ_SUCCESS;
+#endif
+}
+
+/*
+ * pj_mutex_trylock()
+ */
+PJ_DEF(pj_status_t) pj_mutex_trylock(pj_mutex_t *mutex)
+{
+#if PJ_HAS_THREADS
+ int status;
+
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(mutex, PJ_EINVAL);
+
+ status = pthread_mutex_trylock( &mutex->mutex );
+
+ if (status==0) {
+ PJ_LOG(6,(mutex->obj_name, "Mutex acquired by thread %s",
+ pj_thread_this()->obj_name));
+
+#if PJ_DEBUG
+ mutex->owner = pj_thread_this();
+ ++mutex->nesting_level;
+#endif
+ }
+
+ if (status==0)
+ return PJ_SUCCESS;
+ else
+ return PJ_RETURN_OS_ERROR(status);
+#else /* PJ_HAS_THREADS */
+ pj_assert( mutex == (pj_mutex_t*)1);
+ return PJ_SUCCESS;
+#endif
+}
+
+/*
+ * pj_mutex_destroy()
+ */
+PJ_DEF(pj_status_t) pj_mutex_destroy(pj_mutex_t *mutex)
+{
+ int status;
+
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(mutex, PJ_EINVAL);
+
+#if PJ_HAS_THREADS
+ PJ_LOG(6,(mutex->obj_name, "Mutex destroyed"));
+ status = pthread_mutex_destroy( &mutex->mutex );
+ if (status == 0)
+ return PJ_SUCCESS;
+ else
+ return PJ_RETURN_OS_ERROR(status);
+#else
+ pj_assert( mutex == (pj_mutex_t*)1 );
+ status = PJ_SUCCESS;
+ return status;
+#endif
+}
+
+#if PJ_DEBUG
+PJ_DEF(pj_bool_t) pj_mutex_is_locked(pj_mutex_t *mutex)
+{
+#if PJ_HAS_THREADS
+ return mutex->owner == pj_thread_this();
+#else
+ return 1;
+#endif
+}
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+#if defined(PJ_HAS_SEMAPHORE) && PJ_HAS_SEMAPHORE != 0
+
+/*
+ * pj_sem_create()
+ */
+PJ_DEF(pj_status_t) pj_sem_create( pj_pool_t *pool,
+ const char *name,
+ unsigned initial,
+ unsigned max,
+ pj_sem_t **ptr_sem)
+{
+#if PJ_HAS_THREADS
+ pj_sem_t *sem;
+
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(pool != NULL && ptr_sem != NULL, PJ_EINVAL);
+
+ sem = pj_pool_alloc(pool, sizeof(*sem));
+ if (!sem) return PJ_ENOMEM;
+
+ if (sem_init( &sem->sem, 0, initial) != 0)
+ return PJ_RETURN_OS_ERROR(pj_get_native_os_error());
+
+ /* Set name. */
+ if (!name) {
+ name = "sem%p";
+ }
+ if (strchr(name, '%')) {
+ pj_snprintf(sem->obj_name, PJ_MAX_OBJ_NAME, name, sem);
+ } else {
+ strncpy(sem->obj_name, name, PJ_MAX_OBJ_NAME);
+ sem->obj_name[PJ_MAX_OBJ_NAME-1] = '\0';
+ }
+
+ PJ_LOG(6, (sem->obj_name, "Semaphore created"));
+
+ *ptr_sem = sem;
+ return PJ_SUCCESS;
+#else
+ *ptr_sem = (pj_sem_t*)1;
+ return PJ_SUCCESS;
+#endif
+}
+
+/*
+ * pj_sem_wait()
+ */
+PJ_DEF(pj_status_t) pj_sem_wait(pj_sem_t *sem)
+{
+#if PJ_HAS_THREADS
+ int result;
+
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(sem, PJ_EINVAL);
+
+ PJ_LOG(6, (sem->obj_name, "Semaphore: thread %s is waiting",
+ pj_thread_this()->obj_name));
+
+ result = sem_wait( &sem->sem );
+
+ if (result == 0) {
+ PJ_LOG(6, (sem->obj_name, "Semaphore acquired by thread %s",
+ pj_thread_this()->obj_name));
+ } else {
+ PJ_LOG(6, (sem->obj_name, "Semaphore: thread %s FAILED to acquire",
+ pj_thread_this()->obj_name));
+ }
+
+ if (result == 0)
+ return PJ_SUCCESS;
+ else
+ return PJ_RETURN_OS_ERROR(pj_get_native_os_error());
+#else
+ pj_assert( sem == (pj_sem_t*) 1 );
+ return PJ_SUCCESS;
+#endif
+}
+
+/*
+ * pj_sem_trywait()
+ */
+PJ_DEF(pj_status_t) pj_sem_trywait(pj_sem_t *sem)
+{
+#if PJ_HAS_THREADS
+ int result;
+
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(sem, PJ_EINVAL);
+
+ result = sem_trywait( &sem->sem );
+
+ if (result == 0) {
+ PJ_LOG(6, (sem->obj_name, "Semaphore acquired by thread %s",
+ pj_thread_this()->obj_name));
+ }
+ if (result == 0)
+ return PJ_SUCCESS;
+ else
+ return PJ_RETURN_OS_ERROR(pj_get_native_os_error());
+#else
+ pj_assert( sem == (pj_sem_t*)1 );
+ return PJ_SUCCESS;
+#endif
+}
+
+/*
+ * pj_sem_post()
+ */
+PJ_DEF(pj_status_t) pj_sem_post(pj_sem_t *sem)
+{
+#if PJ_HAS_THREADS
+ int result;
+ PJ_LOG(6, (sem->obj_name, "Semaphore released by thread %s",
+ pj_thread_this()->obj_name));
+ result = sem_post( &sem->sem );
+
+ if (result == 0)
+ return PJ_SUCCESS;
+ else
+ return PJ_RETURN_OS_ERROR(pj_get_native_os_error());
+#else
+ pj_assert( sem == (pj_sem_t*) 1);
+ return PJ_SUCCESS;
+#endif
+}
+
+/*
+ * pj_sem_destroy()
+ */
+PJ_DEF(pj_status_t) pj_sem_destroy(pj_sem_t *sem)
+{
+#if PJ_HAS_THREADS
+ int result;
+
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(sem, PJ_EINVAL);
+
+ PJ_LOG(6, (sem->obj_name, "Semaphore destroyed by thread %s",
+ pj_thread_this()->obj_name));
+ result = sem_destroy( &sem->sem );
+
+ if (result == 0)
+ return PJ_SUCCESS;
+ else
+ return PJ_RETURN_OS_ERROR(pj_get_native_os_error());
+#else
+ pj_assert( sem == (pj_sem_t*) 1 );
+ return PJ_SUCCESS;
+#endif
+}
+
+#endif /* PJ_HAS_SEMAPHORE */
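+
+/*
+ * Usage sketch (illustrative only): a counting semaphore used as a simple
+ * producer/consumer signal. The "max" argument is accepted for API
+ * compatibility but is not enforced by this sem_init()-based
+ * implementation. "pool" is owned by the caller.
+ *
+ *   pj_sem_t *sem;
+ *   pj_sem_create(pool, "queue%p", 0, 1, &sem);
+ *   ... producer: pj_sem_post(sem) after queueing an item ...
+ *   ... consumer: pj_sem_wait(sem), then dequeue ...
+ *   pj_sem_destroy(sem);
+ */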
+
+///////////////////////////////////////////////////////////////////////////////
+#if defined(PJ_HAS_EVENT_OBJ) && PJ_HAS_EVENT_OBJ != 0
+
+/*
+ * pj_event_create()
+ */
+PJ_DEF(pj_status_t) pj_event_create(pj_pool_t *pool, const char *name,
+ pj_bool_t manual_reset, pj_bool_t initial,
+ pj_event_t **ptr_event)
+{
+ pj_assert(!"Not supported!");
+ PJ_UNUSED_ARG(pool);
+ PJ_UNUSED_ARG(name);
+ PJ_UNUSED_ARG(manual_reset);
+ PJ_UNUSED_ARG(initial);
+ PJ_UNUSED_ARG(ptr_event);
+ return PJ_EINVALIDOP;
+}
+
+/*
+ * pj_event_wait()
+ */
+PJ_DEF(pj_status_t) pj_event_wait(pj_event_t *event)
+{
+ PJ_UNUSED_ARG(event);
+ return PJ_EINVALIDOP;
+}
+
+/*
+ * pj_event_trywait()
+ */
+PJ_DEF(pj_status_t) pj_event_trywait(pj_event_t *event)
+{
+ PJ_UNUSED_ARG(event);
+ return PJ_EINVALIDOP;
+}
+
+/*
+ * pj_event_set()
+ */
+PJ_DEF(pj_status_t) pj_event_set(pj_event_t *event)
+{
+ PJ_UNUSED_ARG(event);
+ return PJ_EINVALIDOP;
+}
+
+/*
+ * pj_event_pulse()
+ */
+PJ_DEF(pj_status_t) pj_event_pulse(pj_event_t *event)
+{
+ PJ_UNUSED_ARG(event);
+ return PJ_EINVALIDOP;
+}
+
+/*
+ * pj_event_reset()
+ */
+PJ_DEF(pj_status_t) pj_event_reset(pj_event_t *event)
+{
+ PJ_UNUSED_ARG(event);
+ return PJ_EINVALIDOP;
+}
+
+/*
+ * pj_event_destroy()
+ */
+PJ_DEF(pj_status_t) pj_event_destroy(pj_event_t *event)
+{
+ PJ_UNUSED_ARG(event);
+ return PJ_EINVALIDOP;
+}
+
+#endif /* PJ_HAS_EVENT_OBJ */
+
+///////////////////////////////////////////////////////////////////////////////
+#if defined(PJ_TERM_HAS_COLOR) && PJ_TERM_HAS_COLOR != 0
+/*
+ * Terminal
+ */
+
+/**
+ * Set terminal color.
+ */
+PJ_DEF(pj_status_t) pj_term_set_color(pj_color_t color)
+{
+ PJ_UNUSED_ARG(color);
+ return PJ_EINVALIDOP;
+}
+
+/**
+ * Get current terminal foreground color.
+ */
+PJ_DEF(pj_color_t) pj_term_get_color(void)
+{
+ return 0;
+}
+
+#endif /* PJ_TERM_HAS_COLOR */
+
diff --git a/pjlib/src/pj/os_core_win32.c b/pjlib/src/pj/os_core_win32.c
new file mode 100644
index 00000000..dd892b5d
--- /dev/null
+++ b/pjlib/src/pj/os_core_win32.c
@@ -0,0 +1,1182 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/os_core_win32.c 12 10/29/05 11:51a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/os_core_win32.c $
+ *
+ * 12 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 11 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 10 9/21/05 1:39p Bennylp
+ * Periodic checkin for backup.
+ *
+ * 9 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ */
+#include <pj/os.h>
+#include <pj/pool.h>
+#include <pj/log.h>
+#include <pj/string.h>
+#include <pj/guid.h>
+#include <pj/rand.h>
+#include <pj/assert.h>
+#include <pj/compat/vsprintf.h>
+#include <pj/compat/sprintf.h>
+#include <pj/errno.h>
+#include <pj/except.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#if defined(PJ_HAS_WINSOCK_H) && PJ_HAS_WINSOCK_H != 0
+# include <winsock.h>
+#endif
+
+#if defined(PJ_HAS_WINSOCK2_H) && PJ_HAS_WINSOCK2_H != 0
+# include <winsock2.h>
+#endif
+
+/*
+ * Implementation of pj_thread_t.
+ */
+struct pj_thread_t
+{
+ char obj_name[PJ_MAX_OBJ_NAME];
+ HANDLE hthread;
+ DWORD idthread;
+ pj_thread_proc *proc;
+ void *arg;
+
+#if defined(PJ_OS_HAS_CHECK_STACK) && PJ_OS_HAS_CHECK_STACK!=0
+ pj_uint32_t stk_size;
+ pj_uint32_t stk_max_usage;
+ char *stk_start;
+ const char *caller_file;
+ int caller_line;
+#endif
+};
+
+
+/*
+ * Implementation of pj_mutex_t.
+ */
+struct pj_mutex_t
+{
+#if PJ_WIN32_WINNT >= 0x0400
+ CRITICAL_SECTION crit;
+#else
+ HANDLE hMutex;
+#endif
+ char obj_name[PJ_MAX_OBJ_NAME];
+#if PJ_DEBUG
+ int nesting_level;
+ pj_thread_t *owner;
+#endif
+};
+
+/*
+ * Implementation of pj_sem_t.
+ */
+struct pj_sem_t
+{
+ HANDLE hSemaphore;
+ char obj_name[PJ_MAX_OBJ_NAME];
+};
+
+
+/*
+ * Implementation of pj_event_t.
+ */
+struct pj_event_t
+{
+ HANDLE hEvent;
+ char obj_name[PJ_MAX_OBJ_NAME];
+};
+
+/*
+ * Implementation of pj_atomic_t.
+ */
+struct pj_atomic_t
+{
+ long value;
+};
+
+/*
+ * Static global variables.
+ */
+static pj_thread_desc main_thread;
+static long thread_tls_id;
+static pj_mutex_t critical_section_mutex;
+
+
+/*
+ * Some static prototypes.
+ */
+static pj_status_t init_mutex(pj_mutex_t *mutex, const char *name);
+
+
+/*
+ * pj_init(void).
+ * Init PJLIB!
+ */
+PJ_DEF(pj_status_t) pj_init(void)
+{
+ WSADATA wsa;
+ char dummy_guid[32]; /* use maximum GUID length */
+ pj_str_t guid;
+ pj_status_t rc;
+
+ PJ_LOG(5, ("pj_init", "Initializing PJ Library.."));
+
+ /* Init Winsock.. */
+ if (WSAStartup(MAKEWORD(2,0), &wsa) != 0) {
+ PJ_LOG(1, ("pj_init", "Winsock initialization has returned an error"));
+ return PJ_RETURN_OS_ERROR(WSAGetLastError());
+ }
+
+ /* Init this thread's TLS. */
+ if ((rc=pj_thread_init()) != PJ_SUCCESS) {
+ PJ_LOG(1, ("pj_init", "Thread initialization has returned an error"));
+ return rc;
+ }
+
+ /* Init random seed. */
+ pj_srand( GetCurrentProcessId() );
+
+ /* Startup GUID. */
+ guid.ptr = dummy_guid;
+ pj_generate_unique_string( &guid );
+
+ /* Initialize critical section. */
+ if ((rc=init_mutex(&critical_section_mutex, "pj%p")) != PJ_SUCCESS)
+ return rc;
+
+ /* Initialize exception ID for the pool.
+ * Must do so after critical section is configured.
+ */
+ rc = pj_exception_id_alloc("PJLIB/No memory", &PJ_NO_MEMORY_EXCEPTION);
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ /* Startup timestamp */
+#if defined(PJ_HAS_HIGH_RES_TIMER) && PJ_HAS_HIGH_RES_TIMER != 0
+ {
+ pj_timestamp dummy_ts;
+ if ((rc=pj_get_timestamp(&dummy_ts)) != PJ_SUCCESS) {
+ PJ_LOG(1, ("pj_init", "Unable to initialize timestamp"));
+ return rc;
+ }
+ }
+#endif
+
+ return PJ_SUCCESS;
+}
+
+/*
+ * pj_getpid(void)
+ */
+PJ_DEF(pj_uint32_t) pj_getpid(void)
+{
+ PJ_CHECK_STACK();
+ return GetCurrentProcessId();
+}
+
+/*
+ * pj_thread_register(..)
+ */
+PJ_DEF(pj_status_t) pj_thread_register ( const char *cstr_thread_name,
+ pj_thread_desc desc,
+ pj_thread_t **thread_ptr)
+{
+ char stack_ptr;
+ pj_thread_t *thread = (pj_thread_t *)desc;
+ pj_str_t thread_name = pj_str((char*)cstr_thread_name);
+
+ /* Size sanity check. */
+ if (sizeof(pj_thread_desc) < sizeof(pj_thread_t)) {
+ pj_assert(!"Not enough pj_thread_desc size!");
+ return PJ_EBUG;
+ }
+
+ /* If a thread descriptor has been registered before, just return it. */
+ if (pj_thread_local_get (thread_tls_id) != 0) {
+ *thread_ptr = (pj_thread_t*)pj_thread_local_get (thread_tls_id);
+ return PJ_SUCCESS;
+ }
+
+ /* Initialize and set the thread entry. */
+ pj_memset(desc, 0, sizeof(pj_thread_desc));
+ thread->hthread = GetCurrentThread();
+ thread->idthread = GetCurrentThreadId();
+
+#if defined(PJ_OS_HAS_CHECK_STACK) && PJ_OS_HAS_CHECK_STACK!=0
+ thread->stk_start = &stack_ptr;
+ thread->stk_size = 0xFFFFFFFFUL;
+ thread->stk_max_usage = 0;
+#else
+ stack_ptr = '\0';
+#endif
+
+ if (cstr_thread_name && pj_strlen(&thread_name) < sizeof(thread->obj_name)-1)
+ pj_sprintf(thread->obj_name, cstr_thread_name, thread->idthread);
+ else
+ pj_sprintf(thread->obj_name, "thr%p", (void*)thread->idthread);
+
+ pj_thread_local_set(thread_tls_id, thread);
+
+ *thread_ptr = thread;
+ return PJ_SUCCESS;
+}
+
+/*
+ * pj_thread_init(void)
+ */
+pj_status_t pj_thread_init(void)
+{
+ pj_status_t rc;
+ pj_thread_t *thread;
+
+ rc = pj_thread_local_alloc(&thread_tls_id);
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ return pj_thread_register("thr%p", main_thread, &thread);
+}
+
+static DWORD WINAPI thread_main(void *param)
+{
+ pj_thread_t *rec = param;
+ DWORD result;
+
+#if defined(PJ_OS_HAS_CHECK_STACK) && PJ_OS_HAS_CHECK_STACK!=0
+ rec->stk_start = (char*)&rec;
+#endif
+
+ PJ_LOG(6,(rec->obj_name, "Thread started"));
+
+ pj_thread_local_set(thread_tls_id, rec);
+ result = (*rec->proc)(rec->arg);
+
+ PJ_LOG(6,(rec->obj_name, "Thread quitting"));
+ return (DWORD)result;
+}
+
+/*
+ * pj_thread_create(...)
+ */
+PJ_DEF(pj_status_t) pj_thread_create( pj_pool_t *pool,
+ const char *thread_name,
+ pj_thread_proc *proc,
+ void *arg,
+ pj_size_t stack_size,
+ unsigned flags,
+ pj_thread_t **thread_ptr)
+{
+ DWORD dwflags = 0;
+ pj_thread_t *rec;
+
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(pool && proc && thread_ptr, PJ_EINVAL);
+
+ /* Set flags */
+ if (flags & PJ_THREAD_SUSPENDED)
+ dwflags |= CREATE_SUSPENDED;
+
+ /* Create thread record and assign name for the thread */
+ rec = (struct pj_thread_t*) pj_pool_calloc(pool, 1, sizeof(pj_thread_t));
+ if (!rec)
+ return PJ_ENOMEM;
+
+ /* Set name. */
+ if (!thread_name)
+ thread_name = "thr%p";
+
+ if (strchr(thread_name, '%')) {
+ pj_snprintf(rec->obj_name, PJ_MAX_OBJ_NAME, thread_name, rec);
+ } else {
+ strncpy(rec->obj_name, thread_name, PJ_MAX_OBJ_NAME);
+ rec->obj_name[PJ_MAX_OBJ_NAME-1] = '\0';
+ }
+
+ PJ_LOG(6, (rec->obj_name, "Thread created"));
+
+#if defined(PJ_OS_HAS_CHECK_STACK) && PJ_OS_HAS_CHECK_STACK!=0
+ rec->stk_size = stack_size ? stack_size : 0xFFFFFFFFUL;
+ rec->stk_max_usage = 0;
+#endif
+
+ /* Create the thread. */
+ rec->proc = proc;
+ rec->arg = arg;
+ rec->hthread = CreateThread(NULL, stack_size,
+ thread_main, rec,
+ dwflags, &rec->idthread);
+ if (rec->hthread == NULL)
+ return PJ_RETURN_OS_ERROR(GetLastError());
+
+ /* Success! */
+ *thread_ptr = rec;
+ return PJ_SUCCESS;
+}
+
+/*
+ * pj_thread_get_name()
+ */
+PJ_DEF(const char*) pj_thread_get_name(pj_thread_t *p)
+{
+ pj_thread_t *rec = (pj_thread_t*)p;
+
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(p, "");
+
+ return rec->obj_name;
+}
+
+/*
+ * pj_thread_resume()
+ */
+PJ_DEF(pj_status_t) pj_thread_resume(pj_thread_t *p)
+{
+ pj_thread_t *rec = (pj_thread_t*)p;
+
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(p, PJ_EINVAL);
+
+ if (ResumeThread(rec->hthread) == (DWORD)-1)
+ return PJ_RETURN_OS_ERROR(GetLastError());
+ else
+ return PJ_SUCCESS;
+}
+
+/*
+ * pj_thread_this()
+ */
+PJ_DEF(pj_thread_t*) pj_thread_this(void)
+{
+ pj_thread_t *rec = pj_thread_local_get(thread_tls_id);
+ pj_assert(rec != NULL);
+
+ /*
+ * MUST NOT check stack because this function is called
+ * by PJ_CHECK_STACK() itself!!!
+ *
+ */
+
+ return rec;
+}
+
+/*
+ * pj_thread_join()
+ */
+PJ_DEF(pj_status_t) pj_thread_join(pj_thread_t *p)
+{
+ pj_thread_t *rec = (pj_thread_t *)p;
+ DWORD rc;
+
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(p, PJ_EINVAL);
+
+ PJ_LOG(6, (pj_thread_this()->obj_name, "Joining thread %s", p->obj_name));
+
+ rc = WaitForSingleObject(rec->hthread, INFINITE);
+
+ if (rc==WAIT_OBJECT_0)
+ return PJ_SUCCESS;
+ else if (rc==WAIT_TIMEOUT)
+ return PJ_ETIMEDOUT;
+ else
+ return PJ_RETURN_OS_ERROR(GetLastError());
+}
+
+/*
+ * pj_thread_destroy()
+ */
+PJ_DEF(pj_status_t) pj_thread_destroy(pj_thread_t *p)
+{
+ pj_thread_t *rec = (pj_thread_t *)p;
+
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(p, PJ_EINVAL);
+
+ if (CloseHandle(rec->hthread) == TRUE)
+ return PJ_SUCCESS;
+ else
+ return PJ_RETURN_OS_ERROR(GetLastError());
+}
+
+/*
+ * pj_thread_sleep()
+ */
+PJ_DEF(pj_status_t) pj_thread_sleep(unsigned msec)
+{
+ PJ_CHECK_STACK();
+ Sleep(msec);
+ return PJ_SUCCESS;
+}
+
+#if defined(PJ_OS_HAS_CHECK_STACK) && PJ_OS_HAS_CHECK_STACK != 0
+/*
+ * pj_thread_check_stack()
+ * Implementation for PJ_CHECK_STACK()
+ */
+PJ_DEF(void) pj_thread_check_stack(const char *file, int line)
+{
+ char stk_ptr;
+ pj_uint32_t usage;
+ pj_thread_t *thread = pj_thread_this();
+
+ pj_assert(thread);
+
+ /* Calculate current usage. */
+ usage = (&stk_ptr > thread->stk_start) ? &stk_ptr - thread->stk_start :
+ thread->stk_start - &stk_ptr;
+
+ /* Assert if stack usage is dangerously high. */
+ pj_assert("STACK OVERFLOW!! " && (usage <= thread->stk_size - 128));
+
+ /* Keep statistic. */
+ if (usage > thread->stk_max_usage) {
+ thread->stk_max_usage = usage;
+ thread->caller_file = file;
+ thread->caller_line = line;
+ }
+
+}
+
+/*
+ * pj_thread_get_stack_max_usage()
+ */
+PJ_DEF(pj_uint32_t) pj_thread_get_stack_max_usage(pj_thread_t *thread)
+{
+ return thread->stk_max_usage;
+}
+
+/*
+ * pj_thread_get_stack_info()
+ */
+PJ_DEF(pj_status_t) pj_thread_get_stack_info( pj_thread_t *thread,
+ const char **file,
+ int *line )
+{
+ pj_assert(thread);
+
+ *file = thread->caller_file;
+ *line = thread->caller_line;
+ return 0;
+}
+
+#endif /* PJ_OS_HAS_CHECK_STACK */
+
+
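The thread API above follows a create/join/destroy life cycle, with the thread record allocated from a caller-supplied pool. A minimal usage sketch, not part of this change, assuming a pj_pool_t has already been obtained from a pool factory (e.g. the caching pool added later in this diff):

    #include <pj/os.h>
    #include <pj/log.h>

    /* Thread entry point; the signature matches pj_thread_proc. */
    static int worker_proc(void *arg)
    {
        PJ_UNUSED_ARG(arg);
        PJ_LOG(3, ("worker", "Hello from the worker thread"));
        return 0;
    }

    static void run_worker(pj_pool_t *pool)
    {
        pj_thread_t *thr;

        /* "%p" in the name is substituted with the thread record's address. */
        if (pj_thread_create(pool, "worker%p", &worker_proc, NULL,
                             0 /* default stack */, 0 /* no flags */,
                             &thr) != PJ_SUCCESS)
            return;

        pj_thread_join(thr);     /* block until worker_proc() returns */
        pj_thread_destroy(thr);  /* close the underlying OS handle */
    }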
+///////////////////////////////////////////////////////////////////////////////
+
+/*
+ * pj_atomic_create()
+ */
+PJ_DEF(pj_status_t) pj_atomic_create( pj_pool_t *pool,
+ pj_atomic_value_t initial,
+ pj_atomic_t **atomic_ptr)
+{
+ pj_atomic_t *atomic_var = pj_pool_alloc(pool, sizeof(pj_atomic_t));
+ if (!atomic_var)
+ return PJ_ENOMEM;
+
+ atomic_var->value = initial;
+ *atomic_ptr = atomic_var;
+
+ return PJ_SUCCESS;
+}
+
+/*
+ * pj_atomic_destroy()
+ */
+PJ_DEF(pj_status_t) pj_atomic_destroy( pj_atomic_t *var )
+{
+ PJ_UNUSED_ARG(var);
+ PJ_ASSERT_RETURN(var, PJ_EINVAL);
+
+ return 0;
+}
+
+/*
+ * pj_atomic_set()
+ */
+PJ_DEF(long) pj_atomic_set(pj_atomic_t *atomic_var, long value)
+{
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(atomic_var, 0);
+
+ return InterlockedExchange(&atomic_var->value, value);
+}
+
+/*
+ * pj_atomic_get()
+ */
+PJ_DEF(long) pj_atomic_get(pj_atomic_t *atomic_var)
+{
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(atomic_var, 0);
+
+ return atomic_var->value;
+}
+
+/*
+ * pj_atomic_inc()
+ */
+PJ_DEF(long) pj_atomic_inc(pj_atomic_t *atomic_var)
+{
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(atomic_var, 0);
+
+#if defined(PJ_WIN32_WINNT) && PJ_WIN32_WINNT >= 0x0400
+ return InterlockedIncrement(&atomic_var->value);
+#else
+# error Fix Me
+#endif
+}
+
+/*
+ * pj_atomic_dec()
+ */
+PJ_DEF(long) pj_atomic_dec(pj_atomic_t *atomic_var)
+{
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(atomic_var, 0);
+
+#if defined(PJ_WIN32_WINNT) && PJ_WIN32_WINNT >= 0x0400
+ return InterlockedDecrement(&atomic_var->value);
+#else
+# error Fix me
+#endif
+}
+
+
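A short sketch of the atomic counter API above, for illustration only (not part of this change; the pool is assumed to exist already):

    #include <pj/os.h>

    static void count_something(pj_pool_t *pool)
    {
        pj_atomic_t *counter;

        if (pj_atomic_create(pool, 0, &counter) != PJ_SUCCESS)
            return;

        pj_atomic_inc(counter);        /* InterlockedIncrement on NT */
        pj_atomic_set(counter, 10);    /* InterlockedExchange */
        if (pj_atomic_get(counter) == 10)
            pj_atomic_dec(counter);

        pj_atomic_destroy(counter);    /* a no-op here, kept for symmetry */
    }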
+///////////////////////////////////////////////////////////////////////////////
+/*
+ * pj_thread_local_alloc()
+ */
+PJ_DEF(pj_status_t) pj_thread_local_alloc(long *index)
+{
+ PJ_ASSERT_RETURN(index != NULL, PJ_EINVAL);
+
+ //Can't check stack because this function is called at startup,
+ //before the main thread is initialized.
+ //PJ_CHECK_STACK();
+
+ *index = TlsAlloc();
+
+ if (*index == TLS_OUT_OF_INDEXES)
+ return PJ_RETURN_OS_ERROR(GetLastError());
+ else
+ return PJ_SUCCESS;
+}
+
+/*
+ * pj_thread_local_free()
+ */
+PJ_DEF(void) pj_thread_local_free(long index)
+{
+ PJ_CHECK_STACK();
+ TlsFree(index);
+}
+
+/*
+ * pj_thread_local_set()
+ */
+PJ_DEF(void) pj_thread_local_set(long index, void *value)
+{
+ //Can't check stack because this function is called at startup,
+ //before the main thread is initialized.
+ //PJ_CHECK_STACK();
+ TlsSetValue(index, value);
+}
+
+/*
+ * pj_thread_local_get()
+ */
+PJ_DEF(void*) pj_thread_local_get(long index)
+{
+ //Can't check stack because this function is called
+ //by PJ_CHECK_STACK() itself!!!
+ //PJ_CHECK_STACK();
+ return TlsGetValue(index);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+static pj_status_t init_mutex(pj_mutex_t *mutex, const char *name)
+{
+
+ PJ_CHECK_STACK();
+
+#if PJ_WIN32_WINNT >= 0x0400
+ InitializeCriticalSection(&mutex->crit);
+#else
+ mutex->hMutex = CreateMutex(NULL, FALSE, NULL);
+ if (!mutex->hMutex) {
+ return PJ_RETURN_OS_ERROR(GetLastError());
+ }
+#endif
+
+#if PJ_DEBUG
+ /* Set owner. */
+ mutex->nesting_level = 0;
+ mutex->owner = NULL;
+#endif
+
+ /* Set name. */
+ if (!name) {
+ name = "mtx%p";
+ }
+ if (strchr(name, '%')) {
+ pj_snprintf(mutex->obj_name, PJ_MAX_OBJ_NAME, name, mutex);
+ } else {
+ strncpy(mutex->obj_name, name, PJ_MAX_OBJ_NAME);
+ mutex->obj_name[PJ_MAX_OBJ_NAME-1] = '\0';
+ }
+
+ PJ_LOG(6, (mutex->obj_name, "Mutex created"));
+ return PJ_SUCCESS;
+}
+
+/*
+ * pj_mutex_create()
+ */
+PJ_DEF(pj_status_t) pj_mutex_create(pj_pool_t *pool,
+ const char *name,
+ int type,
+ pj_mutex_t **mutex_ptr)
+{
+ pj_status_t rc;
+ pj_mutex_t *mutex;
+
+ PJ_UNUSED_ARG(type);
+ PJ_ASSERT_RETURN(pool && mutex_ptr, PJ_EINVAL);
+
+ mutex = pj_pool_alloc(pool, sizeof(*mutex));
+ if (!mutex)
+ return PJ_ENOMEM;
+
+ rc = init_mutex(mutex, name);
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ *mutex_ptr = mutex;
+
+ return PJ_SUCCESS;
+}
+
+/*
+ * pj_mutex_create_simple()
+ */
+PJ_DEF(pj_status_t) pj_mutex_create_simple( pj_pool_t *pool,
+ const char *name,
+ pj_mutex_t **mutex )
+{
+ return pj_mutex_create(pool, name, PJ_MUTEX_SIMPLE, mutex);
+}
+
+/*
+ * pj_mutex_create_recursive()
+ */
+PJ_DEF(pj_status_t) pj_mutex_create_recursive( pj_pool_t *pool,
+ const char *name,
+ pj_mutex_t **mutex )
+{
+ return pj_mutex_create(pool, name, PJ_MUTEX_RECURSE, mutex);
+}
+
+/*
+ * pj_mutex_lock()
+ */
+PJ_DEF(pj_status_t) pj_mutex_lock(pj_mutex_t *mutex)
+{
+ pj_status_t status;
+
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(mutex, PJ_EINVAL);
+
+ PJ_LOG(6,(mutex->obj_name, "Mutex: thread %s is waiting",
+ pj_thread_this()->obj_name));
+
+#if PJ_WIN32_WINNT >= 0x0400
+ EnterCriticalSection(&mutex->crit);
+ status=PJ_SUCCESS;
+#else
+ if (WaitForSingleObject(mutex->hMutex, INFINITE)==WAIT_OBJECT_0)
+ status = PJ_SUCCESS;
+ else
+ status = PJ_STATUS_FROM_OS(GetLastError());
+
+#endif
+ PJ_LOG(6,(mutex->obj_name,
+ (status==PJ_SUCCESS ? "Mutex acquired by thread %s" : "FAILED by %s"),
+ pj_thread_this()->obj_name));
+
+#if PJ_DEBUG
+ if (status == PJ_SUCCESS) {
+ mutex->owner = pj_thread_this();
+ ++mutex->nesting_level;
+ }
+#endif
+
+ return status;
+}
+
+/*
+ * pj_mutex_unlock()
+ */
+PJ_DEF(pj_status_t) pj_mutex_unlock(pj_mutex_t *mutex)
+{
+ pj_status_t status;
+
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(mutex, PJ_EINVAL);
+
+#if PJ_DEBUG
+ pj_assert(mutex->owner == pj_thread_this());
+ if (--mutex->nesting_level == 0) {
+ mutex->owner = NULL;
+ }
+#endif
+
+ PJ_LOG(6,(mutex->obj_name, "Mutex released by thread %s",
+ pj_thread_this()->obj_name));
+
+#if PJ_WIN32_WINNT >= 0x0400
+ LeaveCriticalSection(&mutex->crit);
+ status=PJ_SUCCESS;
+#else
+ status = ReleaseMutex(mutex->hMutex) ? PJ_SUCCESS :
+ PJ_STATUS_FROM_OS(GetLastError());
+#endif
+ return status;
+}
+
+/*
+ * pj_mutex_trylock()
+ */
+PJ_DEF(pj_status_t) pj_mutex_trylock(pj_mutex_t *mutex)
+{
+ pj_status_t status;
+
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(mutex, PJ_EINVAL);
+
+#if PJ_WIN32_WINNT >= 0x0400
+ status=TryEnterCriticalSection(&mutex->crit) ? PJ_SUCCESS : PJ_EUNKNOWN;
+#else
+ status = WaitForSingleObject(mutex->hMutex, 0)==WAIT_OBJECT_0 ?
+ PJ_SUCCESS : PJ_ETIMEDOUT;
+#endif
+ if (status==PJ_SUCCESS) {
+ PJ_LOG(6,(mutex->obj_name, "Mutex acquired by thread %s",
+ pj_thread_this()->obj_name));
+
+#if PJ_DEBUG
+ mutex->owner = pj_thread_this();
+ ++mutex->nesting_level;
+#endif
+ }
+ return status;
+}
+
+/*
+ * pj_mutex_destroy()
+ */
+PJ_DEF(pj_status_t) pj_mutex_destroy(pj_mutex_t *mutex)
+{
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(mutex, PJ_EINVAL);
+
+ PJ_LOG(6,(mutex->obj_name, "Mutex destroyed"));
+
+#if PJ_WIN32_WINNT >= 0x0400
+ DeleteCriticalSection(&mutex->crit);
+ return PJ_SUCCESS;
+#else
+ return CloseHandle(mutex->hMutex) ? PJ_SUCCESS :
+ PJ_RETURN_OS_ERROR(GetLastError());
+#endif
+}
+
+#if PJ_DEBUG
+/*
+ * pj_mutex_is_locked()
+ */
+PJ_DEF(pj_bool_t) pj_mutex_is_locked(pj_mutex_t *mutex)
+{
+ return mutex->owner == pj_thread_this();
+}
+#endif
+
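On NT the mutex maps to a CRITICAL_SECTION; on older targets it falls back to a Win32 mutex handle, but callers only see the pj_mutex_* API either way. A minimal usage sketch (not part of this change; the pool is assumed to be created elsewhere):

    #include <pj/os.h>

    static void guarded_increment(pj_pool_t *pool, int *shared_value)
    {
        pj_mutex_t *lock;

        if (pj_mutex_create_simple(pool, "lock%p", &lock) != PJ_SUCCESS)
            return;

        pj_mutex_lock(lock);
        ++(*shared_value);                         /* protected update */
        pj_mutex_unlock(lock);

        if (pj_mutex_trylock(lock) == PJ_SUCCESS)  /* non-blocking variant */
            pj_mutex_unlock(lock);

        pj_mutex_destroy(lock);
    }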
+///////////////////////////////////////////////////////////////////////////////
+/*
+ * pj_enter_critical_section()
+ */
+PJ_DEF(void) pj_enter_critical_section(void)
+{
+ pj_mutex_lock(&critical_section_mutex);
+}
+
+
+/*
+ * pj_leave_critical_section()
+ */
+PJ_DEF(void) pj_leave_critical_section(void)
+{
+ pj_mutex_unlock(&critical_section_mutex);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+#if defined(PJ_HAS_SEMAPHORE) && PJ_HAS_SEMAPHORE != 0
+
+/*
+ * pj_sem_create()
+ */
+PJ_DEF(pj_status_t) pj_sem_create( pj_pool_t *pool,
+ const char *name,
+ unsigned initial,
+ unsigned max,
+ pj_sem_t **sem_ptr)
+{
+ pj_sem_t *sem;
+
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(pool && sem_ptr, PJ_EINVAL);
+
+ sem = pj_pool_alloc(pool, sizeof(*sem));
+ sem->hSemaphore = CreateSemaphore(NULL, initial, max, NULL);
+ if (!sem->hSemaphore)
+ return PJ_RETURN_OS_ERROR(GetLastError());
+
+ /* Set name. */
+ if (!name) {
+ name = "sem%p";
+ }
+ if (strchr(name, '%')) {
+ pj_snprintf(sem->obj_name, PJ_MAX_OBJ_NAME, name, sem);
+ } else {
+ strncpy(sem->obj_name, name, PJ_MAX_OBJ_NAME);
+ sem->obj_name[PJ_MAX_OBJ_NAME-1] = '\0';
+ }
+
+ PJ_LOG(6, (sem->obj_name, "Semaphore created"));
+
+ *sem_ptr = sem;
+ return PJ_SUCCESS;
+}
+
+static pj_status_t pj_sem_wait_for(pj_sem_t *sem, unsigned timeout)
+{
+ DWORD result;
+
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(sem, PJ_EINVAL);
+
+ PJ_LOG(6, (sem->obj_name, "Semaphore: thread %s is waiting",
+ pj_thread_this()->obj_name));
+
+ result = WaitForSingleObject(sem->hSemaphore, timeout);
+ if (result == WAIT_OBJECT_0) {
+ PJ_LOG(6, (sem->obj_name, "Semaphore acquired by thread %s",
+ pj_thread_this()->obj_name));
+ } else {
+ PJ_LOG(6, (sem->obj_name, "Semaphore: thread %s FAILED to acquire",
+ pj_thread_this()->obj_name));
+ }
+
+ if (result==WAIT_OBJECT_0)
+ return PJ_SUCCESS;
+ else if (result==WAIT_TIMEOUT)
+ return PJ_ETIMEDOUT;
+ else
+ return PJ_RETURN_OS_ERROR(GetLastError());
+}
+
+/*
+ * pj_sem_wait()
+ */
+PJ_DEF(pj_status_t) pj_sem_wait(pj_sem_t *sem)
+{
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(sem, PJ_EINVAL);
+
+ return pj_sem_wait_for(sem, INFINITE);
+}
+
+/*
+ * pj_sem_trywait()
+ */
+PJ_DEF(pj_status_t) pj_sem_trywait(pj_sem_t *sem)
+{
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(sem, PJ_EINVAL);
+
+ return pj_sem_wait_for(sem, 0);
+}
+
+/*
+ * pj_sem_post()
+ */
+PJ_DEF(pj_status_t) pj_sem_post(pj_sem_t *sem)
+{
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(sem, PJ_EINVAL);
+
+ PJ_LOG(6, (sem->obj_name, "Semaphore released by thread %s",
+ pj_thread_this()->obj_name));
+
+ if (ReleaseSemaphore(sem->hSemaphore, 1, NULL))
+ return PJ_SUCCESS;
+ else
+ return PJ_RETURN_OS_ERROR(GetLastError());
+}
+
+/*
+ * pj_sem_destroy()
+ */
+PJ_DEF(pj_status_t) pj_sem_destroy(pj_sem_t *sem)
+{
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(sem, PJ_EINVAL);
+
+ PJ_LOG(6, (sem->obj_name, "Semaphore destroyed by thread %s",
+ pj_thread_this()->obj_name));
+
+ if (CloseHandle(sem->hSemaphore))
+ return PJ_SUCCESS;
+ else
+ return PJ_RETURN_OS_ERROR(GetLastError());
+}
+
+#endif /* PJ_HAS_SEMAPHORE */
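A brief producer/consumer sketch for the semaphore API (illustrative, not part of this change; the pool and the thread that calls consume() are assumed to be set up as in the thread example earlier):

    #include <pj/os.h>

    static pj_sem_t *queue_sem;

    static void init_queue_sem(pj_pool_t *pool)
    {
        /* Start at zero, allow at most 16 outstanding posts. */
        pj_sem_create(pool, "qsem%p", 0, 16, &queue_sem);
    }

    static void produce(void) { pj_sem_post(queue_sem); }
    static void consume(void) { pj_sem_wait(queue_sem); /* blocks until a post */ }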
+///////////////////////////////////////////////////////////////////////////////
+
+
+#if defined(PJ_HAS_EVENT_OBJ) && PJ_HAS_EVENT_OBJ != 0
+
+/*
+ * pj_event_create()
+ */
+PJ_DEF(pj_status_t) pj_event_create( pj_pool_t *pool,
+ const char *name,
+ pj_bool_t manual_reset,
+ pj_bool_t initial,
+ pj_event_t **event_ptr)
+{
+ pj_event_t *event;
+
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(pool && event_ptr, PJ_EINVAL);
+
+ event = pj_pool_alloc(pool, sizeof(*event));
+ if (!event)
+ return PJ_ENOMEM;
+
+ event->hEvent = CreateEvent(NULL, manual_reset?TRUE:FALSE,
+ initial?TRUE:FALSE, NULL);
+
+ if (!event->hEvent)
+ return PJ_RETURN_OS_ERROR(GetLastError());
+
+ /* Set name. */
+ if (!name) {
+ name = "evt%p";
+ }
+ if (strchr(name, '%')) {
+ pj_snprintf(event->obj_name, PJ_MAX_OBJ_NAME, name, event);
+ } else {
+ strncpy(event->obj_name, name, PJ_MAX_OBJ_NAME);
+ event->obj_name[PJ_MAX_OBJ_NAME-1] = '\0';
+ }
+
+ PJ_LOG(6, (event->obj_name, "Event created"));
+
+ *event_ptr = event;
+ return PJ_SUCCESS;
+}
+
+static pj_status_t pj_event_wait_for(pj_event_t *event, unsigned timeout)
+{
+ DWORD result;
+
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(event, PJ_EINVAL);
+
+ PJ_LOG(6, (event->obj_name, "Event: thread %s is waiting",
+ pj_thread_this()->obj_name));
+
+ result = WaitForSingleObject(event->hEvent, timeout);
+ if (result == WAIT_OBJECT_0) {
+ PJ_LOG(6, (event->obj_name, "Event: thread %s is released",
+ pj_thread_this()->obj_name));
+ } else {
+ PJ_LOG(6, (event->obj_name, "Event: thread %s FAILED to acquire",
+ pj_thread_this()->obj_name));
+ }
+
+ if (result==WAIT_OBJECT_0)
+ return PJ_SUCCESS;
+ else if (result==WAIT_TIMEOUT)
+ return PJ_ETIMEDOUT;
+ else
+ return PJ_RETURN_OS_ERROR(GetLastError());
+}
+
+/*
+ * pj_event_wait()
+ */
+PJ_DEF(pj_status_t) pj_event_wait(pj_event_t *event)
+{
+ PJ_ASSERT_RETURN(event, PJ_EINVAL);
+
+ return pj_event_wait_for(event, INFINITE);
+}
+
+/*
+ * pj_event_trywait()
+ */
+PJ_DEF(pj_status_t) pj_event_trywait(pj_event_t *event)
+{
+ PJ_ASSERT_RETURN(event, PJ_EINVAL);
+
+ return pj_event_wait_for(event, 0);
+}
+
+/*
+ * pj_event_set()
+ */
+PJ_DEF(pj_status_t) pj_event_set(pj_event_t *event)
+{
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(event, PJ_EINVAL);
+
+ PJ_LOG(6, (event->obj_name, "Setting event"));
+
+ if (SetEvent(event->hEvent))
+ return PJ_SUCCESS;
+ else
+ return PJ_RETURN_OS_ERROR(GetLastError());
+}
+
+/*
+ * pj_event_pulse()
+ */
+PJ_DEF(pj_status_t) pj_event_pulse(pj_event_t *event)
+{
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(event, PJ_EINVAL);
+
+ PJ_LOG(6, (event->obj_name, "Pulsing event"));
+
+ if (PulseEvent(event->hEvent))
+ return PJ_SUCCESS;
+ else
+ return PJ_RETURN_OS_ERROR(GetLastError());
+}
+
+/*
+ * pj_event_reset()
+ */
+PJ_DEF(pj_status_t) pj_event_reset(pj_event_t *event)
+{
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(event, PJ_EINVAL);
+
+ PJ_LOG(6, (event->obj_name, "Event is reset"));
+
+ if (ResetEvent(event->hEvent))
+ return PJ_SUCCESS;
+ else
+ return PJ_RETURN_OS_ERROR(GetLastError());
+}
+
+/*
+ * pj_event_destroy()
+ */
+PJ_DEF(pj_status_t) pj_event_destroy(pj_event_t *event)
+{
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(event, PJ_EINVAL);
+
+ PJ_LOG(6, (event->obj_name, "Event is destroying"));
+
+ if (CloseHandle(event->hEvent))
+ return PJ_SUCCESS;
+ else
+ return PJ_RETURN_OS_ERROR(GetLastError());
+}
+
+#endif /* PJ_HAS_EVENT_OBJ */
+
+///////////////////////////////////////////////////////////////////////////////
+#if defined(PJ_TERM_HAS_COLOR) && PJ_TERM_HAS_COLOR != 0
+/*
+ * Terminal color
+ */
+
+static WORD pj_color_to_os_attr(pj_color_t color)
+{
+ WORD attr = 0;
+
+ if (color & PJ_TERM_COLOR_R)
+ attr |= FOREGROUND_RED;
+ if (color & PJ_TERM_COLOR_G)
+ attr |= FOREGROUND_GREEN;
+ if (color & PJ_TERM_COLOR_B)
+ attr |= FOREGROUND_BLUE;
+ if (color & PJ_TERM_COLOR_BRIGHT)
+ attr |= FOREGROUND_INTENSITY;
+
+ return attr;
+}
+
+static pj_color_t os_attr_to_pj_color(WORD attr)
+{
+ int color = 0;
+
+ if (attr & FOREGROUND_RED)
+ color |= PJ_TERM_COLOR_R;
+ if (attr & FOREGROUND_GREEN)
+ color |= PJ_TERM_COLOR_G;
+ if (attr & FOREGROUND_BLUE)
+ color |= PJ_TERM_COLOR_B;
+ if (attr & FOREGROUND_INTENSITY)
+ color |= PJ_TERM_COLOR_BRIGHT;
+
+ return color;
+}
+
+
+/*
+ * pj_term_set_color()
+ */
+PJ_DEF(pj_status_t) pj_term_set_color(pj_color_t color)
+{
+ BOOL rc;
+ WORD attr = 0;
+
+ PJ_CHECK_STACK();
+
+ attr = pj_color_to_os_attr(color);
+ rc = SetConsoleTextAttribute( GetStdHandle(STD_OUTPUT_HANDLE), attr);
+ return rc ? PJ_SUCCESS : PJ_RETURN_OS_ERROR(GetLastError());
+}
+
+/*
+ * pj_term_get_color()
+ * Get current terminal foreground color.
+ */
+PJ_DEF(pj_color_t) pj_term_get_color(void)
+{
+ CONSOLE_SCREEN_BUFFER_INFO info;
+
+ PJ_CHECK_STACK();
+
+ GetConsoleScreenBufferInfo( GetStdHandle(STD_OUTPUT_HANDLE), &info);
+ return os_attr_to_pj_color(info.wAttributes);
+}
+
+#endif /* PJ_TERM_HAS_COLOR */
diff --git a/pjlib/src/pj/os_error_linux_kernel.c b/pjlib/src/pj/os_error_linux_kernel.c
new file mode 100644
index 00000000..4c83b491
--- /dev/null
+++ b/pjlib/src/pj/os_error_linux_kernel.c
@@ -0,0 +1,73 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/os_error_linux_kernel.c 2 10/29/05 11:51a Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pj/os_error_linux_kernel.c $
+ *
+ * 2 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 1 10/19/05 1:48p Bennylp
+ * Created.
+ *
+ */
+#include <pj/string.h>
+#include <pj/compat/errno.h>
+#include <linux/config.h>
+#include <linux/version.h>
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/kernel.h>
+#include <linux/errno.h>
+
+int kernel_errno;
+
+PJ_DEF(pj_status_t) pj_get_os_error(void)
+{
+ return errno;
+}
+
+PJ_DEF(void) pj_set_os_error(pj_status_t code)
+{
+ errno = code;
+}
+
+PJ_DEF(pj_status_t) pj_get_netos_error(void)
+{
+ return errno;
+}
+
+PJ_DEF(void) pj_set_netos_error(pj_status_t code)
+{
+ errno = code;
+}
+
+/*
+ * platform_strerror()
+ *
+ * Platform-specific error message. This function is called by pj_strerror()
+ * in errno.c
+ */
+int platform_strerror( pj_os_err_type os_errcode,
+ char *buf, pj_size_t bufsize)
+{
+ char errmsg[32];
+ int len;
+
+ /* Handle EINVAL as special case so that it'll pass errno test. */
+ if (os_errcode==EINVAL)
+ strcpy(errmsg, "Invalid value");
+ else
+ sprintf(errmsg, "errno=%d", os_errcode);
+
+ len = strlen(errmsg);
+
+ if (len >= bufsize)
+ len = bufsize-1;
+
+ pj_memcpy(buf, errmsg, len);
+ buf[len] = '\0';
+
+ return len;
+}
+
+
diff --git a/pjlib/src/pj/os_error_unix.c b/pjlib/src/pj/os_error_unix.c
new file mode 100644
index 00000000..f526c671
--- /dev/null
+++ b/pjlib/src/pj/os_error_unix.c
@@ -0,0 +1,52 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/os_error_unix.c 1 10/14/05 12:19a Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pj/os_error_unix.c $
+ *
+ * 1 10/14/05 12:19a Bennylp
+ * Created.
+ *
+ */
+#include <pj/errno.h>
+#include <pj/string.h>
+#include <errno.h>
+
+PJ_DEF(pj_status_t) pj_get_os_error(void)
+{
+ return PJ_STATUS_FROM_OS(errno);
+}
+
+PJ_DEF(void) pj_set_os_error(pj_status_t code)
+{
+ errno = PJ_STATUS_TO_OS(code);
+}
+
+PJ_DEF(pj_status_t) pj_get_netos_error(void)
+{
+ return PJ_STATUS_FROM_OS(errno);
+}
+
+PJ_DEF(void) pj_set_netos_error(pj_status_t code)
+{
+ errno = PJ_STATUS_TO_OS(code);
+}
+
+/*
+ * platform_strerror()
+ *
+ * Platform-specific error message. This function is called by pj_strerror()
+ * in errno.c
+ */
+int platform_strerror( pj_os_err_type os_errcode,
+ char *buf, pj_size_t bufsize)
+{
+ const char *syserr = strerror(os_errcode);
+ pj_size_t len = syserr ? strlen(syserr) : 0;
+
+ if (len >= bufsize) len = bufsize - 1;
+ if (len > 0)
+ pj_memcpy(buf, syserr, len);
+ buf[len] = '\0';
+ return len;
+}
+
+
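A small sketch of how these hooks fit the error framework (illustrative, not part of this change; strerror() here is just the C library call, while the PJ_STATUS_FROM_OS/PJ_STATUS_TO_OS mapping comes from pj/errno.h):

    #include <pj/errno.h>
    #include <string.h>
    #include <stdio.h>

    static void report_last_error(void)
    {
        pj_status_t status = pj_get_os_error();  /* PJ_STATUS_FROM_OS(errno) */
        int native = PJ_STATUS_TO_OS(status);    /* back to the raw errno value */

        printf("last OS error: status=%d, errno=%d (%s)\n",
               status, native, strerror(native));
    }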
diff --git a/pjlib/src/pj/os_error_win32.c b/pjlib/src/pj/os_error_win32.c
new file mode 100644
index 00000000..19471fcf
--- /dev/null
+++ b/pjlib/src/pj/os_error_win32.c
@@ -0,0 +1,161 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/os_error_win32.c 3 10/14/05 12:26a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/os_error_win32.c $
+ *
+ * 3 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 2 9/21/05 1:39p Bennylp
+ * Periodic checkin for backup.
+ *
+ * 1 9/17/05 10:36a Bennylp
+ * Created.
+ *
+ */
+#include <pj/errno.h>
+#include <pj/assert.h>
+#include <pj/compat/stdarg.h>
+#include <pj/compat/sprintf.h>
+#include <pj/compat/vsprintf.h>
+#include <pj/string.h>
+
+
+#if defined(PJ_HAS_WINSOCK2_H) && PJ_HAS_WINSOCK2_H != 0
+# include <winsock2.h>
+#elif defined(PJ_HAS_WINSOCK_H) && PJ_HAS_WINSOCK_H != 0
+# include <winsock.h>
+#endif
+
+
+/*
+ * From Apache's APR:
+ */
+static const struct {
+ pj_os_err_type code;
+ const char *msg;
+} gaErrorList[] = {
+ {WSAEINTR, "Interrupted system call"},
+ {WSAEBADF, "Bad file number"},
+ {WSAEACCES, "Permission denied"},
+ {WSAEFAULT, "Bad address"},
+ {WSAEINVAL, "Invalid argument"},
+ {WSAEMFILE, "Too many open sockets"},
+ {WSAEWOULDBLOCK, "Operation would block"},
+ {WSAEINPROGRESS, "Operation now in progress"},
+ {WSAEALREADY, "Operation already in progress"},
+ {WSAENOTSOCK, "Socket operation on non-socket"},
+ {WSAEDESTADDRREQ, "Destination address required"},
+ {WSAEMSGSIZE, "Message too long"},
+ {WSAEPROTOTYPE, "Protocol wrong type for socket"},
+ {WSAENOPROTOOPT, "Bad protocol option"},
+ {WSAEPROTONOSUPPORT, "Protocol not supported"},
+ {WSAESOCKTNOSUPPORT, "Socket type not supported"},
+ {WSAEOPNOTSUPP, "Operation not supported on socket"},
+ {WSAEPFNOSUPPORT, "Protocol family not supported"},
+ {WSAEAFNOSUPPORT, "Address family not supported"},
+ {WSAEADDRINUSE, "Address already in use"},
+ {WSAEADDRNOTAVAIL, "Can't assign requested address"},
+ {WSAENETDOWN, "Network is down"},
+ {WSAENETUNREACH, "Network is unreachable"},
+ {WSAENETRESET, "Net connection reset"},
+ {WSAECONNABORTED, "Software caused connection abort"},
+ {WSAECONNRESET, "Connection reset by peer"},
+ {WSAENOBUFS, "No buffer space available"},
+ {WSAEISCONN, "Socket is already connected"},
+ {WSAENOTCONN, "Socket is not connected"},
+ {WSAESHUTDOWN, "Can't send after socket shutdown"},
+ {WSAETOOMANYREFS, "Too many references, can't splice"},
+ {WSAETIMEDOUT, "Connection timed out"},
+ {WSAECONNREFUSED, "Connection refused"},
+ {WSAELOOP, "Too many levels of symbolic links"},
+ {WSAENAMETOOLONG, "File name too long"},
+ {WSAEHOSTDOWN, "Host is down"},
+ {WSAEHOSTUNREACH, "No route to host"},
+ {WSAENOTEMPTY, "Directory not empty"},
+ {WSAEPROCLIM, "Too many processes"},
+ {WSAEUSERS, "Too many users"},
+ {WSAEDQUOT, "Disc quota exceeded"},
+ {WSAESTALE, "Stale NFS file handle"},
+ {WSAEREMOTE, "Too many levels of remote in path"},
+ {WSASYSNOTREADY, "Network system is unavailable"},
+ {WSAVERNOTSUPPORTED, "Winsock version out of range"},
+ {WSANOTINITIALISED, "WSAStartup not yet called"},
+ {WSAEDISCON, "Graceful shutdown in progress"},
+ {WSAHOST_NOT_FOUND, "Host not found"},
+ {WSANO_DATA, "No host data of that type was found"},
+ {0, NULL}
+};
+
+
+PJ_DEF(pj_status_t) pj_get_os_error(void)
+{
+ return PJ_STATUS_FROM_OS(GetLastError());
+}
+
+PJ_DEF(void) pj_set_os_error(pj_status_t code)
+{
+ SetLastError(PJ_STATUS_TO_OS(code));
+}
+
+PJ_DEF(pj_status_t) pj_get_netos_error(void)
+{
+ return PJ_STATUS_FROM_OS(WSAGetLastError());
+}
+
+PJ_DEF(void) pj_set_netos_error(pj_status_t code)
+{
+ WSASetLastError(PJ_STATUS_TO_OS(code));
+}
+
+/*
+ * platform_strerror()
+ *
+ * Platform-specific error message. This function is called by pj_strerror()
+ * in errno.c
+ */
+int platform_strerror( pj_os_err_type os_errcode,
+ char *buf, pj_size_t bufsize)
+{
+ int len;
+
+ pj_assert(buf != NULL);
+ pj_assert(bufsize >= 0);
+
+ /*
+ * MUST NOT check stack here.
+ * This function might be called from PJ_CHECK_STACK() itself!
+ //PJ_CHECK_STACK();
+ */
+
+ len = FormatMessage( FORMAT_MESSAGE_FROM_SYSTEM
+ | FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL,
+ os_errcode,
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ (LPTSTR)buf,
+ (DWORD)bufsize,
+ NULL);
+
+ if (!len) {
+ int i;
+ for (i = 0; gaErrorList[i].msg; ++i) {
+ if (gaErrorList[i].code == os_errcode) {
+ len = strlen(gaErrorList[i].msg);
+ if ((pj_size_t)len >= bufsize) {
+ len = bufsize-1;
+ }
+ pj_memcpy(buf, gaErrorList[i].msg, len);
+ buf[len] = '\0';
+ break;
+ }
+ }
+ }
+
+ if (!len) {
+ len = snprintf( buf, bufsize, "Unknown native error %u", (unsigned)os_errcode);
+ buf[len] = '\0';
+ }
+
+ return len;
+}
+
diff --git a/pjlib/src/pj/os_time_ansi.c b/pjlib/src/pj/os_time_ansi.c
new file mode 100644
index 00000000..906b21d6
--- /dev/null
+++ b/pjlib/src/pj/os_time_ansi.c
@@ -0,0 +1,65 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/os_time_ansi.c 2 10/14/05 12:26a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/os_time_ansi.c $
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 9/17/05 10:36a Bennylp
+ * Created.
+ *
+ */
+#include <pj/os.h>
+#include <pj/compat/time.h>
+
+///////////////////////////////////////////////////////////////////////////////
+
+PJ_DEF(pj_status_t) pj_gettimeofday(pj_time_val *tv)
+{
+ struct timeb tb;
+
+ PJ_CHECK_STACK();
+
+ ftime(&tb);
+ tv->sec = tb.time;
+ tv->msec = tb.millitm;
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(pj_status_t) pj_time_decode(const pj_time_val *tv, pj_parsed_time *pt)
+{
+ struct tm *local_time;
+
+ PJ_CHECK_STACK();
+
+ local_time = localtime((time_t*)&tv->sec);
+
+ pt->year = local_time->tm_year+1900;
+ pt->mon = local_time->tm_mon;
+ pt->day = local_time->tm_mday;
+ pt->hour = local_time->tm_hour;
+ pt->min = local_time->tm_min;
+ pt->sec = local_time->tm_sec;
+ pt->wday = local_time->tm_wday;
+ pt->yday = local_time->tm_yday;
+ pt->msec = tv->msec;
+
+ return PJ_SUCCESS;
+}
+
+/**
+ * Encode parsed time to time value.
+ */
+PJ_DEF(pj_status_t) pj_time_encode(const pj_parsed_time *pt, pj_time_val *tv);
+
+/**
+ * Convert local time to GMT.
+ */
+PJ_DEF(pj_status_t) pj_time_local_to_gmt(pj_time_val *tv);
+
+/**
+ * Convert GMT to local time.
+ */
+PJ_DEF(pj_status_t) pj_time_gmt_to_local(pj_time_val *tv);
+
+
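A brief sketch of the wall-clock API defined above (illustrative, not part of this change; note that pt.mon is zero-based because it is copied straight from tm_mon):

    #include <pj/os.h>
    #include <stdio.h>

    static void print_local_time(void)
    {
        pj_time_val tv;
        pj_parsed_time pt;

        if (pj_gettimeofday(&tv) != PJ_SUCCESS)
            return;

        pj_time_decode(&tv, &pt);
        printf("%04d-%02d-%02d %02d:%02d:%02d.%03d\n",
               pt.year, pt.mon + 1, pt.day,
               pt.hour, pt.min, pt.sec, (int)pt.msec);
    }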
diff --git a/pjlib/src/pj/os_time_linux_kernel.c b/pjlib/src/pj/os_time_linux_kernel.c
new file mode 100644
index 00000000..4d5f4cb4
--- /dev/null
+++ b/pjlib/src/pj/os_time_linux_kernel.c
@@ -0,0 +1,58 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/os_time_linux_kernel.c 2 10/14/05 12:26a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/os_time_linux_kernel.c $
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 9/22/05 10:39a Bennylp
+ * Created.
+ *
+ */
+#include <pj/os.h>
+#include <linux/time.h>
+
+///////////////////////////////////////////////////////////////////////////////
+
+PJ_DEF(pj_status_t) pj_gettimeofday(pj_time_val *tv)
+{
+ struct timeval tval;
+
+ do_gettimeofday(&tval);
+ tv->sec = tval.tv_sec;
+ tv->msec = tval.tv_usec / 1000;
+
+ return 0;
+}
+
+PJ_DEF(pj_status_t) pj_time_decode(const pj_time_val *tv, pj_parsed_time *pt)
+{
+ pt->year = 2005;
+ pt->mon = 8;
+ pt->day = 20;
+ pt->hour = 16;
+ pt->min = 30;
+ pt->sec = 30;
+ pt->wday = 3;
+ pt->yday = 200;
+ pt->msec = 777;
+
+ return -1;
+}
+
+/**
+ * Encode parsed time to time value.
+ */
+PJ_DEF(pj_status_t) pj_time_encode(const pj_parsed_time *pt, pj_time_val *tv);
+
+/**
+ * Convert local time to GMT.
+ */
+PJ_DEF(pj_status_t) pj_time_local_to_gmt(pj_time_val *tv);
+
+/**
+ * Convert GMT to local time.
+ */
+PJ_DEF(pj_status_t) pj_time_gmt_to_local(pj_time_val *tv);
+
+
diff --git a/pjlib/src/pj/os_timestamp_common.c b/pjlib/src/pj/os_timestamp_common.c
new file mode 100644
index 00000000..630ffb27
--- /dev/null
+++ b/pjlib/src/pj/os_timestamp_common.c
@@ -0,0 +1,129 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/os_timestamp_common.c 2 10/14/05 12:26a Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pj/os_timestamp_common.c $
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/09/05 2:56p Bennylp
+ * Created.
+ *
+ */
+#include <pj/os.h>
+#include <pj/compat/high_precision.h>
+
+#if defined(PJ_HAS_HIGH_RES_TIMER) && PJ_HAS_HIGH_RES_TIMER != 0
+
+#define U32MAX (0xFFFFFFFFUL)
+#define NANOSEC (1000000000UL)
+#define USEC (1000000UL)
+#define MSEC (1000)
+
+static pj_highprec_t get_elapsed( const pj_timestamp *start,
+ const pj_timestamp *stop )
+{
+ pj_highprec_t elapsed_hi, elapsed_lo;
+
+ elapsed_hi = stop->u32.hi - start->u32.hi;
+ elapsed_lo = stop->u32.lo - start->u32.lo;
+
+ /* elapsed_hi = elapsed_hi * U32MAX */
+ pj_highprec_mul(elapsed_hi, U32MAX);
+
+ return elapsed_hi + elapsed_lo;
+}
+
+static pj_highprec_t elapsed_usec( const pj_timestamp *start,
+ const pj_timestamp *stop )
+{
+ pj_timestamp ts_freq;
+ pj_highprec_t freq, elapsed;
+
+ if (pj_get_timestamp_freq(&ts_freq) != PJ_SUCCESS)
+ return 0;
+
+ /* Convert frequency timestamp */
+ freq = ts_freq.u32.hi;
+ pj_highprec_mul(freq, U32MAX);
+ freq += ts_freq.u32.lo;
+
+ /* Avoid division by zero. */
+ if (freq == 0) freq = 1;
+
+ /* Get elapsed time in cycles. */
+ elapsed = get_elapsed(start, stop);
+
+ /* usec = elapsed * USEC / freq */
+ pj_highprec_mul(elapsed, USEC);
+ pj_highprec_div(elapsed, freq);
+
+ return elapsed;
+}
+
+PJ_DEF(pj_uint32_t) pj_elapsed_nanosec( const pj_timestamp *start,
+ const pj_timestamp *stop )
+{
+ pj_timestamp ts_freq;
+ pj_highprec_t freq, elapsed;
+
+ if (pj_get_timestamp_freq(&ts_freq) != PJ_SUCCESS)
+ return 0;
+
+ /* Convert frequency timestamp */
+ freq = ts_freq.u32.hi;
+ pj_highprec_mul(freq, U32MAX);
+ freq += ts_freq.u32.lo;
+
+ /* Avoid division by zero. */
+ if (freq == 0) freq = 1;
+
+ /* Get elapsed time in cycles. */
+ elapsed = get_elapsed(start, stop);
+
+ /* nanosec = elapsed * NANOSEC / freq */
+ pj_highprec_mul(elapsed, NANOSEC);
+ pj_highprec_div(elapsed, freq);
+
+ return (pj_uint32_t)elapsed;
+}
+
+PJ_DEF(pj_uint32_t) pj_elapsed_usec( const pj_timestamp *start,
+ const pj_timestamp *stop )
+{
+ return (pj_uint32_t)elapsed_usec(start, stop);
+}
+
+PJ_DEF(pj_time_val) pj_elapsed_time( const pj_timestamp *start,
+ const pj_timestamp *stop )
+{
+ pj_highprec_t elapsed = elapsed_usec(start, stop);
+ pj_time_val tv_elapsed;
+
+ if (PJ_HIGHPREC_VALUE_IS_ZERO(elapsed)) {
+ tv_elapsed.sec = tv_elapsed.msec = 0;
+ return tv_elapsed;
+ } else {
+ pj_highprec_t sec, msec;
+
+ sec = elapsed;
+ pj_highprec_div(sec, USEC);
+ tv_elapsed.sec = (long)sec;
+
+ msec = elapsed;
+ pj_highprec_mod(msec, USEC);
+ pj_highprec_div(msec, 1000);
+ tv_elapsed.msec = (long)msec;
+
+ return tv_elapsed;
+ }
+}
+
+PJ_DEF(pj_uint32_t) pj_elapsed_cycle( const pj_timestamp *start,
+ const pj_timestamp *stop )
+{
+ return stop->u32.lo - start->u32.lo;
+}
+
+#endif /* PJ_HAS_HIGH_RES_TIMER */
+
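The conversions above amount to usec = (stop - start) * 1000000 / freq (and the nanosecond variant with NANOSEC), done with the pj_highprec helpers so they also work without native 64-bit arithmetic. A typical measurement sketch (illustrative, not part of this change):

    #include <pj/os.h>
    #include <pj/log.h>

    static void time_some_work(void (*work)(void))
    {
        pj_timestamp start, stop;

        pj_get_timestamp(&start);
        (*work)();
        pj_get_timestamp(&stop);

        PJ_LOG(3, ("timing", "work took %u usec (%u cycles)",
                   pj_elapsed_usec(&start, &stop),
                   pj_elapsed_cycle(&start, &stop)));
    }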
diff --git a/pjlib/src/pj/os_timestamp_linux.c b/pjlib/src/pj/os_timestamp_linux.c
new file mode 100644
index 00000000..52639dcd
--- /dev/null
+++ b/pjlib/src/pj/os_timestamp_linux.c
@@ -0,0 +1,137 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/os_timestamp_linux.c 4 10/29/05 10:27p Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/os_timestamp_linux.c $
+ *
+ * 4 10/29/05 10:27p Bennylp
+ * Fixed misc warnings.
+ *
+ * 3 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 2 9/21/05 1:39p Bennylp
+ * Periodic checkin for backup.
+ *
+ * 1 9/18/05 9:25p Bennylp
+ * Created.
+ *
+ */
+#include <pj/os.h>
+#include <pj/errno.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <ctype.h>
+
+#if defined(PJ_HAS_PENTIUM) && PJ_HAS_PENTIUM!=0
+static int machine_speed_mhz;
+static pj_timestamp machine_speed;
+
+static __inline__ unsigned long long int rdtsc()
+{
+ unsigned long long int x;
+ __asm__ volatile (".byte 0x0f, 0x31" : "=A" (x));
+ return x;
+}
+
+/* Determine machine's CPU MHz to get the counter's frequency.
+ */
+static int get_machine_speed_mhz()
+{
+ FILE *strm;
+ char buf[512];
+ int len;
+ char *pos, *end;
+
+ PJ_CHECK_STACK();
+
+ /* Open /proc/cpuinfo and read the file */
+ strm = fopen("/proc/cpuinfo", "r");
+ if (!strm)
+ return -1;
+ len = fread(buf, 1, sizeof(buf), strm);
+ fclose(strm);
+ if (len < 1) {
+ return -1;
+ }
+ buf[len] = '\0';
+
+ /* Locate the MHz value. */
+ pos = strstr(buf, "cpu MHz");
+ if (!pos)
+ return -1;
+ pos = strchr(pos, ':');
+ if (!pos)
+ return -1;
+ end = (pos += 2);
+ while (isdigit(*end)) ++end;
+ *end = '\0';
+
+ /* Return the MHz part, plus one. */
+ return atoi(pos)+1;
+}
+
+PJ_DEF(pj_status_t) pj_get_timestamp(pj_timestamp *ts)
+{
+ if (machine_speed_mhz == 0) {
+ machine_speed_mhz = get_machine_speed_mhz();
+ if (machine_speed_mhz > 0) {
+ machine_speed.u64 = machine_speed_mhz * 1000000.0;
+ }
+ }
+
+ if (machine_speed_mhz == -1) {
+ ts->u64 = 0;
+ return -1;
+ }
+ ts->u64 = rdtsc();
+ return 0;
+}
+
+PJ_DEF(pj_status_t) pj_get_timestamp_freq(pj_timestamp *freq)
+{
+ if (machine_speed_mhz == 0) {
+ machine_speed_mhz = get_machine_speed_mhz();
+ if (machine_speed_mhz > 0) {
+ machine_speed.u64 = machine_speed_mhz * 1000000.0;
+ }
+ }
+
+ if (machine_speed_mhz == -1) {
+ freq->u64 = 1; /* return 1 to prevent division by zero in apps. */
+ return -1;
+ }
+
+ freq->u64 = machine_speed.u64;
+ return 0;
+}
+
+#else
+#include <sys/time.h>
+#include <errno.h>
+
+#define USEC_PER_SEC 1000000
+
+PJ_DEF(pj_status_t) pj_get_timestamp(pj_timestamp *ts)
+{
+ struct timeval tv;
+
+ if (gettimeofday(&tv, NULL) != 0) {
+ return PJ_RETURN_OS_ERROR(pj_get_native_os_error());
+ }
+
+ ts->u64 = tv.tv_sec;
+ ts->u64 *= USEC_PER_SEC;
+ ts->u64 += tv.tv_usec;
+
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(pj_status_t) pj_get_timestamp_freq(pj_timestamp *freq)
+{
+ freq->u32.hi = 0;
+ freq->u32.lo = USEC_PER_SEC;
+
+ return PJ_SUCCESS;
+}
+
+#endif
+
diff --git a/pjlib/src/pj/os_timestamp_linux_kernel.c b/pjlib/src/pj/os_timestamp_linux_kernel.c
new file mode 100644
index 00000000..8895cf9d
--- /dev/null
+++ b/pjlib/src/pj/os_timestamp_linux_kernel.c
@@ -0,0 +1,70 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/os_timestamp_linux_kernel.c 2 10/29/05 11:51a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/os_timestamp_linux_kernel.c $
+ *
+ * 2 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 1 9/22/05 10:39a Bennylp
+ * Created.
+ *
+ */
+#include <pj/os.h>
+#include <linux/time.h>
+
+#if 0
+PJ_DEF(pj_status_t) pj_get_timestamp(pj_timestamp *ts)
+{
+ ts->u32.hi = 0;
+ ts->u32.lo = jiffies;
+ return 0;
+}
+
+PJ_DEF(pj_status_t) pj_get_timestamp_freq(pj_timestamp *freq)
+{
+ freq->u32.hi = 0;
+ freq->u32.lo = HZ;
+ return 0;
+}
+#elif 0
+PJ_DEF(pj_status_t) pj_get_timestamp(pj_timestamp *ts)
+{
+ struct timespec tv;
+
+ tv = CURRENT_TIME;
+
+ ts->u64 = tv.tv_sec;
+ ts->u64 *= NSEC_PER_SEC;
+ ts->u64 += tv.tv_nsec;
+
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(pj_status_t) pj_get_timestamp_freq(pj_timestamp *freq)
+{
+ freq->u32.hi = 0;
+ freq->u32.lo = NSEC_PER_SEC;
+ return 0;
+}
+#else
+PJ_DEF(pj_status_t) pj_get_timestamp(pj_timestamp *ts)
+{
+ struct timeval tv;
+
+ do_gettimeofday(&tv);
+
+ ts->u64 = tv.tv_sec;
+ ts->u64 *= USEC_PER_SEC;
+ ts->u64 += tv.tv_usec;
+
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(pj_status_t) pj_get_timestamp_freq(pj_timestamp *freq)
+{
+ freq->u32.hi = 0;
+ freq->u32.lo = USEC_PER_SEC;
+ return 0;
+}
+
+#endif
+
diff --git a/pjlib/src/pj/os_timestamp_win32.c b/pjlib/src/pj/os_timestamp_win32.c
new file mode 100644
index 00000000..787c4bfc
--- /dev/null
+++ b/pjlib/src/pj/os_timestamp_win32.c
@@ -0,0 +1,38 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/os_timestamp_win32.c 2 10/14/05 12:26a Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pj/os_timestamp_win32.c $
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 9/18/05 8:15p Bennylp
+ * Created.
+ *
+ */
+#include <pj/os.h>
+#include <pj/errno.h>
+#include <windows.h>
+
+PJ_DEF(pj_status_t) pj_get_timestamp(pj_timestamp *ts)
+{
+ LARGE_INTEGER val;
+
+ if (!QueryPerformanceCounter(&val))
+ return PJ_RETURN_OS_ERROR(GetLastError());
+
+ ts->u64 = val.QuadPart;
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(pj_status_t) pj_get_timestamp_freq(pj_timestamp *freq)
+{
+ LARGE_INTEGER val;
+
+ if (!QueryPerformanceFrequency(&val))
+ return PJ_RETURN_OS_ERROR(GetLastError());
+
+ freq->u64 = val.QuadPart;
+ return PJ_SUCCESS;
+}
+
diff --git a/pjlib/src/pj/pool.c b/pjlib/src/pj/pool.c
new file mode 100644
index 00000000..3bf195b0
--- /dev/null
+++ b/pjlib/src/pj/pool.c
@@ -0,0 +1,265 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/pool.c 8 10/14/05 12:26a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/pool.c $
+ *
+ * 8 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 7 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ */
+#include <pj/pool.h>
+#include <pj/log.h>
+#include <pj/except.h>
+#include <pj/assert.h>
+#include <pj/os.h>
+#include <pj/compat/sprintf.h>
+
+/* Include inline definitions when inlining is disabled. */
+#if !PJ_FUNCTIONS_ARE_INLINED
+# include <pj/pool_i.h>
+#endif
+
+#define LOG(expr) PJ_LOG(5,expr)
+
+int PJ_NO_MEMORY_EXCEPTION;
+
+/*
+ * Create new block.
+ * Create a new big chunk of memory (a block), from which subsequent user
+ * allocations will be taken.
+ */
+static pj_pool_block *pj_pool_create_block( pj_pool_t *pool, pj_size_t size)
+{
+ pj_pool_block *block;
+
+ PJ_CHECK_STACK();
+ pj_assert(size >= sizeof(pj_pool_block));
+
+ LOG((pool->obj_name, "create_block(sz=%u), cur.cap=%u, cur.used=%u",
+ size, pool->capacity, pool->used_size));
+
+ /* Request memory from allocator. */
+ block = (pj_pool_block*)
+ (*pool->factory->policy.block_alloc)(pool->factory, size);
+ if (block == NULL) {
+ (*pool->callback)(pool, size);
+ return NULL;
+ }
+
+ /* Add capacity. */
+ pool->capacity += size;
+ pool->used_size += sizeof(pj_pool_block);
+
+ /* Set block attributes. */
+ block->cur = block->buf = ((unsigned char*)block) + sizeof(pj_pool_block);
+ block->end = ((unsigned char*)block) + size;
+
+ /* Insert in the front of the list. */
+ pj_list_insert_after(&pool->block_list, block);
+
+ LOG((pool->obj_name," block created, buffer=%p-%p",block->buf, block->end));
+
+ return block;
+}
+
+/*
+ * Allocate a memory chunk for the user from the available blocks.
+ * This iterates through the block list to find space for the chunk.
+ * If none of the blocks has enough space, a new block may be created
+ * (depending on whether the pool is allowed to resize).
+ */
+PJ_DEF(void*) pj_pool_allocate_find(pj_pool_t *pool, unsigned size)
+{
+ pj_pool_block *block = pool->block_list.next;
+ void *p;
+ unsigned block_size;
+
+ PJ_CHECK_STACK();
+
+ while (block != &pool->block_list) {
+ p = pj_pool_alloc_from_block(pool, block, size);
+ if (p != NULL)
+ return p;
+ block = block->next;
+ }
+ /* No available space in all blocks. */
+
+ /* If pool is configured NOT to expand, return error. */
+ if (pool->increment_size == 0) {
+ LOG((pool->obj_name, "Can't expand pool to allocate %u bytes "
+ "(used=%u, cap=%u)",
+ size, pool->used_size, pool->capacity));
+ (*pool->callback)(pool, size);
+ return NULL;
+ }
+
+ /* If the pool is configured to expand, but the increment size
+ * is less than the required size, expand the pool by a multiple
+ * of the increment size.
+ */
+ if (pool->increment_size < size + sizeof(pj_pool_block)) {
+ unsigned count;
+ count = (size + pool->increment_size + sizeof(pj_pool_block)) /
+ pool->increment_size;
+ block_size = count * pool->increment_size;
+
+ } else {
+ block_size = pool->increment_size;
+ }
+
+ LOG((pool->obj_name,
+ "%u bytes requested, resizing pool by %u bytes (used=%u, cap=%u)",
+ size, block_size, pool->used_size, pool->capacity));
+
+ block = pj_pool_create_block(pool, block_size);
+ if (!block)
+ return NULL;
+
+ p = pj_pool_alloc_from_block(pool, block, size);
+ pj_assert(p != NULL);
+#if PJ_DEBUG
+ if (p == NULL) {
+ p = p;
+ }
+#endif
+ return p;
+}
+
+/*
+ * Internal function to initialize pool.
+ */
+PJ_DEF(void) pj_pool_init_int( pj_pool_t *pool,
+ const char *name,
+ pj_size_t increment_size,
+ pj_pool_callback *callback)
+{
+ pj_pool_block *block;
+
+ PJ_CHECK_STACK();
+
+ pool->increment_size = increment_size;
+ pool->callback = callback;
+ pool->used_size = sizeof(*pool);
+ block = pool->block_list.next;
+ while (block != &pool->block_list) {
+ pool->used_size += sizeof(pj_pool_block);
+ block = block->next;
+ }
+
+ if (name) {
+ if (strchr(name, '%') != NULL) {
+ sprintf(pool->obj_name, name, pool);
+ } else {
+ strncpy(pool->obj_name, name, PJ_MAX_OBJ_NAME);
+ }
+ } else {
+ pool->obj_name[0] = '\0';
+ }
+}
+
+/*
+ * Create new memory pool.
+ */
+PJ_DEF(pj_pool_t*) pj_pool_create_int( pj_pool_factory *f, const char *name,
+ pj_size_t initial_size,
+ pj_size_t increment_size,
+ pj_pool_callback *callback)
+{
+ pj_pool_t *pool;
+ pj_pool_block *block;
+ unsigned char *buffer;
+
+ PJ_CHECK_STACK();
+
+ buffer = (*f->policy.block_alloc)(f, initial_size);
+ if (!buffer)
+ return NULL;
+
+ /* Set pool administrative data. */
+ pool = (pj_pool_t*)buffer;
+ pj_memset(pool, 0, sizeof(*pool));
+
+ pj_list_init(&pool->block_list);
+ pool->factory = f;
+
+ /* Create the first block from the memory. */
+ block = (pj_pool_block*) (buffer + sizeof(*pool));
+ block->cur = block->buf = ((unsigned char*)block) + sizeof(pj_pool_block);
+ block->end = buffer + initial_size;
+ pj_list_insert_after(&pool->block_list, block);
+
+ pj_pool_init_int(pool, name, increment_size, callback);
+
+ /* Pool initial capacity and used size */
+ pool->capacity = initial_size;
+
+ LOG((pool->obj_name, "pool created, size=%u", pool->capacity));
+ return pool;
+}
+
+/*
+ * Reset the pool to the state when it was created.
+ * All blocks will be deallocated except the first block. All memory areas
+ * are marked as free.
+ */
+static void reset_pool(pj_pool_t *pool)
+{
+ pj_pool_block *block;
+
+ PJ_CHECK_STACK();
+
+ block = pool->block_list.prev;
+ if (block == &pool->block_list)
+ return;
+
+ /* Skip the first block because it is occupying the same memory
+ as the pool itself.
+ */
+ block = block->prev;
+
+ while (block != &pool->block_list) {
+ pj_pool_block *prev = block->prev;
+ pj_list_erase(block);
+ (*pool->factory->policy.block_free)(pool->factory, block,
+ block->end - (unsigned char*)block);
+ block = prev;
+ }
+
+ block = pool->block_list.next;
+ block->cur = block->buf;
+ pool->capacity = block->end - (unsigned char*)pool;
+ pool->used_size = 0;
+}
+
+/*
+ * The public function to reset pool.
+ */
+PJ_DEF(void) pj_pool_reset(pj_pool_t *pool)
+{
+ LOG((pool->obj_name, "reset(): cap=%d, used=%d(%d%%)",
+ pool->capacity, pool->used_size, pool->used_size*100/pool->capacity));
+
+ reset_pool(pool);
+}
+
+/*
+ * Destroy the pool.
+ */
+PJ_DEF(void) pj_pool_destroy_int(pj_pool_t *pool)
+{
+ pj_size_t initial_size;
+
+ LOG((pool->obj_name, "destroy(): cap=%d, used=%d(%d%%), block0=%p-%p",
+ pool->capacity, pool->used_size, pool->used_size*100/pool->capacity,
+ ((pj_pool_block*)pool->block_list.next)->buf,
+ ((pj_pool_block*)pool->block_list.next)->end));
+
+ reset_pool(pool);
+ initial_size = ((pj_pool_block*)pool->block_list.next)->end -
+ (unsigned char*)pool;
+ (*pool->factory->policy.block_free)(pool->factory, pool, initial_size);
+}
+
+
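A short sketch of how allocation and growth behave (illustrative, not part of this change; it assumes the pj_pool_create()/pj_pool_alloc()/pj_pool_release() wrappers declared in pool.h and pool_i.h, and a factory such as the caching pool added in pool_caching.c below):

    #include <pj/pool.h>

    static void pool_demo(pj_pool_factory *factory)
    {
        pj_pool_t *pool;
        void *small_buf, *large_buf;

        /* 1 KB up front, grow by 1 KB whenever a block runs out of space. */
        pool = pj_pool_create(factory, "demo%p", 1024, 1024, NULL);
        if (!pool)
            return;

        /* Served from the first block. */
        small_buf = pj_pool_alloc(pool, 128);

        /* Larger than the remaining space: pj_pool_allocate_find() above
         * creates an extra block sized in multiples of increment_size. */
        large_buf = pj_pool_alloc(pool, 3000);

        PJ_UNUSED_ARG(small_buf);
        PJ_UNUSED_ARG(large_buf);

        /* Hand the pool back to its factory. */
        pj_pool_release(pool);
    }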
diff --git a/pjlib/src/pj/pool_caching.c b/pjlib/src/pj/pool_caching.c
new file mode 100644
index 00000000..b72b0d45
--- /dev/null
+++ b/pjlib/src/pj/pool_caching.c
@@ -0,0 +1,210 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/pool_caching.c 5 9/17/05 10:37a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/pool_caching.c $
+ *
+ * 5 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ */
+#include <pj/pool.h>
+#include <pj/log.h>
+#include <pj/string.h>
+#include <pj/assert.h>
+#include <pj/os.h>
+
+static pj_pool_t* cpool_create_pool(pj_pool_factory *pf,
+ const char *name,
+ pj_size_t initial_size,
+ pj_size_t increment_sz,
+ pj_pool_callback *callback);
+static void cpool_release_pool(pj_pool_factory *pf, pj_pool_t *pool);
+static void cpool_dump_status(pj_pool_factory *factory, pj_bool_t detail );
+
+static pj_size_t pool_sizes[PJ_CACHING_POOL_ARRAY_SIZE] =
+{
+ 256, 512, 1024, 2048, 4096, 8192, 12288, 16384,
+ 20480, 24576, 28672, 32768, 40960, 49152, 57344, 65536
+};
+
+
+PJ_DEF(void) pj_caching_pool_init( pj_caching_pool *cp,
+ const pj_pool_factory_policy *policy,
+ pj_size_t max_capacity)
+{
+ int i;
+
+ PJ_CHECK_STACK();
+
+ pj_memset(cp, 0, sizeof(*cp));
+
+ cp->max_capacity = max_capacity;
+ pj_list_init(&cp->used_list);
+ for (i=0; i<PJ_CACHING_POOL_ARRAY_SIZE; ++i)
+ pj_list_init(&cp->free_list[i]);
+
+ pj_memcpy(&cp->factory.policy, policy, sizeof(pj_pool_factory_policy));
+ cp->factory.create_pool = &cpool_create_pool;
+ cp->factory.release_pool = &cpool_release_pool;
+ cp->factory.dump_status = &cpool_dump_status;
+}
+
+PJ_DEF(void) pj_caching_pool_destroy( pj_caching_pool *cp )
+{
+ int i;
+ pj_pool_t *pool;
+
+ PJ_CHECK_STACK();
+
+ /* Delete all pool in free list */
+ for (i=0; i < PJ_CACHING_POOL_ARRAY_SIZE; ++i) {
+ pj_pool_t *pool = cp->free_list[i].next;
+ pj_pool_t *next;
+ for (; pool != (void*)&cp->free_list[i]; pool = next) {
+ next = pool->next;
+ pj_list_erase(pool);
+ pj_pool_destroy_int(pool);
+ }
+ }
+
+ /* Delete all pools in used list */
+ pool = cp->used_list.next;
+ while (pool != (pj_pool_t*) &cp->used_list) {
+ pj_pool_t *next = pool->next;
+ pj_list_erase(pool);
+ pj_pool_destroy_int(pool);
+ pool = next;
+ }
+}
+
+static pj_pool_t* cpool_create_pool(pj_pool_factory *pf,
+ const char *name,
+ pj_size_t initial_size,
+ pj_size_t increment_sz,
+ pj_pool_callback *callback)
+{
+ pj_caching_pool *cp = (pj_caching_pool*)pf;
+ pj_pool_t *pool;
+ int idx;
+
+ PJ_CHECK_STACK();
+
+ /* Use pool factory's policy when callback is NULL */
+ if (callback == NULL) {
+ callback = pf->policy.callback;
+ }
+
+ /* Find a suitable size for the pool.
+ * We just do a linear search over the size array, since the array has
+ * only a few elements; a binary search would probably be less efficient
+ * for this purpose.
+ */
+ for (idx=0;
+ idx < PJ_CACHING_POOL_ARRAY_SIZE && pool_sizes[idx] < initial_size;
+ ++idx)
+ ;
+
+ /* Check whether there's a pool in the list. */
+ if (idx==PJ_CACHING_POOL_ARRAY_SIZE || pj_list_empty(&cp->free_list[idx])) {
+ /* No pool is available. */
+ /* Set minimum size. */
+ if (idx < PJ_CACHING_POOL_ARRAY_SIZE)
+ initial_size = pool_sizes[idx];
+
+ /* Create new pool */
+ pool = pj_pool_create_int(&cp->factory, name, initial_size,
+ increment_sz, callback);
+ if (!pool)
+ return NULL;
+
+ } else {
+ /* Get one pool from the list. */
+ pool = cp->free_list[idx].next;
+ pj_list_erase(pool);
+
+ /* Initialize the pool. */
+ pj_pool_init_int(pool, name, increment_sz, callback);
+
+ /* Update pool manager's free capacity. */
+ cp->capacity -= pj_pool_get_capacity(pool);
+
+ PJ_LOG(5, (pool->obj_name, "pool reused, size=%u", pool->capacity));
+ }
+
+ /* Put in used list. */
+ pj_list_insert_before( &cp->used_list, pool );
+
+ /* Increment used count. */
+ ++cp->used_count;
+ return pool;
+}
+
+static void cpool_release_pool( pj_pool_factory *pf, pj_pool_t *pool)
+{
+ pj_caching_pool *cp = (pj_caching_pool*)pf;
+ int i;
+
+ PJ_CHECK_STACK();
+
+ /* Erase from the used list. */
+ pj_list_erase(pool);
+
+ /* Decrement used count. */
+ --cp->used_count;
+
+ /* Destroy the pool if its size is larger than our largest cached size,
+ * or if the total capacity in our recycle list (plus this pool's size)
+ * would exceed the maximum capacity.
+ */
+ if (pool->capacity > pool_sizes[PJ_CACHING_POOL_ARRAY_SIZE-1] ||
+ cp->capacity + pool->capacity > cp->max_capacity)
+ {
+ pj_pool_destroy_int(pool);
+ return;
+ }
+
+ /* Reset pool. */
+ PJ_LOG(4, (pool->obj_name, "recycle(): cap=%d, used=%d(%d%%)",
+ pool->capacity, pool->used_size, pool->used_size*100/pool->capacity));
+ pj_pool_reset(pool);
+
+ /*
+ * Otherwise put the pool in our recycle list.
+ */
+ for (i=0; i < PJ_CACHING_POOL_ARRAY_SIZE && pool_sizes[i] != pool->capacity; ++i)
+ ;
+
+ pj_assert( i != PJ_CACHING_POOL_ARRAY_SIZE );
+ if (i == PJ_CACHING_POOL_ARRAY_SIZE) {
+ /* Something has gone wrong with the pool. */
+ pj_pool_destroy_int(pool);
+ return;
+ }
+
+ pj_list_insert_after(&cp->free_list[i], pool);
+ cp->capacity += pool->capacity;
+}
+
+static void cpool_dump_status(pj_pool_factory *factory, pj_bool_t detail )
+{
+#if PJ_LOG_MAX_LEVEL >= 3
+ pj_caching_pool *cp = (pj_caching_pool*)factory;
+ PJ_LOG(3,("cachpool", " Dumping caching pool:"));
+ PJ_LOG(3,("cachpool", " Capacity=%u, max_capacity=%u, used_cnt=%u", \
+ cp->capacity, cp->max_capacity, cp->used_count));
+ if (detail) {
+ pj_pool_t *pool = cp->used_list.next;
+ pj_uint32_t total_used = 0, total_capacity = 0;
+ PJ_LOG(3,("cachpool", " Dumping all active pools:"));
+ while (pool != (void*)&cp->used_list) {
+ PJ_LOG(3,("cachpool", " %12s: %8d of %8d (%d%%) used", pool->obj_name,
+ pool->used_size, pool->capacity,
+ pool->used_size*100/pool->capacity));
+ total_used += pool->used_size;
+ total_capacity += pool->capacity;
+ pool = pool->next;
+ }
+ PJ_LOG(3,("cachpool", " Total %9d of %9d (%d %%) used!",
+ total_used, total_capacity,
+ total_used * 100 / total_capacity));
+ }
+#endif
+}
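A hedged end-to-end sketch of the caching pool (illustrative, not part of this change; pj_pool_factory_default_policy is the malloc-based policy from pool_policy_malloc.c below, and pj_pool_create()/pj_pool_release() are the wrappers from pool.h that route through the factory, so a released pool lands on the free list instead of being freed):

    #include <pj/pool.h>

    static void caching_pool_demo(void)
    {
        pj_caching_pool cp;
        pj_pool_t *pool;

        /* Keep at most 64 KB worth of released pools cached. */
        pj_caching_pool_init(&cp, &pj_pool_factory_default_policy, 64 * 1024);

        pool = pj_pool_create(&cp.factory, "app%p", 4096, 4096, NULL);
        if (pool) {
            /* ... allocate from the pool with pj_pool_alloc() ... */
            pj_pool_release(pool);   /* recycled into cp's free list */
        }

        /* Destroys everything left in the used and free lists. */
        pj_caching_pool_destroy(&cp);
    }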
diff --git a/pjlib/src/pj/pool_dbg_win32.c b/pjlib/src/pj/pool_dbg_win32.c
new file mode 100644
index 00000000..4419d048
--- /dev/null
+++ b/pjlib/src/pj/pool_dbg_win32.c
@@ -0,0 +1,226 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/pool_dbg_win32.c 4 9/17/05 10:37a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/pool_dbg_win32.c $
+ *
+ * 4 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ */
+#include <pj/pool.h>
+
+/* Only if we ARE debugging memory allocations. */
+#if PJ_POOL_DEBUG
+
+#include <pj/list.h>
+#include <pj/log.h>
+
+#include <stdlib.h>
+#include <stdio.h>
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+typedef struct memory_entry
+{
+ PJ_DECL_LIST_MEMBER(struct memory_entry)
+ void *ptr;
+ char *file;
+ int line;
+} memory_entry;
+
+struct pj_pool_t
+{
+ char obj_name[32];
+ HANDLE hHeap;
+ memory_entry first;
+ pj_size_t initial_size;
+ pj_size_t increment;
+ pj_size_t used_size;
+ char *file;
+ int line;
+};
+
+PJ_DEF(void) pj_pool_set_functions( void *(*malloc_func)(pj_size_t),
+ void (*free_func)(void *ptr, pj_size_t))
+{
+ /* Ignored. */
+ PJ_CHECK_STACK();
+ PJ_UNUSED_ARG(malloc_func)
+ PJ_UNUSED_ARG(free_func)
+}
+
+PJ_DEF(pj_pool_t*) pj_pool_create_dbg( const char *name,
+ pj_size_t initial_size,
+ pj_size_t increment_size,
+ pj_pool_callback *callback,
+ char *file, int line)
+{
+ pj_pool_t *pool;
+ HANDLE hHeap;
+
+ PJ_CHECK_STACK();
+ PJ_UNUSED_ARG(callback)
+
+ /* Create Win32 heap for the pool. */
+ hHeap = HeapCreate(HEAP_GENERATE_EXCEPTIONS|HEAP_NO_SERIALIZE,
+ initial_size, 0);
+ if (!hHeap) {
+ return NULL;
+ }
+
+
+ /* Create and initialize the pool structure. */
+ pool = HeapAlloc(hHeap, HEAP_GENERATE_EXCEPTIONS|HEAP_NO_SERIALIZE,
+ sizeof(*pool));
+ memset(pool, 0, sizeof(*pool));
+ pool->file = file;
+ pool->line = line;
+ pool->hHeap = hHeap;
+ pool->initial_size = initial_size;
+ pool->increment = increment_size;
+ pool->used_size = 0;
+
+ /* Set name. */
+ if (name) {
+ if (strchr(name, '%') != NULL) {
+ sprintf(pool->obj_name, name, pool);
+ } else {
+ strncpy(pool->obj_name, name, PJ_MAX_OBJ_NAME);
+ }
+ } else {
+ pool->obj_name[0] = '\0';
+ }
+
+ /* List pool's entry. */
+ pj_list_init(&pool->first);
+
+ PJ_LOG(3,(pool->obj_name, "Pool created"));
+ return pool;
+}
+
+PJ_DEF(void) pj_pool_destroy( pj_pool_t *pool )
+{
+ memory_entry *entry;
+
+ PJ_CHECK_STACK();
+
+ PJ_LOG(3,(pool->obj_name, "Destoying pool, init_size=%u, used=%u",
+ pool->initial_size, pool->used_size));
+
+ if (!HeapValidate( pool->hHeap, HEAP_NO_SERIALIZE, pool)) {
+ PJ_LOG(2,(pool->obj_name, "Corrupted pool structure, allocated in %s:%d",
+ pool->file, pool->line));
+ }
+
+ /* Validate all memory entries in the pool. */
+ for (entry=pool->first.next; entry != &pool->first; entry = entry->next) {
+ if (!HeapValidate( pool->hHeap, HEAP_NO_SERIALIZE, entry)) {
+ PJ_LOG(2,(pool->obj_name, "Corrupted pool entry, allocated in %s:%d",
+ entry->file, entry->line));
+ }
+
+ if (!HeapValidate( pool->hHeap, HEAP_NO_SERIALIZE, entry->ptr)) {
+ PJ_LOG(2,(pool->obj_name, "Corrupted pool memory, allocated in %s:%d",
+ entry->file, entry->line));
+ }
+ }
+
+ /* Destroy heap. */
+ HeapDestroy(pool->hHeap);
+}
+
+PJ_DEF(void) pj_pool_reset( pj_pool_t *pool )
+{
+ /* Do nothing. */
+ PJ_CHECK_STACK();
+ PJ_UNUSED_ARG(pool)
+}
+
+PJ_DEF(pj_size_t) pj_pool_get_capacity( pj_pool_t *pool )
+{
+ PJ_CHECK_STACK();
+ PJ_UNUSED_ARG(pool)
+ return 0;
+}
+
+PJ_DEF(pj_size_t) pj_pool_get_used_size( pj_pool_t *pool )
+{
+ PJ_CHECK_STACK();
+ PJ_UNUSED_ARG(pool)
+ return 0;
+}
+
+PJ_DEF(pj_size_t) pj_pool_get_request_count( pj_pool_t *pool )
+{
+ PJ_CHECK_STACK();
+ PJ_UNUSED_ARG(pool)
+ return 0;
+}
+
+PJ_DEF(void*) pj_pool_alloc_dbg( pj_pool_t *pool, pj_size_t size,
+ char *file, int line)
+{
+ memory_entry *entry;
+ int entry_size;
+
+ PJ_CHECK_STACK();
+
+ entry_size = sizeof(*entry);
+ entry = HeapAlloc(pool->hHeap, HEAP_GENERATE_EXCEPTIONS|HEAP_NO_SERIALIZE,
+ entry_size);
+ entry->file = file;
+ entry->line = line;
+ entry->ptr = HeapAlloc(pool->hHeap, HEAP_GENERATE_EXCEPTIONS|HEAP_NO_SERIALIZE,
+ size);
+ pj_list_insert_before( &pool->first, entry);
+
+ pool->used_size += size;
+ return entry->ptr;
+}
+
+PJ_DEF(void*) pj_pool_calloc_dbg( pj_pool_t *pool, pj_size_t count, pj_size_t elem,
+ char *file, int line)
+{
+ void *ptr;
+
+ PJ_CHECK_STACK();
+
+ ptr = pj_pool_alloc_dbg(pool, count*elem, file, line);
+ memset(ptr, 0, count*elem);
+ return ptr;
+}
+
+
+PJ_DEF(void) pj_pool_pool_init( pj_pool_pool_t *pool_pool,
+ pj_size_t max_capacity)
+{
+ PJ_CHECK_STACK();
+ PJ_UNUSED_ARG(pool_pool)
+ PJ_UNUSED_ARG(max_capacity)
+}
+
+PJ_DEF(void) pj_pool_pool_destroy( pj_pool_pool_t *pool_pool )
+{
+ PJ_CHECK_STACK();
+ PJ_UNUSED_ARG(pool_pool)
+}
+
+PJ_DEF(pj_pool_t*) pj_pool_pool_create_pool( pj_pool_pool_t *pool_pool,
+ const char *name,
+ pj_size_t initial_size,
+ pj_size_t increment_size,
+ pj_pool_callback *callback)
+{
+ PJ_CHECK_STACK();
+ PJ_UNUSED_ARG(pool_pool)
+ return pj_pool_create(name, initial_size, increment_size, callback);
+}
+
+PJ_DEF(void) pj_pool_pool_release_pool( pj_pool_pool_t *pool_pool,
+ pj_pool_t *pool )
+{
+ PJ_CHECK_STACK();
+ PJ_UNUSED_ARG(pool_pool)
+ pj_pool_destroy(pool);
+}
+
+
+#endif /* PJ_POOL_DEBUG */
diff --git a/pjlib/src/pj/pool_policy_kmalloc.c b/pjlib/src/pj/pool_policy_kmalloc.c
new file mode 100644
index 00000000..4accff7d
--- /dev/null
+++ b/pjlib/src/pj/pool_policy_kmalloc.c
@@ -0,0 +1,54 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/pool_policy_kmalloc.c 3 10/29/05 11:51a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/pool_policy_kmalloc.c $
+ *
+ * 3 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 9/22/05 10:40a Bennylp
+ * Created.
+ *
+ */
+#include <pj/pool.h>
+#include <pj/except.h>
+#include <pj/os.h>
+
+
+static void *default_block_alloc(pj_pool_factory *factory, pj_size_t size)
+{
+ PJ_CHECK_STACK();
+ PJ_UNUSED_ARG(factory);
+
+ return kmalloc(size, GFP_ATOMIC);
+}
+
+static void default_block_free(pj_pool_factory *factory,
+ void *mem, pj_size_t size)
+{
+ PJ_CHECK_STACK();
+ PJ_UNUSED_ARG(factory);
+ PJ_UNUSED_ARG(size);
+
+ kfree(mem);
+}
+
+static void default_pool_callback(pj_pool_t *pool, pj_size_t size)
+{
+ PJ_CHECK_STACK();
+ PJ_UNUSED_ARG(pool);
+ PJ_UNUSED_ARG(size);
+
+ PJ_THROW(PJ_NO_MEMORY_EXCEPTION);
+}
+
+pj_pool_factory_policy pj_pool_factory_default_policy =
+{
+ &default_block_alloc,
+ &default_block_free,
+ &default_pool_callback,
+ 0
+};
+
diff --git a/pjlib/src/pj/pool_policy_malloc.c b/pjlib/src/pj/pool_policy_malloc.c
new file mode 100644
index 00000000..12eb7c34
--- /dev/null
+++ b/pjlib/src/pj/pool_policy_malloc.c
@@ -0,0 +1,58 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/pool_policy_malloc.c 2 10/14/05 12:26a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/pool_policy_malloc.c $
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 9/21/05 1:37p Bennylp
+ * Renamed from pool_policy.c
+ *
+ * 3 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ */
+#include <pj/pool.h>
+#include <pj/except.h>
+#include <pj/os.h>
+#include <pj/compat/malloc.h>
+
+/*
+ * This file contains pool default policy definition and implementation.
+ */
+
+
+static void *default_block_alloc(pj_pool_factory *factory, pj_size_t size)
+{
+ PJ_CHECK_STACK();
+ PJ_UNUSED_ARG(factory);
+ PJ_UNUSED_ARG(size);
+
+ return malloc(size);
+}
+
+static void default_block_free(pj_pool_factory *factory, void *mem, pj_size_t size)
+{
+ PJ_CHECK_STACK();
+ PJ_UNUSED_ARG(factory);
+ PJ_UNUSED_ARG(size);
+
+ free(mem);
+}
+
+static void default_pool_callback(pj_pool_t *pool, pj_size_t size)
+{
+ PJ_CHECK_STACK();
+ PJ_UNUSED_ARG(pool);
+ PJ_UNUSED_ARG(size);
+
+ PJ_THROW(PJ_NO_MEMORY_EXCEPTION);
+}
+
+pj_pool_factory_policy pj_pool_factory_default_policy =
+{
+ &default_block_alloc,
+ &default_block_free,
+ &default_pool_callback,
+ 0
+};
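
The policy struct above bundles the block allocator, the block deallocator, and the out-of-memory callback behind a single pj_pool_factory_policy value. As an illustrative sketch (not part of the committed sources), a caller could supply its own policy in the same shape, for instance a malloc-based variant that logs every block allocation; the "policy" log sender name is an arbitrary placeholder.

    #include <pj/pool.h>
    #include <pj/log.h>
    #include <pj/except.h>
    #include <pj/os.h>
    #include <pj/compat/malloc.h>

    static void *logging_block_alloc(pj_pool_factory *factory, pj_size_t size)
    {
        PJ_CHECK_STACK();
        PJ_UNUSED_ARG(factory);

        PJ_LOG(5,("policy", "allocating pool block of %u bytes", size));
        return malloc(size);
    }

    static void logging_block_free(pj_pool_factory *factory, void *mem, pj_size_t size)
    {
        PJ_CHECK_STACK();
        PJ_UNUSED_ARG(factory);
        PJ_UNUSED_ARG(size);

        free(mem);
    }

    static void logging_pool_callback(pj_pool_t *pool, pj_size_t size)
    {
        PJ_CHECK_STACK();
        PJ_UNUSED_ARG(pool);
        PJ_UNUSED_ARG(size);

        PJ_THROW(PJ_NO_MEMORY_EXCEPTION);
    }

    pj_pool_factory_policy my_logging_policy =
    {
        &logging_block_alloc,
        &logging_block_free,
        &logging_pool_callback,
        0
    };

An application would then reference my_logging_policy wherever it would otherwise use pj_pool_factory_default_policy.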
diff --git a/pjlib/src/pj/rand.c b/pjlib/src/pj/rand.c
new file mode 100644
index 00000000..6d25670f
--- /dev/null
+++ b/pjlib/src/pj/rand.c
@@ -0,0 +1,29 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/rand.c 3 10/14/05 12:26a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/rand.c $
+ *
+ * 3 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 2 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ * 1 9/15/05 8:40p Bennylp
+ * Created.
+ */
+#include <pj/rand.h>
+#include <pj/os.h>
+#include <pj/compat/rand.h>
+
+PJ_DEF(void) pj_srand(unsigned int seed)
+{
+ PJ_CHECK_STACK();
+ platform_srand(seed);
+}
+
+PJ_DEF(int) pj_rand(void)
+{
+ PJ_CHECK_STACK();
+ return platform_rand();
+}
+
diff --git a/pjlib/src/pj/rbtree.c b/pjlib/src/pj/rbtree.c
new file mode 100644
index 00000000..582a6f75
--- /dev/null
+++ b/pjlib/src/pj/rbtree.c
@@ -0,0 +1,416 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/rbtree.c 5 9/17/05 10:37a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/rbtree.c $
+ *
+ * 5 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ */
+#include <pj/rbtree.h>
+#include <pj/os.h>
+
+static void left_rotate( pj_rbtree *tree, pj_rbtree_node *node )
+{
+ pj_rbtree_node *rnode, *parent;
+
+ PJ_CHECK_STACK();
+
+ rnode = node->right;
+ if (rnode == tree->null)
+ return;
+
+ node->right = rnode->left;
+ if (rnode->left != tree->null)
+ rnode->left->parent = node;
+ parent = node->parent;
+ rnode->parent = parent;
+ if (parent != tree->null) {
+ if (parent->left == node)
+ parent->left = rnode;
+ else
+ parent->right = rnode;
+ } else {
+ tree->root = rnode;
+ }
+ rnode->left = node;
+ node->parent = rnode;
+}
+
+static void right_rotate( pj_rbtree *tree, pj_rbtree_node *node )
+{
+ pj_rbtree_node *lnode, *parent;
+
+ PJ_CHECK_STACK();
+
+ lnode = node->left;
+ if (lnode == tree->null)
+ return;
+
+ node->left = lnode->right;
+ if (lnode->right != tree->null)
+ lnode->right->parent = node;
+ parent = node->parent;
+ lnode->parent = parent;
+
+ if (parent != tree->null) {
+ if (parent->left == node)
+ parent->left = lnode;
+ else
+ parent->right = lnode;
+ } else {
+ tree->root = lnode;
+ }
+ lnode->right = node;
+ node->parent = lnode;
+}
+
+static void insert_fixup( pj_rbtree *tree, pj_rbtree_node *node )
+{
+ pj_rbtree_node *temp, *parent;
+
+ PJ_CHECK_STACK();
+
+ while (node != tree->root && node->parent->color == PJ_RBCOLOR_RED) {
+ parent = node->parent;
+ if (parent == parent->parent->left) {
+ temp = parent->parent->right;
+ if (temp->color == PJ_RBCOLOR_RED) {
+ temp->color = PJ_RBCOLOR_BLACK;
+ node = parent;
+ node->color = PJ_RBCOLOR_BLACK;
+ node = node->parent;
+ node->color = PJ_RBCOLOR_RED;
+ } else {
+ if (node == parent->right) {
+ node = parent;
+ left_rotate(tree, node);
+ }
+ temp = node->parent;
+ temp->color = PJ_RBCOLOR_BLACK;
+ temp = temp->parent;
+ temp->color = PJ_RBCOLOR_RED;
+ right_rotate( tree, temp);
+ }
+ } else {
+ temp = parent->parent->left;
+ if (temp->color == PJ_RBCOLOR_RED) {
+ temp->color = PJ_RBCOLOR_BLACK;
+ node = parent;
+ node->color = PJ_RBCOLOR_BLACK;
+ node = node->parent;
+ node->color = PJ_RBCOLOR_RED;
+ } else {
+ if (node == parent->left) {
+ node = parent;
+ right_rotate(tree, node);
+ }
+ temp = node->parent;
+ temp->color = PJ_RBCOLOR_BLACK;
+ temp = temp->parent;
+ temp->color = PJ_RBCOLOR_RED;
+ left_rotate(tree, temp);
+ }
+ }
+ }
+
+ tree->root->color = PJ_RBCOLOR_BLACK;
+}
+
+
+static void delete_fixup( pj_rbtree *tree, pj_rbtree_node *node )
+{
+ pj_rbtree_node *temp;
+
+ PJ_CHECK_STACK();
+
+ while (node != tree->root && node->color == PJ_RBCOLOR_BLACK) {
+ if (node->parent->left == node) {
+ temp = node->parent->right;
+ if (temp->color == PJ_RBCOLOR_RED) {
+ temp->color = PJ_RBCOLOR_BLACK;
+ node->parent->color = PJ_RBCOLOR_RED;
+ left_rotate(tree, node->parent);
+ temp = node->parent->right;
+ }
+ if (temp->left->color == PJ_RBCOLOR_BLACK &&
+ temp->right->color == PJ_RBCOLOR_BLACK)
+ {
+ temp->color = PJ_RBCOLOR_RED;
+ node = node->parent;
+ } else {
+ if (temp->right->color == PJ_RBCOLOR_BLACK) {
+ temp->left->color = PJ_RBCOLOR_BLACK;
+ temp->color = PJ_RBCOLOR_RED;
+ right_rotate( tree, temp);
+ temp = node->parent->right;
+ }
+ temp->color = node->parent->color;
+ temp->right->color = PJ_RBCOLOR_BLACK;
+ node->parent->color = PJ_RBCOLOR_BLACK;
+ left_rotate(tree, node->parent);
+ node = tree->root;
+ }
+ } else {
+ temp = node->parent->left;
+ if (temp->color == PJ_RBCOLOR_RED) {
+ temp->color = PJ_RBCOLOR_BLACK;
+ node->parent->color = PJ_RBCOLOR_RED;
+ right_rotate( tree, node->parent);
+ temp = node->parent->left;
+ }
+ if (temp->right->color == PJ_RBCOLOR_BLACK &&
+ temp->left->color == PJ_RBCOLOR_BLACK)
+ {
+ temp->color = PJ_RBCOLOR_RED;
+ node = node->parent;
+ } else {
+ if (temp->left->color == PJ_RBCOLOR_BLACK) {
+ temp->right->color = PJ_RBCOLOR_BLACK;
+ temp->color = PJ_RBCOLOR_RED;
+ left_rotate( tree, temp);
+ temp = node->parent->left;
+ }
+ temp->color = node->parent->color;
+ node->parent->color = PJ_RBCOLOR_BLACK;
+ temp->left->color = PJ_RBCOLOR_BLACK;
+ right_rotate(tree, node->parent);
+ node = tree->root;
+ }
+ }
+ }
+
+ node->color = PJ_RBCOLOR_BLACK;
+}
+
+
+PJ_DEF(void) pj_rbtree_init( pj_rbtree *tree, pj_rbtree_comp *comp )
+{
+ PJ_CHECK_STACK();
+
+ tree->null = tree->root = &tree->null_node;
+ tree->null->key = NULL;
+ tree->null->user_data = NULL;
+ tree->size = 0;
+ tree->null->left = tree->null->right = tree->null->parent = tree->null;
+ tree->null->color = PJ_RBCOLOR_BLACK;
+ tree->comp = comp;
+}
+
+PJ_DEF(pj_rbtree_node*) pj_rbtree_first( pj_rbtree *tree )
+{
+ register pj_rbtree_node *node = tree->root;
+ register pj_rbtree_node *null = tree->null;
+
+ PJ_CHECK_STACK();
+
+ while (node->left != null)
+ node = node->left;
+ return node != null ? node : NULL;
+}
+
+PJ_DEF(pj_rbtree_node*) pj_rbtree_last( pj_rbtree *tree )
+{
+ register pj_rbtree_node *node = tree->root;
+ register pj_rbtree_node *null = tree->null;
+
+ PJ_CHECK_STACK();
+
+ while (node->right != null)
+ node = node->right;
+ return node != null ? node : NULL;
+}
+
+PJ_DEF(pj_rbtree_node*) pj_rbtree_next( pj_rbtree *tree,
+ register pj_rbtree_node *node )
+{
+ register pj_rbtree_node *null = tree->null;
+
+ PJ_CHECK_STACK();
+
+ if (node->right != null) {
+ for (node=node->right; node->left!=null; node = node->left)
+ /* void */;
+ } else {
+ register pj_rbtree_node *temp = node->parent;
+ while (temp!=null && temp->right==node) {
+ node = temp;
+ temp = temp->parent;
+ }
+ node = temp;
+ }
+ return node != null ? node : NULL;
+}
+
+PJ_DEF(pj_rbtree_node*) pj_rbtree_prev( pj_rbtree *tree,
+ register pj_rbtree_node *node )
+{
+ register pj_rbtree_node *null = tree->null;
+
+ PJ_CHECK_STACK();
+
+ if (node->left != null) {
+ for (node=node->left; node->right!=null; node=node->right)
+ /* void */;
+ } else {
+ register pj_rbtree_node *temp = node->parent;
+ while (temp!=null && temp->left==node) {
+ node = temp;
+ temp = temp->parent;
+ }
+ node = temp;
+ }
+ return node != null ? node : NULL;
+}
+
+PJ_DEF(int) pj_rbtree_insert( pj_rbtree *tree,
+ pj_rbtree_node *element )
+{
+ int rv = 0;
+ pj_rbtree_node *node, *parent = tree->null,
+ *null = tree->null;
+ pj_rbtree_comp *comp = tree->comp;
+
+ PJ_CHECK_STACK();
+
+ node = tree->root;
+ while (node != null) {
+ rv = (*comp)(element->key, node->key);
+ if (rv == 0) {
+ /* found match, i.e. an entry with an equal key already exists */
+ return -1;
+ }
+ parent = node;
+ node = rv < 0 ? node->left : node->right;
+ }
+
+ element->color = PJ_RBCOLOR_RED;
+ element->left = element->right = null;
+
+ node = element;
+ if (parent != null) {
+ node->parent = parent;
+ if (rv < 0)
+ parent->left = node;
+ else
+ parent->right = node;
+ insert_fixup( tree, node);
+ } else {
+ tree->root = node;
+ node->parent = null;
+ node->color = PJ_RBCOLOR_BLACK;
+ }
+
+ ++tree->size;
+ return 0;
+}
+
+
+PJ_DEF(pj_rbtree_node*) pj_rbtree_find( pj_rbtree *tree,
+ const void *key )
+{
+ int rv;
+ pj_rbtree_node *node = tree->root;
+ pj_rbtree_node *null = tree->null;
+ pj_rbtree_comp *comp = tree->comp;
+
+ while (node != null) {
+ rv = (*comp)(key, node->key);
+ if (rv == 0)
+ return node;
+ node = rv < 0 ? node->left : node->right;
+ }
+ return node != null ? node : NULL;
+}
+
+PJ_DEF(pj_rbtree_node*) pj_rbtree_erase( pj_rbtree *tree,
+ pj_rbtree_node *node )
+{
+ pj_rbtree_node *succ;
+ pj_rbtree_node *null = tree->null;
+ pj_rbtree_node *child;
+ pj_rbtree_node *parent;
+
+ PJ_CHECK_STACK();
+
+ if (node->left == null || node->right == null) {
+ succ = node;
+ } else {
+ for (succ=node->right; succ->left!=null; succ=succ->left)
+ /* void */;
+ }
+
+ child = succ->left != null ? succ->left : succ->right;
+ parent = succ->parent;
+ child->parent = parent;
+
+ if (parent != null) {
+ if (parent->left == succ)
+ parent->left = child;
+ else
+ parent->right = child;
+ } else
+ tree->root = child;
+
+ if (succ != node) {
+ succ->parent = node->parent;
+ succ->left = node->left;
+ succ->right = node->right;
+ succ->color = node->color;
+
+ parent = node->parent;
+ if (parent != null) {
+ if (parent->left==node)
+ parent->left=succ;
+ else
+ parent->right=succ;
+ }
+ if (node->left != null)
+ node->left->parent = succ;
+ if (node->right != null)
+ node->right->parent = succ;
+
+ if (tree->root == node)
+ tree->root = succ;
+ }
+
+ if (succ->color == PJ_RBCOLOR_BLACK) {
+ if (child != null)
+ delete_fixup(tree, child);
+ tree->null->color = PJ_RBCOLOR_BLACK;
+ }
+
+ --tree->size;
+ return node;
+}
+
+
+PJ_DEF(unsigned) pj_rbtree_max_height( pj_rbtree *tree,
+ pj_rbtree_node *node )
+{
+ unsigned l, r;
+
+ PJ_CHECK_STACK();
+
+ if (node==NULL)
+ node = tree->root;
+
+ l = node->left != tree->null ? pj_rbtree_max_height(tree,node->left)+1 : 0;
+ r = node->right != tree->null ? pj_rbtree_max_height(tree,node->right)+1 : 0;
+ return l > r ? l : r;
+}
+
+PJ_DEF(unsigned) pj_rbtree_min_height( pj_rbtree *tree,
+ pj_rbtree_node *node )
+{
+ unsigned l, r;
+
+ PJ_CHECK_STACK();
+
+ if (node==NULL)
+ node=tree->root;
+
+ l = (node->left != tree->null) ? pj_rbtree_min_height(tree,node->left)+1 : 0;
+ r = (node->right != tree->null) ? pj_rbtree_min_height(tree,node->right)+1 : 0;
+ return l > r ? r : l;
+}
+
+
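
The functions above form the complete red-black tree API in this file. A short usage sketch follows; it assumes pj_rbtree_node exposes the key and user_data members set up in pj_rbtree_init(), and that pj_rbtree_comp follows the strcmp-style contract implied by pj_rbtree_insert() and pj_rbtree_find().

    #include <pj/rbtree.h>
    #include <string.h>

    /* strcmp-style key comparison, matching the (*comp)(key1, key2) calls above. */
    static int key_cmp(const void *key1, const void *key2)
    {
        return strcmp((const char*)key1, (const char*)key2);
    }

    static void rbtree_demo(void)
    {
        pj_rbtree tree;
        pj_rbtree_node nodes[2];
        pj_rbtree_node *it;

        pj_rbtree_init(&tree, &key_cmp);

        /* The caller owns the node storage; the tree only links the nodes. */
        nodes[0].key = "alice";  nodes[0].user_data = (void*)0x1;
        nodes[1].key = "bob";    nodes[1].user_data = (void*)0x2;
        pj_rbtree_insert(&tree, &nodes[0]);
        pj_rbtree_insert(&tree, &nodes[1]);

        /* In-order traversal; pj_rbtree_next() returns NULL past the last node. */
        for (it = pj_rbtree_first(&tree); it != NULL; it = pj_rbtree_next(&tree, it))
            ;

        /* A duplicate key is rejected: this insert returns -1. */
        pj_rbtree_insert(&tree, &nodes[1]);

        /* Lookup by key; pj_rbtree_find() returns NULL when the key is absent. */
        it = pj_rbtree_find(&tree, "bob");
    }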
diff --git a/pjlib/src/pj/scanner.c b/pjlib/src/pj/scanner.c
new file mode 100644
index 00000000..08c7c757
--- /dev/null
+++ b/pjlib/src/pj/scanner.c
@@ -0,0 +1,556 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/scanner.c 9 10/14/05 12:26a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/scanner.c $
+ *
+ * 9 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 8 9/21/05 1:39p Bennylp
+ * Periodic checkin for backup.
+ *
+ * 7 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ */
+#include <pj/scanner.h>
+#include <pj/string.h>
+#include <pj/except.h>
+#include <pj/os.h>
+
+#define PJ_SCAN_IS_SPACE(c) ((c)==' ' || (c)=='\t')
+#define PJ_SCAN_IS_NEWLINE(c) ((c)=='\r' || (c)=='\n')
+#define PJ_SCAN_CHECK_EOF(s) (s != end)
+
+
+static void pj_scan_syntax_err(pj_scanner *scanner)
+{
+ (*scanner->callback)(scanner);
+}
+
+PJ_DEF(void) pj_cs_init( pj_char_spec cs)
+{
+ PJ_CHECK_STACK();
+ memset(cs, 0, sizeof(pj_char_spec));
+}
+
+PJ_DEF(void) pj_cs_set( pj_char_spec cs, int c)
+{
+ PJ_CHECK_STACK();
+ cs[c] = 1;
+}
+
+PJ_DEF(void) pj_cs_add_range( pj_char_spec cs, int cstart, int cend)
+{
+ PJ_CHECK_STACK();
+ while (cstart != cend)
+ cs[cstart++] = 1;
+}
+
+PJ_DEF(void) pj_cs_add_alpha( pj_char_spec cs)
+{
+ pj_cs_add_range( cs, 'a', 'z'+1);
+ pj_cs_add_range( cs, 'A', 'Z'+1);
+}
+
+PJ_DEF(void) pj_cs_add_num( pj_char_spec cs)
+{
+ pj_cs_add_range( cs, '0', '9'+1);
+}
+
+PJ_DEF(void) pj_cs_add_str( pj_char_spec cs, const char *str)
+{
+ PJ_CHECK_STACK();
+ while (*str) {
+ cs[(int)*str] = 1;
+ ++str;
+ }
+}
+
+PJ_DEF(void) pj_cs_del_range( pj_char_spec cs, int cstart, int cend)
+{
+ PJ_CHECK_STACK();
+ while (cstart != cend)
+ cs[cstart++] = 0;
+}
+
+PJ_DEF(void) pj_cs_del_str( pj_char_spec cs, const char *str)
+{
+ PJ_CHECK_STACK();
+ while (*str) {
+ cs[(int)*str] = 0;
+ ++str;
+ }
+}
+
+PJ_DEF(void) pj_cs_invert( pj_char_spec cs )
+{
+ unsigned i;
+ PJ_CHECK_STACK();
+ for (i=0; i<sizeof(pj_char_spec)/sizeof(cs[0]); ++i) {
+ cs[i] = (pj_char_spec_element_t) !cs[i];
+ }
+}
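
The pj_cs_xxx functions above build a character specification (a lookup table indexed by character value, as the sizeof arithmetic in pj_cs_invert() shows). A minimal sketch of composing one, here describing token characters made of letters, digits, and a few punctuation marks:

    #include <pj/scanner.h>

    static void init_token_spec(pj_char_spec spec)
    {
        pj_cs_init(spec);            /* clear every entry             */
        pj_cs_add_alpha(spec);       /* 'a'..'z' and 'A'..'Z'         */
        pj_cs_add_num(spec);         /* '0'..'9'                      */
        pj_cs_add_str(spec, "-_.");  /* a few extra token characters  */
    }

A caller declares a pj_char_spec variable and passes it in; the array decays to a pointer inside these functions, which is why pj_cs_init() clears sizeof(pj_char_spec) bytes rather than the sizeof of its parameter.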
+
+PJ_DEF(void) pj_scan_init( pj_scanner *scanner, char *bufstart, int buflen,
+ unsigned options, pj_syn_err_func_ptr callback )
+{
+ PJ_CHECK_STACK();
+
+ scanner->begin = scanner->curptr = bufstart;
+ scanner->end = bufstart + buflen;
+ scanner->line = 1;
+ scanner->col = 1;
+ scanner->callback = callback;
+ scanner->skip_ws = options;
+
+ if (scanner->skip_ws)
+ pj_scan_skip_whitespace(scanner);
+
+ scanner->col = scanner->curptr - scanner->begin + 1;
+}
+
+
+PJ_DEF(void) pj_scan_fini( pj_scanner *scanner )
+{
+ PJ_CHECK_STACK();
+ PJ_UNUSED_ARG(scanner);
+}
+
+PJ_DEF(void) pj_scan_skip_whitespace( pj_scanner *scanner )
+{
+ register char *s = scanner->curptr;
+
+ PJ_CHECK_STACK();
+
+ while (PJ_SCAN_IS_SPACE(*s)) {
+ ++s;
+ }
+
+ if ((scanner->skip_ws & PJ_SCAN_AUTOSKIP_NEWLINE) && PJ_SCAN_IS_NEWLINE(*s)) {
+ for (;;) {
+ if (*s == '\r') {
+ ++s;
+ if (*s == '\n') ++s;
+ ++scanner->line;
+ scanner->col = 1;
+ scanner->curptr = s;
+ } else if (*s == '\n') {
+ ++s;
+ ++scanner->line;
+ scanner->col = 1;
+ scanner->curptr = s;
+ } else if (PJ_SCAN_IS_SPACE(*s)) {
+ do {
+ ++s;
+ } while (PJ_SCAN_IS_SPACE(*s));
+ } else {
+ break;
+ }
+ }
+ }
+
+ if (PJ_SCAN_IS_NEWLINE(*s) && (scanner->skip_ws & PJ_SCAN_AUTOSKIP_WS_HEADER)==PJ_SCAN_AUTOSKIP_WS_HEADER) {
+ /* Check for header continuation. */
+ scanner->col += s - scanner->curptr;
+ scanner->curptr = s;
+
+ if (*s == '\r') {
+ ++s;
+ }
+ if (*s == '\n') {
+ ++s;
+ }
+ if (PJ_SCAN_IS_SPACE(*s)) {
+ register char *t = s;
+ do {
+ ++t;
+ } while (PJ_SCAN_IS_SPACE(*t));
+
+ ++scanner->line;
+ scanner->col = t-s;
+ scanner->curptr = t;
+ }
+ } else {
+ scanner->col += s - scanner->curptr;
+ scanner->curptr = s;
+ }
+}
+
+PJ_DEF(int) pj_scan_peek( pj_scanner *scanner,
+ const pj_char_spec spec, pj_str_t *out)
+{
+ register char *s = scanner->curptr;
+ register char *end = scanner->end;
+
+ PJ_CHECK_STACK();
+
+ if (pj_scan_is_eof(scanner)) {
+ pj_scan_syntax_err(scanner);
+ return -1;
+ }
+
+ while (PJ_SCAN_CHECK_EOF(s) && pj_cs_match(spec, *s))
+ ++s;
+
+ pj_strset3(out, scanner->curptr, s);
+ return s < scanner->end ? *s : 0;
+}
+
+
+PJ_DEF(int) pj_scan_peek_n( pj_scanner *scanner,
+ pj_size_t len, pj_str_t *out)
+{
+ char *endpos = scanner->curptr + len;
+
+ PJ_CHECK_STACK();
+
+ if (endpos > scanner->end) {
+ pj_scan_syntax_err(scanner);
+ return -1;
+ }
+
+ pj_strset(out, scanner->curptr, len);
+ return *endpos;
+}
+
+
+PJ_DEF(int) pj_scan_peek_until( pj_scanner *scanner,
+ const pj_char_spec spec,
+ pj_str_t *out)
+{
+ register char *s = scanner->curptr;
+ register char *end = scanner->end;
+
+ PJ_CHECK_STACK();
+
+ if (pj_scan_is_eof(scanner)) {
+ pj_scan_syntax_err(scanner);
+ return -1;
+ }
+
+ while (PJ_SCAN_CHECK_EOF(s) && !pj_cs_match( spec, *s))
+ ++s;
+
+ pj_strset3(out, scanner->curptr, s);
+ return s!=scanner->end ? *s : 0;
+}
+
+
+PJ_DEF(void) pj_scan_get( pj_scanner *scanner,
+ const pj_char_spec spec, pj_str_t *out)
+{
+ register char *s = scanner->curptr;
+ register char *end = scanner->end;
+ char *start = s;
+
+ PJ_CHECK_STACK();
+
+ if (pj_scan_is_eof(scanner) || !pj_cs_match(spec, *s)) {
+ pj_scan_syntax_err(scanner);
+ return;
+ }
+
+ do {
+ ++s;
+ } while (PJ_SCAN_CHECK_EOF(s) && pj_cs_match(spec, *s));
+
+ pj_strset3(out, scanner->curptr, s);
+
+ scanner->col += (s - start);
+ scanner->curptr = s;
+
+ if (scanner->skip_ws) {
+ pj_scan_skip_whitespace(scanner);
+ }
+}
+
+
+PJ_DEF(void) pj_scan_get_quote( pj_scanner *scanner,
+ int begin_quote, int end_quote,
+ pj_str_t *out)
+{
+ register char *s = scanner->curptr;
+ register char *end = scanner->end;
+ char *start = s;
+
+ PJ_CHECK_STACK();
+
+ /* Check and eat the begin_quote. */
+ if (*s != begin_quote) {
+ pj_scan_syntax_err(scanner);
+ return;
+ }
+ ++s;
+
+ /* Loop until an unescaped end_quote is found.
+ */
+ do {
+ /* advance to the next end_quote candidate. */
+ do {
+ ++s;
+ } while (s != end && *s != '\n' && *s != end_quote);
+
+ /* check that no backslash character precedes the end_quote. */
+ if (*s == end_quote) {
+ if (*(s-1) == '\\') {
+ if (s-2 == scanner->begin) {
+ break;
+ } else {
+ char *q = s-2;
+ char *r = s-2;
+
+ while (r != scanner->begin && *r == '\\') {
+ --r;
+ }
+ /* break from main loop if the end_quote is preceded by an
+ even number of backslashes (i.e. it is not escaped) */
+ if (((unsigned)(q-r) & 0x01) == 1) {
+ break;
+ }
+ }
+ } else {
+ /* end_quote is not preceded by backslash. break now. */
+ break;
+ }
+ } else {
+ /* loop ended by non-end_quote character. break now. */
+ break;
+ }
+ } while (1);
+
+ /* Check and eat the end quote. */
+ if (*s != end_quote) {
+ pj_scan_syntax_err(scanner);
+ return;
+ }
+ ++s;
+
+ pj_strset3(out, scanner->curptr, s);
+
+ scanner->col += (s - start);
+ scanner->curptr = s;
+
+ if (scanner->skip_ws) {
+ pj_scan_skip_whitespace(scanner);
+ }
+}
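
A sketch of driving the scanner with the functions defined in this file; the syntax-error callback signature is taken from pj_scan_syntax_err() above, and here it merely records the failure instead of raising an exception.

    #include <pj/scanner.h>
    #include <pj/os.h>

    static int scan_failed;

    /* Invoked through (*scanner->callback)(scanner) on any syntax error. */
    static void on_syntax_error(pj_scanner *scanner)
    {
        PJ_UNUSED_ARG(scanner);
        scan_failed = 1;
    }

    static void scan_demo(void)
    {
        char input[] = "name \"hello \\\"world\\\"\"";
        pj_scanner scanner;
        pj_char_spec ident_spec;
        pj_str_t ident, quoted;

        pj_cs_init(ident_spec);
        pj_cs_add_alpha(ident_spec);

        pj_scan_init(&scanner, input, sizeof(input)-1,
                     PJ_SCAN_AUTOSKIP_NEWLINE, &on_syntax_error);

        pj_scan_get(&scanner, ident_spec, &ident);       /* ident  <- "name"       */
        pj_scan_get_quote(&scanner, '"', '"', &quoted);  /* quoted <- the quoted
                                                            string, quotes and
                                                            escapes included       */
    }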
+
+PJ_DEF(void) pj_scan_get_n( pj_scanner *scanner,
+ unsigned N, pj_str_t *out)
+{
+ register char *s = scanner->curptr;
+ char *start = scanner->curptr;
+
+ PJ_CHECK_STACK();
+
+ if (scanner->curptr + N > scanner->end) {
+ pj_scan_syntax_err(scanner);
+ return;
+ }
+
+ pj_strset(out, s, N);
+
+ s += N;
+ scanner->col += (s - start);
+ scanner->curptr = s;
+
+ if (scanner->skip_ws) {
+ pj_scan_skip_whitespace(scanner);
+ }
+}
+
+
+PJ_DEF(int) pj_scan_get_char( pj_scanner *scanner )
+{
+ char *start = scanner->curptr;
+ int chr;
+
+ PJ_CHECK_STACK();
+
+ if (pj_scan_is_eof(scanner)) {
+ pj_scan_syntax_err(scanner);
+ return 0;
+ }
+
+ chr = *start;
+ ++scanner->curptr;
+ scanner->col += (scanner->curptr - start);
+
+ if (scanner->skip_ws) {
+ pj_scan_skip_whitespace(scanner);
+ }
+ return chr;
+}
+
+
+PJ_DEF(void) pj_scan_get_newline( pj_scanner *scanner )
+{
+ PJ_CHECK_STACK();
+
+ if (!PJ_SCAN_IS_NEWLINE(*scanner->curptr)) {
+ pj_scan_syntax_err(scanner);
+ return;
+ }
+
+ if (*scanner->curptr == '\r') {
+ ++scanner->curptr;
+ }
+ if (*scanner->curptr == '\n') {
+ ++scanner->curptr;
+ }
+
+ ++scanner->line;
+ scanner->col = 1;
+
+ if (scanner->skip_ws) {
+ pj_scan_skip_whitespace(scanner);
+ }
+}
+
+
+PJ_DEF(void) pj_scan_get_until( pj_scanner *scanner,
+ const pj_char_spec spec, pj_str_t *out)
+{
+ register char *s = scanner->curptr;
+ register char *end = scanner->end;
+ char *start = s;
+
+ PJ_CHECK_STACK();
+
+ if (pj_scan_is_eof(scanner)) {
+ pj_scan_syntax_err(scanner);
+ return;
+ }
+
+ while (PJ_SCAN_CHECK_EOF(s) && !pj_cs_match(spec, *s)) {
+ ++s;
+ }
+
+ pj_strset3(out, scanner->curptr, s);
+
+ scanner->col += (s - start);
+ scanner->curptr = s;
+
+ if (scanner->skip_ws) {
+ pj_scan_skip_whitespace(scanner);
+ }
+}
+
+
+PJ_DEF(void) pj_scan_get_until_ch( pj_scanner *scanner,
+ int until_char, pj_str_t *out)
+{
+ register char *s = scanner->curptr;
+ register char *end = scanner->end;
+ char *start = s;
+
+ PJ_CHECK_STACK();
+
+ if (pj_scan_is_eof(scanner)) {
+ pj_scan_syntax_err(scanner);
+ return;
+ }
+
+ while (PJ_SCAN_CHECK_EOF(s) && *s != until_char) {
+ ++s;
+ }
+
+ pj_strset3(out, scanner->curptr, s);
+
+ scanner->col += (s - start);
+ scanner->curptr = s;
+
+ if (scanner->skip_ws) {
+ pj_scan_skip_whitespace(scanner);
+ }
+}
+
+
+PJ_DEF(void) pj_scan_get_until_chr( pj_scanner *scanner,
+ const char *until_spec, pj_str_t *out)
+{
+ register char *s = scanner->curptr;
+ register char *end = scanner->end;
+ char *start = scanner->curptr;
+
+ PJ_CHECK_STACK();
+
+ if (pj_scan_is_eof(scanner)) {
+ pj_scan_syntax_err(scanner);
+ return;
+ }
+
+ while (PJ_SCAN_CHECK_EOF(s) && !strchr(until_spec, *s)) {
+ ++s;
+ }
+
+ pj_strset3(out, scanner->curptr, s);
+
+ scanner->col += (s - start);
+ scanner->curptr = s;
+
+ if (scanner->skip_ws) {
+ pj_scan_skip_whitespace(scanner);
+ }
+}
+
+PJ_DEF(void) pj_scan_advance_n( pj_scanner *scanner,
+ unsigned N, pj_bool_t skip_ws)
+{
+ char *start = scanner->curptr;
+
+ PJ_CHECK_STACK();
+
+ if (scanner->curptr + N > scanner->end) {
+ pj_scan_syntax_err(scanner);
+ return;
+ }
+
+ scanner->curptr += N;
+ scanner->col += (scanner->curptr - start);
+
+ if (skip_ws) {
+ pj_scan_skip_whitespace(scanner);
+ }
+}
+
+
+PJ_DEF(int) pj_scan_strcmp( pj_scanner *scanner, const char *s, int len)
+{
+ if (scanner->curptr + len > scanner->end) {
+ pj_scan_syntax_err(scanner);
+ return -1;
+ }
+ return strncmp(scanner->curptr, s, len);
+}
+
+
+PJ_DEF(int) pj_scan_stricmp( pj_scanner *scanner, const char *s, int len)
+{
+ if (scanner->curptr + len > scanner->end) {
+ pj_scan_syntax_err(scanner);
+ return -1;
+ }
+ return strnicmp(scanner->curptr, s, len);
+}
+
+
+PJ_DEF(void) pj_scan_save_state( pj_scanner *scanner, pj_scan_state *state)
+{
+ PJ_CHECK_STACK();
+
+ state->curptr = scanner->curptr;
+ state->line = scanner->line;
+ state->col = scanner->col;
+}
+
+
+PJ_DEF(void) pj_scan_restore_state( pj_scanner *scanner,
+ pj_scan_state *state)
+{
+ PJ_CHECK_STACK();
+
+ scanner->curptr = state->curptr;
+ scanner->line = state->line;
+ scanner->col = state->col;
+}
+
+
diff --git a/pjlib/src/pj/sock_bsd.c b/pjlib/src/pj/sock_bsd.c
new file mode 100644
index 00000000..c69b7e25
--- /dev/null
+++ b/pjlib/src/pj/sock_bsd.c
@@ -0,0 +1,572 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/sock_bsd.c 10 10/29/05 11:51a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/sock_bsd.c $
+ *
+ * 10 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 9 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 8 9/21/05 1:39p Bennylp
+ * Periodic checkin for backup.
+ *
+ * 7 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ */
+#include <pj/sock.h>
+#include <pj/os.h>
+#include <pj/assert.h>
+#include <pj/string.h>
+#include <pj/compat/socket.h>
+#include <pj/addr_resolv.h>
+#include <pj/errno.h>
+
+/*
+ * Address families conversion.
+ * The values here are indexed based on pj_addr_family-0xFF00.
+ */
+const pj_uint16_t PJ_AF_UNIX = AF_UNIX;
+const pj_uint16_t PJ_AF_INET = AF_INET;
+const pj_uint16_t PJ_AF_INET6 = AF_INET6;
+#ifdef AF_PACKET
+const pj_uint16_t PJ_AF_PACKET = AF_PACKET;
+#else
+const pj_uint16_t PJ_AF_PACKET = 0xFFFF;
+#endif
+#ifdef AF_IRDA
+const pj_uint16_t PJ_AF_IRDA = AF_IRDA;
+#else
+const pj_uint16_t PJ_AF_IRDA = 0xFFFF;
+#endif
+
+/*
+ * Socket types conversion.
+ * The values here are indexed based on pj_sock_type-0xFF00
+ */
+const pj_uint16_t PJ_SOCK_STREAM = SOCK_STREAM;
+const pj_uint16_t PJ_SOCK_DGRAM = SOCK_DGRAM;
+const pj_uint16_t PJ_SOCK_RAW = SOCK_RAW;
+const pj_uint16_t PJ_SOCK_RDM = SOCK_RDM;
+
+/*
+ * Socket level values.
+ */
+const pj_uint16_t PJ_SOL_SOCKET = SOL_SOCKET;
+#ifdef SOL_IP
+const pj_uint16_t PJ_SOL_IP = SOL_IP;
+#else
+const pj_uint16_t PJ_SOL_IP = 0xFFFF;
+#endif /* SOL_IP */
+#if defined(SOL_TCP)
+const pj_uint16_t PJ_SOL_TCP = SOL_TCP;
+#elif defined(IPPROTO_TCP)
+const pj_uint16_t PJ_SOL_TCP = IPPROTO_TCP;
+#endif /* SOL_TCP */
+#ifdef SOL_UDP
+const pj_uint16_t PJ_SOL_UDP = SOL_UDP;
+#else
+const pj_uint16_t PJ_SOL_UDP = 0xFFFF;
+#endif
+#ifdef SOL_IPV6
+const pj_uint16_t PJ_SOL_IPV6 = SOL_IPV6;
+#else
+const pj_uint16_t PJ_SOL_IPV6 = 0xFFFF;
+#endif
+
+
+/*
+ * Convert 16-bit value from network byte order to host byte order.
+ */
+PJ_DEF(pj_uint16_t) pj_ntohs(pj_uint16_t netshort)
+{
+ return ntohs(netshort);
+}
+
+/*
+ * Convert 16-bit value from host byte order to network byte order.
+ */
+PJ_DEF(pj_uint16_t) pj_htons(pj_uint16_t hostshort)
+{
+ return htons(hostshort);
+}
+
+/*
+ * Convert 32-bit value from network byte order to host byte order.
+ */
+PJ_DEF(pj_uint32_t) pj_ntohl(pj_uint32_t netlong)
+{
+ return ntohl(netlong);
+}
+
+/*
+ * Convert 32-bit value from host byte order to network byte order.
+ */
+PJ_DEF(pj_uint32_t) pj_htonl(pj_uint32_t hostlong)
+{
+ return htonl(hostlong);
+}
+
+/*
+ * Convert an Internet host address given in network byte order
+ * to string in standard numbers and dots notation.
+ */
+PJ_DEF(char*) pj_inet_ntoa(pj_in_addr inaddr)
+{
+ return inet_ntoa(*(struct in_addr*)&inaddr);
+}
+
+/*
+ * This function converts the Internet host address cp from the standard
+ * numbers-and-dots notation into binary data and stores it in the structure
+ * that inp points to.
+ */
+PJ_DEF(int) pj_inet_aton(const pj_str_t *cp, struct pj_in_addr *inp)
+{
+ char tempaddr[16];
+
+ /* Initialize output with PJ_INADDR_NONE.
+ * Some apps rely on this instead of the return value
+ * (and anyway the return value is quite confusing!)
+ */
+ inp->s_addr = PJ_INADDR_NONE;
+
+ /* Caution:
+ * this function might be called with cp->slen >= 16
+ * (i.e. when called with hostname to check if it's an IP addr).
+ */
+ PJ_ASSERT_RETURN(cp && cp->slen && inp, 0);
+ if (cp->slen >= 16) {
+ return 0;
+ }
+
+ pj_memcpy(tempaddr, cp->ptr, cp->slen);
+ tempaddr[cp->slen] = '\0';
+
+#if defined(PJ_SOCK_HAS_INET_ATON) && PJ_SOCK_HAS_INET_ATON != 0
+ return inet_aton(tempaddr, (struct in_addr*)inp);
+#else
+ inp->s_addr = inet_addr(tempaddr);
+ return inp->s_addr == PJ_INADDR_NONE ? 0 : 1;
+#endif
+}
+
+/*
+ * Convert address string with numbers and dots to binary IP address.
+ */
+PJ_DEF(pj_in_addr) pj_inet_addr(const pj_str_t *cp)
+{
+ pj_in_addr addr;
+
+ pj_inet_aton(cp, &addr);
+ return addr;
+}
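
A short round-trip sketch for the conversions above; the ptr/slen initialization follows how pj_str_t is used throughout this file, and the log sender name is a placeholder.

    #include <pj/sock.h>
    #include <pj/log.h>

    static void addr_conv_demo(void)
    {
        pj_str_t s;
        pj_in_addr binary;

        s.ptr  = "127.0.0.1";
        s.slen = 9;

        /* Dotted-decimal string -> 32-bit address in network byte order. */
        binary = pj_inet_addr(&s);

        /* ...and back; pj_inet_ntoa() returns a static, non-reentrant buffer. */
        PJ_LOG(3,("addrdemo", "address is %s", pj_inet_ntoa(binary)));
    }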
+
+/*
+ * Set the IP address of an IP socket address from string address,
+ * with resolving the host if necessary. The string address may be in a
+ * standard numbers and dots notation or may be a hostname. If hostname
+ * is specified, then the function will resolve the host into the IP
+ * address.
+ */
+PJ_DEF(pj_status_t) pj_sockaddr_in_set_str_addr( pj_sockaddr_in *addr,
+ const pj_str_t *str_addr)
+{
+ PJ_CHECK_STACK();
+
+ PJ_ASSERT_RETURN(str_addr && str_addr->slen < PJ_MAX_HOSTNAME,
+ (addr->sin_addr.s_addr=PJ_INADDR_NONE, PJ_EINVAL));
+
+ addr->sin_family = AF_INET;
+
+ if (str_addr && str_addr->slen) {
+ addr->sin_addr = pj_inet_addr(str_addr);
+ if (addr->sin_addr.s_addr == PJ_INADDR_NONE) {
+ pj_hostent he;
+ pj_status_t rc;
+
+ rc = pj_gethostbyname(str_addr, &he);
+ if (rc == 0) {
+ addr->sin_addr.s_addr = *(pj_uint32_t*)he.h_addr;
+ } else {
+ addr->sin_addr.s_addr = PJ_INADDR_NONE;
+ return rc;
+ }
+ }
+
+ } else {
+ addr->sin_addr.s_addr = 0;
+ }
+
+ return PJ_SUCCESS;
+}
+
+/*
+ * Set the IP address and port of an IP socket address.
+ * The string address may be in a standard numbers and dots notation or
+ * may be a hostname. If hostname is specified, then the function will
+ * resolve the host into the IP address.
+ */
+PJ_DEF(pj_status_t) pj_sockaddr_in_init( pj_sockaddr_in *addr,
+ const pj_str_t *str_addr,
+ pj_uint16_t port)
+{
+ PJ_ASSERT_RETURN(addr && str_addr,
+ (addr->sin_addr.s_addr=PJ_INADDR_NONE, PJ_EINVAL));
+
+ addr->sin_family = PJ_AF_INET;
+ pj_sockaddr_in_set_port(addr, port);
+ return pj_sockaddr_in_set_str_addr(addr, str_addr);
+}
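
As the comments above note, pj_sockaddr_in_set_str_addr() falls back to pj_gethostbyname() when the string is not a dotted-decimal address. A sketch exercising both paths; the hostname and port are placeholders.

    #include <pj/sock.h>

    static pj_status_t resolve_demo(void)
    {
        pj_sockaddr_in addr;
        pj_str_t host;
        pj_status_t rc;

        /* Numeric form: no DNS lookup is performed. */
        host.ptr = "10.0.0.1";  host.slen = 8;
        rc = pj_sockaddr_in_init(&addr, &host, 5060);
        if (rc != PJ_SUCCESS)
            return rc;

        /* Hostname form: resolved via pj_gethostbyname(). */
        host.ptr = "www.example.org";  host.slen = 15;
        return pj_sockaddr_in_init(&addr, &host, 5060);
    }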
+
+
+/*
+ * Get hostname.
+ */
+PJ_DEF(const pj_str_t*) pj_gethostname(void)
+{
+ static char buf[PJ_MAX_HOSTNAME];
+ static pj_str_t hostname;
+
+ PJ_CHECK_STACK();
+
+ if (hostname.ptr == NULL) {
+ hostname.ptr = buf;
+ if (gethostname(buf, sizeof(buf)) != 0) {
+ hostname.ptr[0] = '\0';
+ hostname.slen = 0;
+ } else {
+ hostname.slen = strlen(buf);
+ }
+ }
+ return &hostname;
+}
+
+/*
+ * Get first IP address associated with the hostname.
+ */
+PJ_DEF(pj_in_addr) pj_gethostaddr(void)
+{
+ pj_sockaddr_in addr;
+ const pj_str_t *hostname = pj_gethostname();
+
+ pj_sockaddr_in_set_str_addr(&addr, hostname);
+ return addr.sin_addr;
+}
+
+
+#if defined(PJ_WIN32)
+/*
+ * Create new socket/endpoint for communication and returns a descriptor.
+ */
+PJ_DEF(pj_status_t) pj_sock_socket(int af,
+ int type,
+ int proto,
+ pj_sock_t *sock)
+{
+ PJ_CHECK_STACK();
+
+ /* Sanity checks. */
+ PJ_ASSERT_RETURN(sock!=NULL, PJ_EINVAL);
+ PJ_ASSERT_RETURN((unsigned)PJ_INVALID_SOCKET==INVALID_SOCKET,
+ (*sock=PJ_INVALID_SOCKET, PJ_EINVAL));
+
+ *sock = WSASocket(af, type, proto, NULL, 0, WSA_FLAG_OVERLAPPED);
+
+ if (*sock == PJ_INVALID_SOCKET)
+ return PJ_RETURN_OS_ERROR(pj_get_native_netos_error());
+ else
+ return PJ_SUCCESS;
+}
+
+#else
+/*
+ * Create new socket/endpoint for communication and returns a descriptor.
+ */
+PJ_DEF(pj_status_t) pj_sock_socket(int af,
+ int type,
+ int proto,
+ pj_sock_t *sock)
+{
+
+ PJ_CHECK_STACK();
+
+ /* Sanity checks. */
+ PJ_ASSERT_RETURN(sock!=NULL, PJ_EINVAL);
+ PJ_ASSERT_RETURN(PJ_INVALID_SOCKET==-1,
+ (*sock=PJ_INVALID_SOCKET, PJ_EINVAL));
+
+ *sock = socket(af, type, proto);
+ if (*sock == PJ_INVALID_SOCKET)
+ return PJ_RETURN_OS_ERROR(pj_get_native_netos_error());
+ else
+ return PJ_SUCCESS;
+}
+#endif
+
+
+/*
+ * Bind socket.
+ */
+PJ_DEF(pj_status_t) pj_sock_bind( pj_sock_t sock,
+ const pj_sockaddr_t *addr,
+ int len)
+{
+ PJ_CHECK_STACK();
+
+ PJ_ASSERT_RETURN(addr && len > 0, PJ_EINVAL);
+
+ if (bind(sock, (struct sockaddr*)addr, len) != 0)
+ return PJ_RETURN_OS_ERROR(pj_get_native_netos_error());
+ else
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Bind socket.
+ */
+PJ_DEF(pj_status_t) pj_sock_bind_in( pj_sock_t sock,
+ pj_uint32_t addr32,
+ pj_uint16_t port)
+{
+ pj_sockaddr_in addr;
+
+ PJ_CHECK_STACK();
+
+ addr.sin_family = PJ_AF_INET;
+ addr.sin_addr.s_addr = pj_htonl(addr32);
+ addr.sin_port = pj_htons(port);
+
+ return pj_sock_bind(sock, &addr, sizeof(pj_sockaddr_in));
+}
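
Chaining the calls above: a sketch that creates a UDP socket and binds it to the wildcard address (pj_sock_close() is defined just below); the port number is a placeholder.

    #include <pj/sock.h>
    #include <pj/errno.h>

    static pj_status_t udp_bind_demo(pj_sock_t *p_sock)
    {
        pj_status_t rc;

        rc = pj_sock_socket(PJ_AF_INET, PJ_SOCK_DGRAM, 0, p_sock);
        if (rc != PJ_SUCCESS)
            return rc;

        /* addr32==0 binds to the wildcard (any) address. */
        rc = pj_sock_bind_in(*p_sock, 0, 5000);
        if (rc != PJ_SUCCESS) {
            pj_sock_close(*p_sock);
            *p_sock = PJ_INVALID_SOCKET;
        }
        return rc;
    }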
+
+
+/*
+ * Close socket.
+ */
+PJ_DEF(pj_status_t) pj_sock_close(pj_sock_t sock)
+{
+ int rc;
+
+ PJ_CHECK_STACK();
+#if defined(PJ_WIN32) && PJ_WIN32==1
+ rc = closesocket(sock);
+#else
+ rc = close(sock);
+#endif
+
+ if (rc != 0)
+ return PJ_RETURN_OS_ERROR(pj_get_native_netos_error());
+ else
+ return PJ_SUCCESS;
+}
+
+/*
+ * Get remote's name.
+ */
+PJ_DEF(pj_status_t) pj_sock_getpeername( pj_sock_t sock,
+ pj_sockaddr_t *addr,
+ int *namelen)
+{
+ PJ_CHECK_STACK();
+ if (getpeername(sock, (struct sockaddr*)addr, (socklen_t*)namelen) != 0)
+ return PJ_RETURN_OS_ERROR(pj_get_native_netos_error());
+ else
+ return PJ_SUCCESS;
+}
+
+/*
+ * Get socket name.
+ */
+PJ_DEF(pj_status_t) pj_sock_getsockname( pj_sock_t sock,
+ pj_sockaddr_t *addr,
+ int *namelen)
+{
+ PJ_CHECK_STACK();
+ if (getsockname(sock, (struct sockaddr*)addr, (socklen_t*)namelen) != 0)
+ return PJ_RETURN_OS_ERROR(pj_get_native_netos_error());
+ else
+ return PJ_SUCCESS;
+}
+
+/*
+ * Send data
+ */
+PJ_DEF(pj_status_t) pj_sock_send(pj_sock_t sock,
+ const void *buf,
+ pj_ssize_t *len,
+ unsigned flags)
+{
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(len, PJ_EINVAL);
+
+ *len = send(sock, (const char*)buf, *len, flags);
+
+ if (*len < 0)
+ return PJ_RETURN_OS_ERROR(pj_get_native_netos_error());
+ else
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Send data.
+ */
+PJ_DEF(pj_status_t) pj_sock_sendto(pj_sock_t sock,
+ const void *buf,
+ pj_ssize_t *len,
+ unsigned flags,
+ const pj_sockaddr_t *to,
+ int tolen)
+{
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(len, PJ_EINVAL);
+
+ *len = sendto(sock, (const char*)buf, *len, flags,
+ (const struct sockaddr*)to, tolen);
+
+ if (*len < 0)
+ return PJ_RETURN_OS_ERROR(pj_get_native_netos_error());
+ else
+ return PJ_SUCCESS;
+}
+
+/*
+ * Receive data.
+ */
+PJ_DEF(pj_status_t) pj_sock_recv(pj_sock_t sock,
+ void *buf,
+ pj_ssize_t *len,
+ unsigned flags)
+{
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(buf && len, PJ_EINVAL);
+
+ *len = recv(sock, (char*)buf, *len, flags);
+
+ if (*len < 0)
+ return PJ_RETURN_OS_ERROR(pj_get_native_netos_error());
+ else
+ return PJ_SUCCESS;
+}
+
+/*
+ * Receive data.
+ */
+PJ_DEF(pj_status_t) pj_sock_recvfrom(pj_sock_t sock,
+ void *buf,
+ pj_ssize_t *len,
+ unsigned flags,
+ pj_sockaddr_t *from,
+ int *fromlen)
+{
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(buf && len, PJ_EINVAL);
+ PJ_ASSERT_RETURN(from && fromlen, (*len=-1, PJ_EINVAL));
+
+ *len = recvfrom(sock, (char*)buf, *len, flags,
+ (struct sockaddr*)from, (socklen_t*)fromlen);
+
+ if (*len < 0)
+ return PJ_RETURN_OS_ERROR(pj_get_native_netos_error());
+ else
+ return PJ_SUCCESS;
+}
+
+/*
+ * Get socket option.
+ */
+PJ_DEF(pj_status_t) pj_sock_getsockopt( pj_sock_t sock,
+ int level,
+ int optname,
+ void *optval,
+ int *optlen)
+{
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(optval && optlen, PJ_EINVAL);
+
+ if (getsockopt(sock, level, optname, (char*)optval, (socklen_t*)optlen)!=0)
+ return PJ_RETURN_OS_ERROR(pj_get_native_netos_error());
+ else
+ return PJ_SUCCESS;
+}
+
+/*
+ * Set socket option.
+ */
+PJ_DEF(pj_status_t) pj_sock_setsockopt( pj_sock_t sock,
+ int level,
+ int optname,
+ const void *optval,
+ int optlen)
+{
+ PJ_CHECK_STACK();
+ if (setsockopt(sock, level, optname, (const char*)optval, optlen) != 0)
+ return PJ_RETURN_OS_ERROR(pj_get_native_netos_error());
+ else
+ return PJ_SUCCESS;
+}
+
+/*
+ * Shutdown socket.
+ */
+#if PJ_HAS_TCP
+PJ_DEF(pj_status_t) pj_sock_shutdown( pj_sock_t sock,
+ int how)
+{
+ PJ_CHECK_STACK();
+ if (shutdown(sock, how) != 0)
+ return PJ_RETURN_OS_ERROR(pj_get_native_netos_error());
+ else
+ return PJ_SUCCESS;
+}
+
+/*
+ * Start listening to incoming connections.
+ */
+PJ_DEF(pj_status_t) pj_sock_listen( pj_sock_t sock,
+ int backlog)
+{
+ PJ_CHECK_STACK();
+ if (listen(sock, backlog) != 0)
+ return PJ_RETURN_OS_ERROR(pj_get_native_netos_error());
+ else
+ return PJ_SUCCESS;
+}
+
+/*
+ * Connect socket.
+ */
+PJ_DEF(pj_status_t) pj_sock_connect( pj_sock_t sock,
+ const pj_sockaddr_t *addr,
+ int namelen)
+{
+ PJ_CHECK_STACK();
+ if (connect(sock, (struct sockaddr*)addr, namelen) != 0)
+ return PJ_RETURN_OS_ERROR(pj_get_native_netos_error());
+ else
+ return PJ_SUCCESS;
+}
+
+/*
+ * Accept incoming connections
+ */
+PJ_DEF(pj_status_t) pj_sock_accept( pj_sock_t serverfd,
+ pj_sock_t *newsock,
+ pj_sockaddr_t *addr,
+ int *addrlen)
+{
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(newsock != NULL, PJ_EINVAL);
+
+ *newsock = accept(serverfd, (struct sockaddr*)addr, (socklen_t*)addrlen);
+ if (*newsock==PJ_INVALID_SOCKET)
+ return PJ_RETURN_OS_ERROR(pj_get_native_netos_error());
+ else
+ return PJ_SUCCESS;
+}
+#endif /* PJ_HAS_TCP */
+
+
diff --git a/pjlib/src/pj/sock_linux_kernel.c b/pjlib/src/pj/sock_linux_kernel.c
new file mode 100644
index 00000000..76bc7bd8
--- /dev/null
+++ b/pjlib/src/pj/sock_linux_kernel.c
@@ -0,0 +1,749 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/sock_linux_kernel.c 4 10/29/05 11:51a Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pj/sock_linux_kernel.c $
+ *
+ * 4 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 3 10/20/05 9:19a Bennylp
+ * Updated with new API convention (error code)
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/05/05 4:43p Bennylp
+ * Created.
+ *
+ */
+#include <pj/sock.h>
+#include <pj/assert.h>
+#include <pj/string.h> /* pj_memcpy() */
+#include <pj/os.h> /* PJ_CHECK_STACK() */
+#include <pj/addr_resolv.h> /* pj_gethostbyname() */
+#include <pj/ctype.h>
+#include <pj/compat/sprintf.h>
+#include <pj/log.h>
+#include <pj/errno.h>
+
+/* Linux kernel specific. */
+#include <linux/socket.h>
+#include <linux/net.h>
+//#include <net/sock.h>
+#include <linux/security.h>
+#include <linux/syscalls.h> /* sys_xxx() */
+#include <asm/ioctls.h> /* FIONBIO */
+#include <linux/utsname.h> /* for pj_gethostname() */
+
+/*
+ * Address families conversion.
+ * The values here are indexed based on pj_addr_family-0xFF00.
+ */
+const pj_uint16_t PJ_AF_UNIX = AF_UNIX;
+const pj_uint16_t PJ_AF_INET = AF_INET;
+const pj_uint16_t PJ_AF_INET6 = AF_INET6;
+#ifdef AF_PACKET
+const pj_uint16_t PJ_AF_PACKET = AF_PACKET;
+#else
+# error "AF_PACKET undeclared!"
+#endif
+#ifdef AF_IRDA
+const pj_uint16_t PJ_AF_IRDA = AF_IRDA;
+#else
+# error "AF_IRDA undeclared!"
+#endif
+
+/*
+ * Socket types conversion.
+ * The values here are indexed based on pj_sock_type-0xFF00
+ */
+const pj_uint16_t PJ_SOCK_STREAM= SOCK_STREAM;
+const pj_uint16_t PJ_SOCK_DGRAM = SOCK_DGRAM;
+const pj_uint16_t PJ_SOCK_RAW = SOCK_RAW;
+const pj_uint16_t PJ_SOCK_RDM = SOCK_RDM;
+
+/*
+ * Socket level values.
+ */
+const pj_uint16_t PJ_SOL_SOCKET = SOL_SOCKET;
+#ifdef SOL_IP
+const pj_uint16_t PJ_SOL_IP = SOL_IP;
+#else
+# error "SOL_IP undeclared!"
+#endif /* SOL_IP */
+#if defined(SOL_TCP)
+const pj_uint16_t PJ_SOL_TCP = SOL_TCP;
+#else
+# error "SOL_TCP undeclared!"
+#endif /* SOL_TCP */
+#ifdef SOL_UDP
+const pj_uint16_t PJ_SOL_UDP = SOL_UDP;
+#else
+# error "SOL_UDP undeclared!"
+#endif
+#ifdef SOL_IPV6
+const pj_uint16_t PJ_SOL_IPV6 = SOL_IPV6;
+#else
+# error "SOL_IPV6 undeclared!"
+#endif
+
+/*
+ * Convert 16-bit value from network byte order to host byte order.
+ */
+PJ_DEF(pj_uint16_t) pj_ntohs(pj_uint16_t netshort)
+{
+ return ntohs(netshort);
+}
+
+/*
+ * Convert 16-bit value from host byte order to network byte order.
+ */
+PJ_DEF(pj_uint16_t) pj_htons(pj_uint16_t hostshort)
+{
+ return htons(hostshort);
+}
+
+/*
+ * Convert 32-bit value from network byte order to host byte order.
+ */
+PJ_DEF(pj_uint32_t) pj_ntohl(pj_uint32_t netlong)
+{
+ return ntohl(netlong);
+}
+
+/*
+ * Convert 32-bit value from host byte order to network byte order.
+ */
+PJ_DEF(pj_uint32_t) pj_htonl(pj_uint32_t hostlong)
+{
+ return htonl(hostlong);
+}
+
+/*
+ * Convert an Internet host address given in network byte order
+ * to string in standard numbers and dots notation.
+ */
+PJ_DEF(char*) pj_inet_ntoa(pj_in_addr in)
+{
+#define UC(b) (((int)b)&0xff)
+ static char b[18];
+ char *p;
+
+ p = (char *)&in;
+ pj_snprintf(b, sizeof(b), "%d.%d.%d.%d",
+ UC(p[0]), UC(p[1]), UC(p[2]), UC(p[3]));
+
+ return b;
+}
+
+/*
+ * This function converts the Internet host address ccp from the standard
+ * numbers-and-dots notation into binary data and stores it in the structure
+ * that inp points to.
+ */
+PJ_DEF(int) pj_inet_aton(const pj_str_t *ccp, struct pj_in_addr *addr)
+{
+ pj_uint32_t val;
+ int base, n;
+ char c;
+ unsigned parts[4];
+ unsigned *pp = parts;
+ char cp_copy[18];
+ char *cp = cp_copy;
+
+ addr->s_addr = PJ_INADDR_NONE;
+
+ if (ccp->slen > 15) return 0;
+
+ pj_memcpy(cp, ccp->ptr, ccp->slen);
+ cp[ccp->slen] = '\0';
+
+ c = *cp;
+ for (;;) {
+ /*
+ * Collect number up to ``.''.
+ * Values are specified as for C:
+ * 0x=hex, 0=octal, isdigit=decimal.
+ */
+ if (!pj_isdigit((int)c))
+ return (0);
+ val = 0; base = 10;
+ if (c == '0') {
+ c = *++cp;
+ if (c == 'x' || c == 'X')
+ base = 16, c = *++cp;
+ else
+ base = 8;
+ }
+
+ for (;;) {
+ if (pj_isascii((int)c) && pj_isdigit((int)c)) {
+ val = (val * base) + (c - '0');
+ c = *++cp;
+ } else if (base==16 && pj_isascii((int)c) && pj_isxdigit((int)c)) {
+ val = (val << 4) |
+ (c + 10 - (pj_islower((int)c) ? 'a' : 'A'));
+ c = *++cp;
+ } else
+ break;
+ }
+
+ if (c == '.') {
+ /*
+ * Internet format:
+ * a.b.c.d
+ * a.b.c (with c treated as 16 bits)
+ * a.b (with b treated as 24 bits)
+ */
+ if (pp >= parts + 3)
+ return (0);
+ *pp++ = val;
+ c = *++cp;
+ } else
+ break;
+ }
+
+ /*
+ * Check for trailing characters.
+ */
+ if (c != '\0' && (!pj_isascii((int)c) || !pj_isspace((int)c)))
+ return (0);
+ /*
+ * Concoct the address according to
+ * the number of parts specified.
+ */
+ n = pp - parts + 1;
+ switch (n) {
+ case 0:
+ return (0); /* initial nondigit */
+ case 1: /* a -- 32 bits */
+ break;
+ case 2: /* a.b -- 8.24 bits */
+ if (val > 0xffffff)
+ return (0);
+ val |= parts[0] << 24;
+ break;
+ case 3: /* a.b.c -- 8.8.16 bits */
+ if (val > 0xffff)
+ return (0);
+ val |= (parts[0] << 24) | (parts[1] << 16);
+ break;
+ case 4: /* a.b.c.d -- 8.8.8.8 bits */
+ if (val > 0xff)
+ return (0);
+ val |= (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8);
+ break;
+ }
+
+ if (addr)
+ addr->s_addr = pj_htonl(val);
+ return (1);
+}
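
Per the format comments inside the parser, shorthand addresses are accepted as well as full dotted-decimal; a brief illustration of the values produced:

    #include <pj/sock.h>

    static void aton_forms_demo(void)
    {
        pj_str_t s;
        struct pj_in_addr a;

        /* Full a.b.c.d form. */
        s.ptr = "127.0.0.1";  s.slen = 9;
        pj_inet_aton(&s, &a);      /* a.s_addr == pj_htonl(0x7F000001) */

        /* Shorthand a.b form: the final part fills the low 24 bits. */
        s.ptr = "127.1";  s.slen = 5;
        pj_inet_aton(&s, &a);      /* same value, pj_htonl(0x7F000001) */
    }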
+
+/*
+ * Convert address string with numbers and dots to binary IP address.
+ */
+PJ_DEF(pj_in_addr) pj_inet_addr(const pj_str_t *cp)
+{
+ pj_in_addr addr;
+ pj_inet_aton(cp, &addr);
+ return addr;
+}
+
+/*
+ * Set the IP address of an IP socket address from string address,
+ * with resolving the host if necessary. The string address may be in a
+ * standard numbers and dots notation or may be a hostname. If hostname
+ * is specified, then the function will resolve the host into the IP
+ * address.
+ */
+PJ_DEF(pj_status_t) pj_sockaddr_in_set_str_addr( pj_sockaddr_in *addr,
+ const pj_str_t *str_addr)
+{
+ PJ_CHECK_STACK();
+
+ pj_assert(str_addr && str_addr->slen < PJ_MAX_HOSTNAME);
+
+ addr->sin_family = AF_INET;
+
+ if (str_addr && str_addr->slen) {
+ addr->sin_addr = pj_inet_addr(str_addr);
+ if (addr->sin_addr.s_addr == PJ_INADDR_NONE) {
+ pj_hostent he;
+ if (pj_gethostbyname(str_addr, &he) == 0) {
+ addr->sin_addr.s_addr = *(pj_uint32_t*)he.h_addr;
+ } else {
+ addr->sin_addr.s_addr = PJ_INADDR_NONE;
+ return -1;
+ }
+ }
+
+ } else {
+ addr->sin_addr.s_addr = 0;
+ }
+
+ return PJ_SUCCESS;
+}
+
+/*
+ * Set the IP address and port of an IP socket address.
+ * The string address may be in a standard numbers and dots notation or
+ * may be a hostname. If hostname is specified, then the function will
+ * resolve the host into the IP address.
+ */
+PJ_DEF(pj_status_t) pj_sockaddr_in_init( pj_sockaddr_in *addr,
+ const pj_str_t *str_addr,
+ pj_uint16_t port)
+{
+ pj_assert(addr && str_addr);
+
+ addr->sin_family = PJ_AF_INET;
+ pj_sockaddr_in_set_port(addr, port);
+ return pj_sockaddr_in_set_str_addr(addr, str_addr);
+}
+
+
+/*
+ * Get hostname.
+ */
+PJ_DEF(const pj_str_t*) pj_gethostname(void)
+{
+ static char buf[PJ_MAX_HOSTNAME];
+ static pj_str_t hostname;
+
+ PJ_CHECK_STACK();
+
+ if (hostname.ptr == NULL) {
+ hostname.ptr = buf;
+ down_read(&uts_sem);
+ hostname.slen = strlen(system_utsname.nodename);
+ if (hostname.slen > PJ_MAX_HOSTNAME) {
+ hostname.ptr[0] = '\0';
+ hostname.slen = 0;
+ } else {
+ pj_memcpy(hostname.ptr, system_utsname.nodename, hostname.slen);
+ }
+ up_read(&uts_sem);
+ }
+ return &hostname;
+}
+
+/*
+ * Get first IP address associated with the hostname.
+ */
+PJ_DEF(pj_in_addr) pj_gethostaddr(void)
+{
+ pj_sockaddr_in addr;
+ const pj_str_t *hostname = pj_gethostname();
+
+ pj_sockaddr_in_set_str_addr(&addr, hostname);
+ return addr.sin_addr;
+}
+
+
+/*
+ * Create new socket/endpoint for communication and returns a descriptor.
+ */
+PJ_DEF(pj_status_t) pj_sock_socket(int af, int type, int proto,
+ pj_sock_t *sock_fd)
+{
+ long result;
+
+ PJ_CHECK_STACK();
+
+ /* Sanity checks. */
+ PJ_ASSERT_RETURN(PJ_INVALID_SOCKET == -1 && sock_fd != NULL, PJ_EINVAL);
+
+ /* Initialize returned socket */
+ *sock_fd = PJ_INVALID_SOCKET;
+
+ /* Create socket. */
+ result = sys_socket(af, type, proto);
+ if (result < 0) {
+ return PJ_RETURN_OS_ERROR((-result));
+ }
+
+ *sock_fd = result;
+
+ return PJ_SUCCESS;
+}
+
+/*
+ * Bind socket.
+ */
+PJ_DEF(pj_status_t) pj_sock_bind( pj_sock_t sockfd,
+ const pj_sockaddr_t *addr,
+ int len)
+{
+ long err;
+ mm_segment_t oldfs;
+
+ PJ_CHECK_STACK();
+
+ PJ_ASSERT_RETURN(addr!=NULL && len >= sizeof(struct pj_sockaddr),
+ PJ_EINVAL);
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+
+ err = sys_bind(sockfd, (struct sockaddr*)addr, len);
+
+ set_fs(oldfs);
+
+ if (err)
+ return PJ_RETURN_OS_ERROR(-err);
+ else
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * Bind socket.
+ */
+PJ_DEF(pj_status_t) pj_sock_bind_in( pj_sock_t sockfd,
+ pj_uint32_t addr32,
+ pj_uint16_t port)
+{
+ pj_sockaddr_in addr;
+
+ PJ_CHECK_STACK();
+
+ addr.sin_family = PJ_AF_INET;
+ addr.sin_addr.s_addr = pj_htonl(addr32);
+ addr.sin_port = pj_htons(port);
+
+ return pj_sock_bind(sockfd, &addr, sizeof(pj_sockaddr_in));
+}
+
+/*
+ * Close socket.
+ */
+PJ_DEF(pj_status_t) pj_sock_close(pj_sock_t sockfd)
+{
+ long err;
+
+ err = sys_close(sockfd);
+
+ if (err != 0)
+ return PJ_RETURN_OS_ERROR(-err);
+ else
+ return PJ_SUCCESS;
+}
+
+/*
+ * Get remote's name.
+ */
+PJ_DEF(pj_status_t) pj_sock_getpeername( pj_sock_t sockfd,
+ pj_sockaddr_t *addr,
+ int *namelen)
+{
+ mm_segment_t oldfs;
+ long err;
+
+ PJ_CHECK_STACK();
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+
+ err = sys_getpeername( sockfd, addr, namelen);
+
+ set_fs(oldfs);
+
+ if (err)
+ return PJ_RETURN_OS_ERROR(-err);
+ else
+ return PJ_SUCCESS;
+}
+
+/*
+ * Get socket name.
+ */
+PJ_DEF(pj_status_t) pj_sock_getsockname( pj_sock_t sockfd,
+ pj_sockaddr_t *addr,
+ int *namelen)
+{
+ mm_segment_t oldfs;
+ int err;
+
+ PJ_CHECK_STACK();
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+
+ err = sys_getsockname( sockfd, addr, namelen );
+
+ set_fs(oldfs);
+
+ if (err)
+ return PJ_RETURN_OS_ERROR(-err);
+ else
+ return PJ_SUCCESS;
+}
+
+/*
+ * Send data
+ */
+PJ_DEF(pj_status_t) pj_sock_send( pj_sock_t sockfd,
+ const void *buf,
+ pj_ssize_t *len,
+ unsigned flags)
+{
+ return pj_sock_sendto(sockfd, buf, len, flags, NULL, 0);
+}
+
+
+/*
+ * Send data.
+ */
+PJ_DEF(pj_status_t) pj_sock_sendto( pj_sock_t sockfd,
+ const void *buff,
+ pj_ssize_t *len,
+ unsigned flags,
+ const pj_sockaddr_t *addr,
+ int addr_len)
+{
+ long err;
+ mm_segment_t oldfs;
+
+ PJ_CHECK_STACK();
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+
+ err = *len = sys_sendto( sockfd, (void*)buff, *len, flags,
+ (void*)addr, addr_len );
+
+ set_fs(oldfs);
+
+ if (err >= 0) {
+ return PJ_SUCCESS;
+ }
+ else {
+ return PJ_RETURN_OS_ERROR(-err);
+ }
+}
+
+/*
+ * Receive data.
+ */
+PJ_DEF(pj_status_t) pj_sock_recv( pj_sock_t sockfd,
+ void *buf,
+ pj_ssize_t *len,
+ unsigned flags)
+{
+ return pj_sock_recvfrom(sockfd, buf, len, flags, NULL, NULL);
+}
+
+/*
+ * Receive data.
+ */
+PJ_DEF(pj_status_t) pj_sock_recvfrom( pj_sock_t sockfd,
+ void *buff,
+ pj_ssize_t *size,
+ unsigned flags,
+ pj_sockaddr_t *from,
+ int *fromlen)
+{
+ mm_segment_t oldfs;
+ long err;
+
+ PJ_CHECK_STACK();
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+
+ err = *size = sys_recvfrom( sockfd, buff, *size, flags, from, fromlen);
+
+ set_fs(oldfs);
+
+ if (err >= 0) {
+ return PJ_SUCCESS;
+ }
+ else {
+ return PJ_RETURN_OS_ERROR(-err);
+ }
+}
+
+/*
+ * Get socket option.
+ */
+PJ_DEF(pj_status_t) pj_sock_getsockopt( pj_sock_t sockfd,
+ int level,
+ int optname,
+ void *optval,
+ int *optlen)
+{
+ mm_segment_t oldfs;
+ long err;
+
+ PJ_CHECK_STACK();
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+
+ err = sys_getsockopt( sockfd, level, optname, optval, optlen);
+
+ set_fs(oldfs);
+
+ if (err)
+ return PJ_RETURN_OS_ERROR(-err);
+ else
+ return PJ_SUCCESS;
+}
+
+/*
+ * Set socket option.
+ */
+PJ_DEF(pj_status_t) pj_sock_setsockopt( pj_sock_t sockfd,
+ int level,
+ int optname,
+ const void *optval,
+ int optlen)
+{
+ long err;
+ mm_segment_t oldfs;
+
+ PJ_CHECK_STACK();
+
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+
+ err = sys_setsockopt( sockfd, level, optname, (void*)optval, optlen);
+
+ set_fs(oldfs);
+
+ if (err)
+ return PJ_RETURN_OS_ERROR(-err);
+ else
+ return PJ_SUCCESS;
+}
+
+/*
+ * Shutdown socket.
+ */
+#if PJ_HAS_TCP
+PJ_DEF(pj_status_t) pj_sock_shutdown( pj_sock_t sockfd,
+ int how)
+{
+ long err;
+
+ PJ_CHECK_STACK();
+
+ err = sys_shutdown(sockfd, how);
+
+ if (err)
+ return PJ_RETURN_OS_ERROR(-err);
+ else
+ return PJ_SUCCESS;
+}
+
+/*
+ * Start listening to incoming connections.
+ */
+PJ_DEF(pj_status_t) pj_sock_listen( pj_sock_t sockfd,
+ int backlog)
+{
+ long err;
+
+ PJ_CHECK_STACK();
+
+ err = sys_listen( sockfd, backlog );
+
+ if (err)
+ return PJ_RETURN_OS_ERROR(-err);
+ else
+ return PJ_SUCCESS;
+}
+
+/*
+ * Connect socket.
+ */
+PJ_DEF(pj_status_t) pj_sock_connect( pj_sock_t sockfd,
+ const pj_sockaddr_t *addr,
+ int namelen)
+{
+ long err;
+ mm_segment_t oldfs;
+
+ PJ_CHECK_STACK();
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+
+ err = sys_connect( sockfd, (void*)addr, namelen );
+
+ set_fs(oldfs);
+
+ if (err)
+ return PJ_RETURN_OS_ERROR(-err);
+ else
+ return PJ_SUCCESS;
+}
+
+/*
+ * Accept incoming connections
+ */
+PJ_DEF(pj_status_t) pj_sock_accept( pj_sock_t sockfd,
+ pj_sock_t *newsockfd,
+ pj_sockaddr_t *addr,
+ int *addrlen)
+{
+ long err;
+
+ PJ_CHECK_STACK();
+
+ PJ_ASSERT_RETURN(newsockfd != NULL, PJ_EINVAL);
+
+ err = sys_accept( sockfd, addr, addrlen);
+
+ if (err < 0) {
+ *newsockfd = PJ_INVALID_SOCKET;
+ return PJ_RETURN_OS_ERROR(-err);
+ }
+ else {
+ *newsockfd = err;
+ return PJ_SUCCESS;
+ }
+}
+#endif /* PJ_HAS_TCP */
+
+
+
+/*
+ * Permission to steal inet_ntoa() and inet_aton() as long as this notice below
+ * is included:
+ */
+/*
+ * Copyright (c) 1983, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
diff --git a/pjlib/src/pj/sock_select.c b/pjlib/src/pj/sock_select.c
new file mode 100644
index 00000000..49fa0116
--- /dev/null
+++ b/pjlib/src/pj/sock_select.c
@@ -0,0 +1,101 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/sock_select.c 4 10/14/05 12:26a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/sock_select.c $
+ *
+ * 4 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 3 9/21/05 1:39p Bennylp
+ * Periodic checkin for backup.
+ *
+ * 2 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ * 1 9/15/05 8:40p Bennylp
+ * Created.
+ */
+#include <pj/sock_select.h>
+#include <pj/compat/socket.h>
+#include <pj/os.h>
+#include <pj/assert.h>
+#include <pj/errno.h>
+
+
+#ifdef _MSC_VER
+# pragma warning(disable: 4018) // Signed/unsigned mismatch in FD_*
+#endif
+
+#define PART_FDSET(p_fdsetp) ((fd_set*)&p_fdsetp->data[1])
+#define PART_COUNT(p_fdsetp) (p_fdsetp->data[0])
+
+PJ_DEF(void) PJ_FD_ZERO(pj_fd_set_t *fdsetp)
+{
+ PJ_CHECK_STACK();
+ pj_assert(sizeof(pj_fd_set_t)-sizeof(pj_sock_t) >= sizeof(fd_set));
+
+ FD_ZERO(PART_FDSET(fdsetp));
+ PART_COUNT(fdsetp) = 0;
+}
+
+
+PJ_DEF(void) PJ_FD_SET(pj_sock_t fd, pj_fd_set_t *fdsetp)
+{
+ PJ_CHECK_STACK();
+ pj_assert(sizeof(pj_fd_set_t)-sizeof(pj_sock_t) >= sizeof(fd_set));
+
+ if (!PJ_FD_ISSET(fd, fdsetp))
+ ++PART_COUNT(fdsetp);
+ FD_SET(fd, PART_FDSET(fdsetp));
+}
+
+
+PJ_DEF(void) PJ_FD_CLR(pj_sock_t fd, pj_fd_set_t *fdsetp)
+{
+ PJ_CHECK_STACK();
+ pj_assert(sizeof(pj_fd_set_t)-sizeof(pj_sock_t) >= sizeof(fd_set));
+
+ if (PJ_FD_ISSET(fd, fdsetp))
+ --PART_COUNT(fdsetp);
+ FD_CLR(fd, PART_FDSET(fdsetp));
+}
+
+
+PJ_DEF(pj_bool_t) PJ_FD_ISSET(pj_sock_t fd, const pj_fd_set_t *fdsetp)
+{
+ PJ_CHECK_STACK();
+ PJ_ASSERT_RETURN(sizeof(pj_fd_set_t)-sizeof(pj_sock_t) >= sizeof(fd_set),
+ 0);
+
+ return FD_ISSET(fd, PART_FDSET(fdsetp));
+}
+
+PJ_DEF(pj_size_t) PJ_FD_COUNT(const pj_fd_set_t *fdsetp)
+{
+ return PART_COUNT(fdsetp);
+}
+
+PJ_DEF(int) pj_sock_select( int n,
+ pj_fd_set_t *readfds,
+ pj_fd_set_t *writefds,
+ pj_fd_set_t *exceptfds,
+ const pj_time_val *timeout)
+{
+ struct timeval os_timeout, *p_os_timeout;
+
+ PJ_CHECK_STACK();
+
+ PJ_ASSERT_RETURN(sizeof(pj_fd_set_t)-sizeof(pj_sock_t) >= sizeof(fd_set),
+ PJ_EBUG);
+
+ if (timeout) {
+ os_timeout.tv_sec = timeout->sec;
+ os_timeout.tv_usec = timeout->msec * 1000;
+ p_os_timeout = &os_timeout;
+ } else {
+ p_os_timeout = NULL;
+ }
+
+ return select(n, PART_FDSET(readfds), PART_FDSET(writefds),
+ PART_FDSET(exceptfds), p_os_timeout);
+}
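
A sketch of driving pj_sock_select() through the wrapper macros above, waiting up to one second for a socket to become readable. The empty write and exception sets are passed explicitly because PART_FDSET() does not special-case a NULL pj_fd_set_t pointer.

    #include <pj/sock_select.h>
    #include <pj/sock.h>

    static int wait_readable(pj_sock_t sock)
    {
        pj_fd_set_t readset, writeset, exceptset;
        pj_time_val timeout;
        int n;

        PJ_FD_ZERO(&readset);
        PJ_FD_ZERO(&writeset);
        PJ_FD_ZERO(&exceptset);
        PJ_FD_SET(sock, &readset);

        timeout.sec  = 1;
        timeout.msec = 0;

        /* The first argument mirrors select(): highest descriptor plus one. */
        n = pj_sock_select(sock+1, &readset, &writeset, &exceptset, &timeout);

        return n > 0 && PJ_FD_ISSET(sock, &readset);
    }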
+
diff --git a/pjlib/src/pj/string.c b/pjlib/src/pj/string.c
new file mode 100644
index 00000000..c3f1b5ff
--- /dev/null
+++ b/pjlib/src/pj/string.c
@@ -0,0 +1,124 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/string.c 9 10/14/05 12:26a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/string.c $
+ *
+ * 9 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 8 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ */
+#include <pj/string.h>
+#include <pj/pool.h>
+#include <pj/ctype.h>
+#include <pj/rand.h>
+#include <pj/os.h>
+
+#if PJ_FUNCTIONS_ARE_INLINED==0
+# include <pj/string_i.h>
+#endif
+
+
+static char hex[] = {'0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' };
+
+PJ_DEF(pj_str_t*) pj_strltrim( pj_str_t *str )
+{
+ register char *p = str->ptr;
+ while (pj_isspace(*p))
+ ++p;
+ str->slen -= (p - str->ptr);
+ str->ptr = p;
+ return str;
+}
+
+PJ_DEF(pj_str_t*) pj_strrtrim( pj_str_t *str )
+{
+ char *end = str->ptr + str->slen;
+ register char *p = end - 1;
+ while (p >= str->ptr && pj_isspace(*p))
+ --p;
+ str->slen -= ((end - p) - 1);
+ return str;
+}
+
+PJ_INLINE(void) pj_val_to_hex_digit(unsigned value, char *p)
+{
+ *p++ = hex[ (value & 0xF0) >> 4 ];
+ *p++ = hex[ (value & 0x0F) ];
+}
+
+PJ_DEF(char*) pj_create_random_string(char *str, pj_size_t len)
+{
+ unsigned i;
+ char *p = str;
+
+ PJ_CHECK_STACK();
+
+ for (i=0; i<len/8; ++i) {
+ unsigned val = pj_rand();
+ pj_val_to_hex_digit( (val & 0xFF000000) >> 24, p+0 );
+ pj_val_to_hex_digit( (val & 0x00FF0000) >> 16, p+2 );
+ pj_val_to_hex_digit( (val & 0x0000FF00) >> 8, p+4 );
+ pj_val_to_hex_digit( (val & 0x000000FF) >> 0, p+6 );
+ p += 8;
+ }
+ for (i=i * 8; i<len; ++i) {
+ *p++ = hex[ pj_rand() & 0x0F ];
+ }
+ return str;
+}
+
+
+PJ_DEF(unsigned long) pj_strtoul(const pj_str_t *str)
+{
+ unsigned long value;
+ unsigned i;
+
+ PJ_CHECK_STACK();
+
+ value = 0;
+ for (i=0; i<(unsigned)str->slen; ++i) {
+ value = value * 10 + (str->ptr[i] - '0');
+ }
+ return value;
+}
+
+PJ_DEF(int) pj_utoa(unsigned long val, char *buf)
+{
+ return pj_utoa_pad(val, buf, 0, 0);
+}
+
+PJ_DEF(int) pj_utoa_pad( unsigned long val, char *buf, int min_dig, int pad)
+{
+ char *p;
+ int len;
+
+ PJ_CHECK_STACK();
+
+ p = buf;
+ do {
+ unsigned long digval = (unsigned long) (val % 10);
+ val /= 10;
+ *p++ = (char) (digval + '0');
+ } while (val > 0);
+
+ len = p-buf;
+ while (len < min_dig) {
+ *p++ = (char)pad;
+ ++len;
+ }
+ *p-- = '\0';
+
+ do {
+ char temp = *p;
+ *p = *buf;
+ *buf = temp;
+ --p;
+ ++buf;
+ } while (buf < p);
+
+ return len;
+}
+
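A short sketch of the helpers above, assuming <pjlib.h> is included; buffer sizes and values are illustrative only.

static void string_helpers_sketch(void)
{
    char num_buf[16];
    char rand_buf[33];
    char txt[] = "  hello  ";
    pj_str_t s;

    /* Zero-pad 42 to at least five digits: "00042". */
    pj_utoa_pad(42, num_buf, 5, '0');

    /* 32 random hex characters; NUL-terminate manually. */
    pj_create_random_string(rand_buf, 32);
    rand_buf[32] = '\0';

    /* Trim whitespace on both sides of a pj_str_t (in place). */
    s = pj_str(txt);
    pj_strltrim(&s);
    pj_strrtrim(&s);    /* s.ptr now points at "hello", s.slen == 5 */
}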
diff --git a/pjlib/src/pj/stun.c b/pjlib/src/pj/stun.c
new file mode 100644
index 00000000..efd43560
--- /dev/null
+++ b/pjlib/src/pj/stun.c
@@ -0,0 +1,118 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/stun.c 6 9/17/05 10:37a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/stun.c $
+ *
+ * 6 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ */
+#include <pj/stun.h>
+#include <pj/pool.h>
+#include <pj/log.h>
+#include <pj/sock.h>
+#include <pj/os.h>
+
+#define THIS_FILE "stun"
+
+PJ_DEF(pj_status_t) pj_stun_create_bind_req( pj_pool_t *pool,
+ void **msg, pj_size_t *len,
+ pj_uint32_t id_hi,
+ pj_uint32_t id_lo)
+{
+ pj_stun_msg_hdr *hdr;
+
+ PJ_CHECK_STACK();
+
+ PJ_LOG(5,(THIS_FILE, "pj_stun_create_bind_req"));
+
+ hdr = pj_pool_calloc(pool, 1, sizeof(pj_stun_msg_hdr));
+ if (!hdr) {
+ PJ_LOG(5,(THIS_FILE, "Error allocating memory!"));
+ return -1;
+ }
+
+ hdr->type = pj_htons(PJ_STUN_BINDING_REQUEST);
+ hdr->tsx[2] = pj_htonl(id_hi);
+ hdr->tsx[3] = pj_htonl(id_lo);
+ *msg = hdr;
+ *len = sizeof(pj_stun_msg_hdr);
+
+ return 0;
+}
+
+PJ_DEF(pj_status_t) pj_stun_parse_msg( void *buf, pj_size_t len,
+ pj_stun_msg *msg)
+{
+ pj_uint16_t msg_type, msg_len;
+ char *p_attr;
+
+ PJ_CHECK_STACK();
+
+ PJ_LOG(5,(THIS_FILE, "pj_stun_parse_msg %p, len=%d", buf, len));
+
+ msg->hdr = (pj_stun_msg_hdr*)buf;
+ msg_type = pj_ntohs(msg->hdr->type);
+
+ switch (msg_type) {
+ case PJ_STUN_BINDING_REQUEST:
+ case PJ_STUN_BINDING_RESPONSE:
+ case PJ_STUN_BINDING_ERROR_RESPONSE:
+ case PJ_STUN_SHARED_SECRET_REQUEST:
+ case PJ_STUN_SHARED_SECRET_RESPONSE:
+ case PJ_STUN_SHARED_SECRET_ERROR_RESPONSE:
+ break;
+ default:
+ PJ_LOG(5,(THIS_FILE, "Error: unknown msg type %d", msg_type));
+ return -1;
+ }
+
+ msg_len = pj_ntohs(msg->hdr->length);
+ if (msg_len != len - sizeof(pj_stun_msg_hdr)) {
+ PJ_LOG(5,(THIS_FILE, "Error: invalid msg_len %d (expecting %d)",
+ msg_len, len - sizeof(pj_stun_msg_hdr)));
+ return -1;
+ }
+
+ msg->attr_count = 0;
+ p_attr = (char*)buf + sizeof(pj_stun_msg_hdr);
+
+ while (msg_len > 0) {
+ pj_stun_attr_hdr **attr = &msg->attr[msg->attr_count];
+ pj_uint32_t len;
+
+ *attr = (pj_stun_attr_hdr*)p_attr;
+ len = pj_ntohs((pj_uint16_t) ((*attr)->length)) + sizeof(pj_stun_attr_hdr);
+
+ if (msg_len < len) {
+ PJ_LOG(5,(THIS_FILE, "Error: length mismatch in attr %d",
+ msg->attr_count));
+ return -1;
+ }
+
+ if (pj_ntohs((*attr)->type) > PJ_STUN_ATTR_REFLECTED_FORM) {
+ PJ_LOG(5,(THIS_FILE, "Error: invalid attr type %d in attr %d",
+ pj_ntohs((*attr)->type), msg->attr_count));
+ return -1;
+ }
+
+ msg_len = (pj_uint16_t)(msg_len - len);
+ p_attr += len;
+ ++msg->attr_count;
+ }
+
+ return 0;
+}
+
+PJ_DEF(void*) pj_stun_msg_find_attr( pj_stun_msg *msg, pj_stun_attr_type t)
+{
+ int i;
+
+ PJ_CHECK_STACK();
+
+ for (i=0; i<msg->attr_count; ++i) {
+ pj_stun_attr_hdr *attr = msg->attr[i];
+ if (pj_ntohs(attr->type) == t)
+ return attr;
+ }
+
+ return 0;
+}
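A sketch of feeding a freshly received datagram through the parser above. The function name and parameters are hypothetical; the attribute members (addr, port) follow the way stun_client.c below uses them.

static void handle_stun_packet(char *pkt, pj_ssize_t len)
{
    pj_stun_msg msg;
    pj_stun_mapped_addr_attr *ma;

    if (pj_stun_parse_msg(pkt, len, &msg) != 0)
        return;     /* not a STUN message we recognize */

    if (pj_ntohs(msg.hdr->type) != PJ_STUN_BINDING_RESPONSE)
        return;

    /* Look up the MAPPED-ADDRESS attribute, if present. */
    ma = (pj_stun_mapped_addr_attr*)
         pj_stun_msg_find_attr(&msg, PJ_STUN_ATTR_MAPPED_ADDR);
    if (ma) {
        /* ma->addr and ma->port are in network byte order. */
    }
}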
diff --git a/pjlib/src/pj/stun_client.c b/pjlib/src/pj/stun_client.c
new file mode 100644
index 00000000..c35a8541
--- /dev/null
+++ b/pjlib/src/pj/stun_client.c
@@ -0,0 +1,270 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/stun_client.c 6 10/14/05 12:26a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/stun_client.c $
+ *
+ * 6 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 5 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ */
+#include <pj/stun.h>
+#include <pj/pool.h>
+#include <pj/log.h>
+#include <pj/string.h>
+#include <pj/os.h>
+#include <pj/sock_select.h>
+
+enum { MAX_REQUEST = 3 };
+static int stun_timer[] = {1600, 1600, 1600 };
+
+#define THIS_FILE "stunclient"
+#define LOG_ADDR(addr) pj_inet_ntoa(addr.sin_addr), pj_ntohs(addr.sin_port)
+
+
+PJ_DEF(pj_status_t) pj_stun_get_mapped_addr( pj_pool_factory *pf,
+ int sock_cnt, pj_sock_t sock[],
+ const pj_str_t *srv1, int port1,
+ const pj_str_t *srv2, int port2,
+ pj_sockaddr_in mapped_addr[])
+{
+ pj_sockaddr_in srv_addr[2];
+ int i, j, rc, send_cnt = 0;
+ pj_pool_t *pool;
+ struct {
+ struct {
+ pj_uint32_t mapped_addr;
+ pj_uint32_t mapped_port;
+ } srv[2];
+ } *rec;
+ void *out_msg;
+ pj_size_t out_msg_len;
+ int wait_resp = 0;
+ int mapped_status = 0;
+
+ PJ_CHECK_STACK();
+
+ /* Create pool. */
+ pool = pj_pool_create(pf, "stun%p", 1024, 1024, NULL);
+ if (!pool) {
+ mapped_status = PJ_STUN_ERR_MEMORY;
+ return -1;
+ }
+
+ /* Allocate client records */
+ rec = pj_pool_calloc(pool, sock_cnt, sizeof(*rec));
+ if (!rec) {
+ mapped_status = PJ_STUN_ERR_MEMORY;
+ goto on_error;
+ }
+
+ /* Create the outgoing BIND REQUEST message template */
+ rc = pj_stun_create_bind_req( pool, &out_msg, &out_msg_len, 0, 0);
+ if (rc != 0) {
+ mapped_status = -1;
+ goto on_error;
+ }
+
+ /* Resolve servers. */
+ if (pj_sockaddr_in_init(&srv_addr[0], srv1, (pj_uint16_t)port1) != 0) {
+ mapped_status = PJ_STUN_ERR_RESOLVE;
+ goto on_error;
+ }
+ if (pj_sockaddr_in_init(&srv_addr[1], srv2, (pj_uint16_t)port2) != 0) {
+ mapped_status = PJ_STUN_ERR_RESOLVE;
+ goto on_error;
+ }
+
+ /* Init mapped addresses to zero */
+ pj_memset(mapped_addr, 0, sock_cnt * sizeof(pj_sockaddr_in));
+
+ /* Main retransmission loop. */
+ for (send_cnt=0; send_cnt<MAX_REQUEST; ++send_cnt) {
+ pj_time_val next_tx, now;
+ pj_fd_set_t r;
+ int select_rc;
+
+ PJ_LOG(4,(THIS_FILE, "STUN retransmit %d, wait_resp=%d",
+ send_cnt, wait_resp));
+
+ PJ_FD_ZERO(&r);
+
+ /* Send messages to servers that have not given us a response. */
+ for (i=0; i<sock_cnt && mapped_status==0; ++i) {
+ for (j=0; j<2 && mapped_status==0; ++j) {
+ pj_stun_msg_hdr *msg_hdr = out_msg;
+ pj_ssize_t sent_len;
+
+ if (rec[i].srv[j].mapped_port != 0)
+ continue;
+
+ /* Modify message so that we can distinguish response. */
+ msg_hdr->tsx[2] = pj_htonl(i);
+ msg_hdr->tsx[3] = pj_htonl(j);
+
+ /* Send! */
+ sent_len = out_msg_len;
+ rc = pj_sock_sendto(sock[i], out_msg, &sent_len, 0,
+ (pj_sockaddr_t*)&srv_addr[j],
+ sizeof(pj_sockaddr_in));
+ if (sent_len != (int)out_msg_len) {
+ PJ_LOG(4,(THIS_FILE,
+ "Error sending STUN request to %s:%d",
+ LOG_ADDR(srv_addr[j])));
+ mapped_status = PJ_STUN_ERR_TRANSPORT;
+ } else {
+ ++wait_resp;
+ }
+ }
+ }
+
+ /* All requests sent.
+ * The loop below waits for responses until all responses have
+ * been received (i.e. wait_resp==0) or a timeout occurs, in which
+ * case we proceed to the next retransmission iteration.
+ */
+
+ /* Calculate time of next retransmission. */
+ pj_gettimeofday(&next_tx);
+ next_tx.sec += (stun_timer[send_cnt]/1000);
+ next_tx.msec += (stun_timer[send_cnt]%1000);
+ pj_time_val_normalize(&next_tx);
+
+ for (pj_gettimeofday(&now), select_rc=1;
+ mapped_status==0 && select_rc==1 && wait_resp>0 && PJ_TIME_VAL_LT(now, next_tx);
+ pj_gettimeofday(&now))
+ {
+ pj_time_val timeout;
+
+ timeout = next_tx;
+ PJ_TIME_VAL_SUB(timeout, now);
+
+ for (i=0; i<sock_cnt; ++i) {
+ PJ_FD_SET(sock[i], &r);
+ }
+
+ select_rc = pj_sock_select(FD_SETSIZE, &r, NULL, NULL, &timeout);
+ if (select_rc < 1)
+ continue;
+
+ for (i=0; i<sock_cnt; ++i) {
+ int sock_idx, srv_idx;
+ pj_ssize_t len;
+ pj_stun_msg msg;
+ pj_sockaddr_in addr;
+ int addrlen = sizeof(addr);
+ pj_stun_mapped_addr_attr *attr;
+ char recv_buf[128];
+
+ if (!PJ_FD_ISSET(sock[i], &r))
+ continue;
+
+ len = sizeof(recv_buf);
+ pj_sock_recvfrom( sock[i], recv_buf,
+ &len, 0,
+ (pj_sockaddr_t*)&addr,
+ &addrlen);
+
+ --wait_resp;
+
+ if (len < 1) {
+ mapped_status = PJ_STUN_ERR_TRANSPORT;
+ continue;
+ }
+
+ if (pj_stun_parse_msg(recv_buf, len, &msg) != 0) {
+ PJ_LOG(4,(THIS_FILE,
+ "Error parsing STUN response from %s:%d",
+ LOG_ADDR(addr)));
+ mapped_status = PJ_STUN_ERR_INVALID_MSG;
+ continue;
+ }
+
+ sock_idx = pj_ntohl(msg.hdr->tsx[2]);
+ srv_idx = pj_ntohl(msg.hdr->tsx[3]);
+
+ if (sock_idx<0 || sock_idx>=sock_cnt || srv_idx<0 || srv_idx>=2) {
+ PJ_LOG(4,(THIS_FILE,
+ "Invalid transaction ID from %s:%d",
+ LOG_ADDR(addr)));
+ mapped_status = PJ_STUN_ERR_INVALID_MSG;
+ continue;
+ }
+
+ if (pj_ntohs(msg.hdr->type) != PJ_STUN_BINDING_RESPONSE) {
+ PJ_LOG(4,(THIS_FILE,
+ "Non binding response %d from %s:%d",
+ pj_ntohs(msg.hdr->type), LOG_ADDR(addr)));
+ mapped_status = PJ_STUN_ERR_INVALID_MSG;
+ continue;
+ }
+
+ if (pj_stun_msg_find_attr(&msg, PJ_STUN_ATTR_ERROR_CODE) != NULL) {
+ PJ_LOG(4,(THIS_FILE,
+ "Got STUN error attribute from %s:%d",
+ LOG_ADDR(addr)));
+ mapped_status = PJ_STUN_ERR_INVALID_MSG;
+ continue;
+ }
+
+ attr = (void*)pj_stun_msg_find_attr(&msg, PJ_STUN_ATTR_MAPPED_ADDR);
+ if (!attr) {
+ PJ_LOG(4,(THIS_FILE,
+ "No mapped address in response from %s:%d",
+ LOG_ADDR(addr)));
+ mapped_status = PJ_STUN_ERR_INVALID_MSG;
+ continue;
+ }
+
+ rec[sock_idx].srv[srv_idx].mapped_addr = attr->addr;
+ rec[sock_idx].srv[srv_idx].mapped_port = attr->port;
+ }
+ }
+
+ /* The best case is when all requests have been answered;
+ * then we don't need to go to the next retransmission iteration.
+ */
+ if (wait_resp <= 0)
+ break;
+ }
+
+ for (i=0; i<sock_cnt && mapped_status==0; ++i) {
+ if (rec[i].srv[0].mapped_addr == rec[i].srv[1].mapped_addr &&
+ rec[i].srv[0].mapped_port == rec[i].srv[1].mapped_port)
+ {
+ mapped_addr[i].sin_family = PJ_AF_INET;
+ mapped_addr[i].sin_addr.s_addr = rec[i].srv[0].mapped_addr;
+ mapped_addr[i].sin_port = (pj_uint16_t)rec[i].srv[0].mapped_port;
+
+ if (rec[i].srv[0].mapped_addr == 0 || rec[i].srv[0].mapped_port == 0) {
+ mapped_status = PJ_STUN_ERR_NO_RESPONSE;
+ }
+ } else {
+ mapped_status = PJ_STUN_ERR_SYMETRIC;
+ }
+ }
+
+ pj_pool_release(pool);
+
+ return mapped_status;
+
+on_error:
+ if (pool) pj_pool_release(pool);
+ return -1;
+}
+
+PJ_DEF(const char*) pj_stun_get_err_msg(pj_status_t status)
+{
+ switch (status) {
+ case 0: return "No error";
+ case -1: return "General error";
+ case PJ_STUN_ERR_MEMORY: return "Memory allocation failed";
+ case PJ_STUN_ERR_RESOLVE: return "Invalid IP or unable to resolve STUN server";
+ case PJ_STUN_ERR_TRANSPORT: return "Unable to contact STUN server";
+ case PJ_STUN_ERR_INVALID_MSG: return "Invalid response from STUN server";
+ case PJ_STUN_ERR_NO_RESPONSE: return "No response from STUN server";
+ case PJ_STUN_ERR_SYMETRIC: return "Different mappings are returned from servers";
+ }
+ return "Unknown error";
+}
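A sketch of resolving the public address of a single UDP socket through two STUN servers with the client above. The server names are placeholders, and `pf` is assumed to be a pj_pool_factory obtained from an initialized caching pool.

static pj_status_t resolve_public_addr(pj_pool_factory *pf, pj_sock_t sock)
{
    pj_sockaddr_in mapped;
    pj_str_t srv1, srv2;
    pj_status_t rc;

    pj_cstr(&srv1, "stun1.example.com");    /* placeholder host names */
    pj_cstr(&srv2, "stun2.example.com");

    rc = pj_stun_get_mapped_addr(pf, 1, &sock,
                                 &srv1, 3478, &srv2, 3478, &mapped);
    if (rc != 0) {
        PJ_LOG(3,("app", "STUN error: %s", pj_stun_get_err_msg(rc)));
        return rc;
    }

    PJ_LOG(3,("app", "Mapped address: %s:%d",
              pj_inet_ntoa(mapped.sin_addr), pj_ntohs(mapped.sin_port)));
    return 0;
}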
diff --git a/pjlib/src/pj/symbols.c b/pjlib/src/pj/symbols.c
new file mode 100644
index 00000000..8c22ad8a
--- /dev/null
+++ b/pjlib/src/pj/symbols.c
@@ -0,0 +1,404 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/symbols.c 3 10/29/05 11:51a Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pj/symbols.c $
+ *
+ * 3 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/05/05 4:43p Bennylp
+ * Created.
+ *
+ */
+#include <pjlib.h>
+
+/*
+ * addr_resolv.h
+ */
+PJ_EXPORT_SYMBOL(pj_gethostbyname)
+
+/*
+ * array.h
+ */
+PJ_EXPORT_SYMBOL(pj_array_insert)
+PJ_EXPORT_SYMBOL(pj_array_erase)
+PJ_EXPORT_SYMBOL(pj_array_find)
+
+/*
+ * config.h
+ */
+PJ_EXPORT_SYMBOL(pj_dump_config)
+
+/*
+ * errno.h
+ */
+PJ_EXPORT_SYMBOL(pj_get_os_error)
+PJ_EXPORT_SYMBOL(pj_set_os_error)
+PJ_EXPORT_SYMBOL(pj_get_netos_error)
+PJ_EXPORT_SYMBOL(pj_set_netos_error)
+PJ_EXPORT_SYMBOL(pj_strerror)
+
+/*
+ * except.h
+ */
+PJ_EXPORT_SYMBOL(pj_throw_exception_)
+PJ_EXPORT_SYMBOL(pj_push_exception_handler_)
+PJ_EXPORT_SYMBOL(pj_pop_exception_handler_)
+PJ_EXPORT_SYMBOL(pj_setjmp)
+PJ_EXPORT_SYMBOL(pj_longjmp)
+PJ_EXPORT_SYMBOL(pj_exception_id_alloc)
+PJ_EXPORT_SYMBOL(pj_exception_id_free)
+PJ_EXPORT_SYMBOL(pj_exception_id_name)
+
+
+/*
+ * fifobuf.h
+ */
+PJ_EXPORT_SYMBOL(pj_fifobuf_init)
+PJ_EXPORT_SYMBOL(pj_fifobuf_max_size)
+PJ_EXPORT_SYMBOL(pj_fifobuf_alloc)
+PJ_EXPORT_SYMBOL(pj_fifobuf_unalloc)
+PJ_EXPORT_SYMBOL(pj_fifobuf_free)
+
+/*
+ * guid.h
+ */
+PJ_EXPORT_SYMBOL(pj_generate_unique_string)
+PJ_EXPORT_SYMBOL(pj_create_unique_string)
+
+/*
+ * hash.h
+ */
+PJ_EXPORT_SYMBOL(pj_hash_calc)
+PJ_EXPORT_SYMBOL(pj_hash_create)
+PJ_EXPORT_SYMBOL(pj_hash_get)
+PJ_EXPORT_SYMBOL(pj_hash_set)
+PJ_EXPORT_SYMBOL(pj_hash_count)
+PJ_EXPORT_SYMBOL(pj_hash_first)
+PJ_EXPORT_SYMBOL(pj_hash_next)
+PJ_EXPORT_SYMBOL(pj_hash_this)
+
+/*
+ * ioqueue.h
+ */
+PJ_EXPORT_SYMBOL(pj_ioqueue_create)
+PJ_EXPORT_SYMBOL(pj_ioqueue_destroy)
+PJ_EXPORT_SYMBOL(pj_ioqueue_set_lock)
+PJ_EXPORT_SYMBOL(pj_ioqueue_register_sock)
+PJ_EXPORT_SYMBOL(pj_ioqueue_unregister)
+PJ_EXPORT_SYMBOL(pj_ioqueue_get_user_data)
+PJ_EXPORT_SYMBOL(pj_ioqueue_poll)
+PJ_EXPORT_SYMBOL(pj_ioqueue_read)
+PJ_EXPORT_SYMBOL(pj_ioqueue_recv)
+PJ_EXPORT_SYMBOL(pj_ioqueue_recvfrom)
+PJ_EXPORT_SYMBOL(pj_ioqueue_write)
+PJ_EXPORT_SYMBOL(pj_ioqueue_send)
+PJ_EXPORT_SYMBOL(pj_ioqueue_sendto)
+#if defined(PJ_HAS_TCP) && PJ_HAS_TCP != 0
+PJ_EXPORT_SYMBOL(pj_ioqueue_accept)
+PJ_EXPORT_SYMBOL(pj_ioqueue_connect)
+#endif
+
+/*
+ * list.h
+ */
+PJ_EXPORT_SYMBOL(pj_list_insert_before)
+PJ_EXPORT_SYMBOL(pj_list_insert_nodes_before)
+PJ_EXPORT_SYMBOL(pj_list_insert_after)
+PJ_EXPORT_SYMBOL(pj_list_insert_nodes_after)
+PJ_EXPORT_SYMBOL(pj_list_merge_first)
+PJ_EXPORT_SYMBOL(pj_list_merge_last)
+PJ_EXPORT_SYMBOL(pj_list_erase)
+PJ_EXPORT_SYMBOL(pj_list_find_node)
+PJ_EXPORT_SYMBOL(pj_list_search)
+
+
+/*
+ * log.h
+ */
+PJ_EXPORT_SYMBOL(pj_log_write)
+#if PJ_LOG_MAX_LEVEL >= 1
+PJ_EXPORT_SYMBOL(pj_log_set_log_func)
+PJ_EXPORT_SYMBOL(pj_log_get_log_func)
+PJ_EXPORT_SYMBOL(pj_log_set_level)
+PJ_EXPORT_SYMBOL(pj_log_get_level)
+PJ_EXPORT_SYMBOL(pj_log_set_decor)
+PJ_EXPORT_SYMBOL(pj_log_get_decor)
+PJ_EXPORT_SYMBOL(pj_log_1)
+#endif
+#if PJ_LOG_MAX_LEVEL >= 2
+PJ_EXPORT_SYMBOL(pj_log_2)
+#endif
+#if PJ_LOG_MAX_LEVEL >= 3
+PJ_EXPORT_SYMBOL(pj_log_3)
+#endif
+#if PJ_LOG_MAX_LEVEL >= 4
+PJ_EXPORT_SYMBOL(pj_log_4)
+#endif
+#if PJ_LOG_MAX_LEVEL >= 5
+PJ_EXPORT_SYMBOL(pj_log_5)
+#endif
+#if PJ_LOG_MAX_LEVEL >= 6
+PJ_EXPORT_SYMBOL(pj_log_6)
+#endif
+
+/*
+ * md5.h
+ */
+PJ_EXPORT_SYMBOL(md5_init)
+PJ_EXPORT_SYMBOL(md5_append)
+PJ_EXPORT_SYMBOL(md5_finish)
+
+
+/*
+ * os.h
+ */
+PJ_EXPORT_SYMBOL(pj_init)
+PJ_EXPORT_SYMBOL(pj_getpid)
+PJ_EXPORT_SYMBOL(pj_thread_register)
+PJ_EXPORT_SYMBOL(pj_thread_create)
+PJ_EXPORT_SYMBOL(pj_thread_get_name)
+PJ_EXPORT_SYMBOL(pj_thread_resume)
+PJ_EXPORT_SYMBOL(pj_thread_this)
+PJ_EXPORT_SYMBOL(pj_thread_join)
+PJ_EXPORT_SYMBOL(pj_thread_destroy)
+PJ_EXPORT_SYMBOL(pj_thread_sleep)
+#if defined(PJ_OS_HAS_CHECK_STACK) && PJ_OS_HAS_CHECK_STACK != 0
+PJ_EXPORT_SYMBOL(pj_thread_check_stack)
+PJ_EXPORT_SYMBOL(pj_thread_get_stack_max_usage)
+PJ_EXPORT_SYMBOL(pj_thread_get_stack_info)
+#endif
+PJ_EXPORT_SYMBOL(pj_atomic_create)
+PJ_EXPORT_SYMBOL(pj_atomic_destroy)
+PJ_EXPORT_SYMBOL(pj_atomic_set)
+PJ_EXPORT_SYMBOL(pj_atomic_get)
+PJ_EXPORT_SYMBOL(pj_atomic_inc)
+PJ_EXPORT_SYMBOL(pj_atomic_dec)
+PJ_EXPORT_SYMBOL(pj_thread_local_alloc)
+PJ_EXPORT_SYMBOL(pj_thread_local_free)
+PJ_EXPORT_SYMBOL(pj_thread_local_set)
+PJ_EXPORT_SYMBOL(pj_thread_local_get)
+PJ_EXPORT_SYMBOL(pj_enter_critical_section)
+PJ_EXPORT_SYMBOL(pj_leave_critical_section)
+PJ_EXPORT_SYMBOL(pj_mutex_create)
+PJ_EXPORT_SYMBOL(pj_mutex_lock)
+PJ_EXPORT_SYMBOL(pj_mutex_unlock)
+PJ_EXPORT_SYMBOL(pj_mutex_trylock)
+PJ_EXPORT_SYMBOL(pj_mutex_destroy)
+#if defined(PJ_DEBUG) && PJ_DEBUG != 0
+PJ_EXPORT_SYMBOL(pj_mutex_is_locked)
+#endif
+#if defined(PJ_HAS_SEMAPHORE) && PJ_HAS_SEMAPHORE != 0
+PJ_EXPORT_SYMBOL(pj_sem_create)
+PJ_EXPORT_SYMBOL(pj_sem_wait)
+PJ_EXPORT_SYMBOL(pj_sem_trywait)
+PJ_EXPORT_SYMBOL(pj_sem_post)
+PJ_EXPORT_SYMBOL(pj_sem_destroy)
+#endif
+PJ_EXPORT_SYMBOL(pj_gettimeofday)
+PJ_EXPORT_SYMBOL(pj_time_decode)
+#if defined(PJ_HAS_HIGH_RES_TIMER) && PJ_HAS_HIGH_RES_TIMER != 0
+PJ_EXPORT_SYMBOL(pj_get_timestamp)
+PJ_EXPORT_SYMBOL(pj_get_timestamp_freq)
+PJ_EXPORT_SYMBOL(pj_elapsed_time)
+PJ_EXPORT_SYMBOL(pj_elapsed_usec)
+PJ_EXPORT_SYMBOL(pj_elapsed_nanosec)
+PJ_EXPORT_SYMBOL(pj_elapsed_cycle)
+#endif
+
+
+/*
+ * pool.h
+ */
+PJ_EXPORT_SYMBOL(pj_pool_create)
+PJ_EXPORT_SYMBOL(pj_pool_release)
+PJ_EXPORT_SYMBOL(pj_pool_getobjname)
+PJ_EXPORT_SYMBOL(pj_pool_reset)
+PJ_EXPORT_SYMBOL(pj_pool_get_capacity)
+PJ_EXPORT_SYMBOL(pj_pool_get_used_size)
+PJ_EXPORT_SYMBOL(pj_pool_alloc)
+PJ_EXPORT_SYMBOL(pj_pool_calloc)
+PJ_EXPORT_SYMBOL(pj_pool_factory_default_policy)
+PJ_EXPORT_SYMBOL(pj_pool_create_int)
+PJ_EXPORT_SYMBOL(pj_pool_init_int)
+PJ_EXPORT_SYMBOL(pj_pool_destroy_int)
+PJ_EXPORT_SYMBOL(pj_caching_pool_init)
+PJ_EXPORT_SYMBOL(pj_caching_pool_destroy)
+
+/*
+ * rand.h
+ */
+PJ_EXPORT_SYMBOL(pj_rand)
+PJ_EXPORT_SYMBOL(pj_srand)
+
+/*
+ * rbtree.h
+ */
+PJ_EXPORT_SYMBOL(pj_rbtree_init)
+PJ_EXPORT_SYMBOL(pj_rbtree_first)
+PJ_EXPORT_SYMBOL(pj_rbtree_last)
+PJ_EXPORT_SYMBOL(pj_rbtree_next)
+PJ_EXPORT_SYMBOL(pj_rbtree_prev)
+PJ_EXPORT_SYMBOL(pj_rbtree_insert)
+PJ_EXPORT_SYMBOL(pj_rbtree_find)
+PJ_EXPORT_SYMBOL(pj_rbtree_erase)
+PJ_EXPORT_SYMBOL(pj_rbtree_max_height)
+PJ_EXPORT_SYMBOL(pj_rbtree_min_height)
+
+/*
+ * scanner.h
+ */
+PJ_EXPORT_SYMBOL(pj_cs_init)
+PJ_EXPORT_SYMBOL(pj_cs_set)
+PJ_EXPORT_SYMBOL(pj_cs_add_range)
+PJ_EXPORT_SYMBOL(pj_cs_add_alpha)
+PJ_EXPORT_SYMBOL(pj_cs_add_num)
+PJ_EXPORT_SYMBOL(pj_cs_add_str)
+PJ_EXPORT_SYMBOL(pj_cs_del_range)
+PJ_EXPORT_SYMBOL(pj_cs_del_str)
+PJ_EXPORT_SYMBOL(pj_cs_invert)
+PJ_EXPORT_SYMBOL(pj_scan_init)
+PJ_EXPORT_SYMBOL(pj_scan_fini)
+PJ_EXPORT_SYMBOL(pj_scan_peek)
+PJ_EXPORT_SYMBOL(pj_scan_peek_n)
+PJ_EXPORT_SYMBOL(pj_scan_peek_until)
+PJ_EXPORT_SYMBOL(pj_scan_get)
+PJ_EXPORT_SYMBOL(pj_scan_get_quote)
+PJ_EXPORT_SYMBOL(pj_scan_get_n)
+PJ_EXPORT_SYMBOL(pj_scan_get_char)
+PJ_EXPORT_SYMBOL(pj_scan_get_newline)
+PJ_EXPORT_SYMBOL(pj_scan_get_until)
+PJ_EXPORT_SYMBOL(pj_scan_get_until_ch)
+PJ_EXPORT_SYMBOL(pj_scan_get_until_chr)
+PJ_EXPORT_SYMBOL(pj_scan_advance_n)
+PJ_EXPORT_SYMBOL(pj_scan_strcmp)
+PJ_EXPORT_SYMBOL(pj_scan_stricmp)
+PJ_EXPORT_SYMBOL(pj_scan_skip_whitespace)
+PJ_EXPORT_SYMBOL(pj_scan_save_state)
+PJ_EXPORT_SYMBOL(pj_scan_restore_state)
+
+/*
+ * sock.h
+ */
+PJ_EXPORT_SYMBOL(PJ_AF_UNIX)
+PJ_EXPORT_SYMBOL(PJ_AF_INET)
+PJ_EXPORT_SYMBOL(PJ_AF_INET6)
+PJ_EXPORT_SYMBOL(PJ_AF_PACKET)
+PJ_EXPORT_SYMBOL(PJ_AF_IRDA)
+PJ_EXPORT_SYMBOL(PJ_SOCK_STREAM)
+PJ_EXPORT_SYMBOL(PJ_SOCK_DGRAM)
+PJ_EXPORT_SYMBOL(PJ_SOCK_RAW)
+PJ_EXPORT_SYMBOL(PJ_SOCK_RDM)
+PJ_EXPORT_SYMBOL(PJ_SOL_SOCKET)
+PJ_EXPORT_SYMBOL(PJ_SOL_IP)
+PJ_EXPORT_SYMBOL(PJ_SOL_TCP)
+PJ_EXPORT_SYMBOL(PJ_SOL_UDP)
+PJ_EXPORT_SYMBOL(PJ_SOL_IPV6)
+PJ_EXPORT_SYMBOL(pj_ntohs)
+PJ_EXPORT_SYMBOL(pj_htons)
+PJ_EXPORT_SYMBOL(pj_ntohl)
+PJ_EXPORT_SYMBOL(pj_htonl)
+PJ_EXPORT_SYMBOL(pj_inet_ntoa)
+PJ_EXPORT_SYMBOL(pj_inet_aton)
+PJ_EXPORT_SYMBOL(pj_inet_addr)
+PJ_EXPORT_SYMBOL(pj_sockaddr_in_set_str_addr)
+PJ_EXPORT_SYMBOL(pj_sockaddr_in_init)
+PJ_EXPORT_SYMBOL(pj_gethostname)
+PJ_EXPORT_SYMBOL(pj_gethostaddr)
+PJ_EXPORT_SYMBOL(pj_sock_socket)
+PJ_EXPORT_SYMBOL(pj_sock_close)
+PJ_EXPORT_SYMBOL(pj_sock_bind)
+PJ_EXPORT_SYMBOL(pj_sock_bind_in)
+#if defined(PJ_HAS_TCP) && PJ_HAS_TCP != 0
+PJ_EXPORT_SYMBOL(pj_sock_listen)
+PJ_EXPORT_SYMBOL(pj_sock_accept)
+PJ_EXPORT_SYMBOL(pj_sock_shutdown)
+#endif
+PJ_EXPORT_SYMBOL(pj_sock_connect)
+PJ_EXPORT_SYMBOL(pj_sock_getpeername)
+PJ_EXPORT_SYMBOL(pj_sock_getsockname)
+PJ_EXPORT_SYMBOL(pj_sock_getsockopt)
+PJ_EXPORT_SYMBOL(pj_sock_setsockopt)
+PJ_EXPORT_SYMBOL(pj_sock_recv)
+PJ_EXPORT_SYMBOL(pj_sock_recvfrom)
+PJ_EXPORT_SYMBOL(pj_sock_send)
+PJ_EXPORT_SYMBOL(pj_sock_sendto)
+
+/*
+ * sock_select.h
+ */
+PJ_EXPORT_SYMBOL(PJ_FD_ZERO)
+PJ_EXPORT_SYMBOL(PJ_FD_SET)
+PJ_EXPORT_SYMBOL(PJ_FD_CLR)
+PJ_EXPORT_SYMBOL(PJ_FD_ISSET)
+PJ_EXPORT_SYMBOL(pj_sock_select)
+
+/*
+ * string.h
+ */
+PJ_EXPORT_SYMBOL(pj_str)
+PJ_EXPORT_SYMBOL(pj_strassign)
+PJ_EXPORT_SYMBOL(pj_strcpy)
+PJ_EXPORT_SYMBOL(pj_strcpy2)
+PJ_EXPORT_SYMBOL(pj_strdup)
+PJ_EXPORT_SYMBOL(pj_strdup_with_null)
+PJ_EXPORT_SYMBOL(pj_strdup2)
+PJ_EXPORT_SYMBOL(pj_strdup3)
+PJ_EXPORT_SYMBOL(pj_strcmp)
+PJ_EXPORT_SYMBOL(pj_strcmp2)
+PJ_EXPORT_SYMBOL(pj_strncmp)
+PJ_EXPORT_SYMBOL(pj_strncmp2)
+PJ_EXPORT_SYMBOL(pj_stricmp)
+PJ_EXPORT_SYMBOL(pj_stricmp2)
+PJ_EXPORT_SYMBOL(pj_strnicmp)
+PJ_EXPORT_SYMBOL(pj_strnicmp2)
+PJ_EXPORT_SYMBOL(pj_strcat)
+PJ_EXPORT_SYMBOL(pj_strltrim)
+PJ_EXPORT_SYMBOL(pj_strrtrim)
+PJ_EXPORT_SYMBOL(pj_strtrim)
+PJ_EXPORT_SYMBOL(pj_create_random_string)
+PJ_EXPORT_SYMBOL(pj_strtoul)
+PJ_EXPORT_SYMBOL(pj_utoa)
+PJ_EXPORT_SYMBOL(pj_utoa_pad)
+
+/*
+ * stun.h
+ */
+PJ_EXPORT_SYMBOL(pj_stun_create_bind_req)
+PJ_EXPORT_SYMBOL(pj_stun_parse_msg)
+PJ_EXPORT_SYMBOL(pj_stun_msg_find_attr)
+PJ_EXPORT_SYMBOL(pj_stun_get_mapped_addr)
+PJ_EXPORT_SYMBOL(pj_stun_get_err_msg)
+
+/*
+ * timer.h
+ */
+PJ_EXPORT_SYMBOL(pj_timer_heap_mem_size)
+PJ_EXPORT_SYMBOL(pj_timer_heap_create)
+PJ_EXPORT_SYMBOL(pj_timer_entry_init)
+PJ_EXPORT_SYMBOL(pj_timer_heap_schedule)
+PJ_EXPORT_SYMBOL(pj_timer_heap_cancel)
+PJ_EXPORT_SYMBOL(pj_timer_heap_count)
+PJ_EXPORT_SYMBOL(pj_timer_heap_earliest_time)
+PJ_EXPORT_SYMBOL(pj_timer_heap_poll)
+
+/*
+ * types.h
+ */
+PJ_EXPORT_SYMBOL(pj_time_val_normalize)
+
+/*
+ * xml.h
+ */
+PJ_EXPORT_SYMBOL(pj_xml_parse)
+PJ_EXPORT_SYMBOL(pj_xml_print)
+PJ_EXPORT_SYMBOL(pj_xml_add_node)
+PJ_EXPORT_SYMBOL(pj_xml_add_attr)
+PJ_EXPORT_SYMBOL(pj_xml_find_node)
+PJ_EXPORT_SYMBOL(pj_xml_find_next_node)
+PJ_EXPORT_SYMBOL(pj_xml_find_attr)
+PJ_EXPORT_SYMBOL(pj_xml_find)
+
diff --git a/pjlib/src/pj/timer.c b/pjlib/src/pj/timer.c
new file mode 100644
index 00000000..5cd09ea4
--- /dev/null
+++ b/pjlib/src/pj/timer.c
@@ -0,0 +1,504 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/timer.c 8 10/14/05 12:26a Bennylp $ */
+/* (C)1993-2003 Douglas C. Schmidt
+ *
+ * This file is originally from the ACE library by Doug Schmidt
+ * ACE(TM), TAO(TM) and CIAO(TM) are copyrighted by Douglas C. Schmidt and his research
+ * group at Washington University, University of California, Irvine, and Vanderbilt
+ * University Copyright (c) 1993-2003, all rights reserved.
+ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/timer.c $
+ *
+ * 8 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 7 9/21/05 1:39p Bennylp
+ * Periodic checkin for backup.
+ *
+ * 6 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ */
+#include <pj/timer.h>
+#include <pj/pool.h>
+#include <pj/os.h>
+#include <pj/string.h>
+#include <pj/assert.h>
+#include <pj/errno.h>
+
+#define HEAP_PARENT(X) (X == 0 ? 0 : (((X) - 1) / 2))
+#define HEAP_LEFT(X) (((X)+(X))+1)
+
+
+/**
+ * The implementation of timer heap.
+ */
+struct pj_timer_heap_t
+{
+ /** Pool from which the timer heap will get additional storage when it is resized. */
+ pj_pool_t *pool;
+
+ /** Maximum size of the heap. */
+ pj_size_t max_size;
+
+ /** Current size of the heap. */
+ pj_size_t cur_size;
+
+ /** Mutex for synchronization, or NULL */
+ pj_mutex_t *mutex;
+
+ /**
+ * Current contents of the Heap, which is organized as a "heap" of
+ * pj_timer_entry *'s. In this context, a heap is a "partially
+ * ordered, almost complete" binary tree, which is stored in an
+ * array.
+ */
+ pj_timer_entry **heap;
+
+ /**
+ * An array of "pointers" that allows each pj_timer_entry in the
+ * <heap_> to be located in O(1) time. Basically, <timer_id_[i]>
+ * contains the slot in the <heap_> array where a pj_timer_entry
+ * with timer id <i> resides. Thus, the timer id passed back from
+ * <schedule_entry> is really a slot into the <timer_ids> array. The
+ * <timer_ids_> array serves two purposes: negative values are
+ * treated as "pointers" for the <freelist_>, whereas positive
+ * values are treated as "pointers" into the <heap_> array.
+ */
+ pj_timer_id_t *timer_ids;
+
+ /**
+ * "Pointer" to the first element in the freelist contained within
+ * the <timer_ids_> array, which is organized as a stack.
+ */
+ pj_timer_id_t timer_ids_freelist;
+
+ /** Callback to be called when a timer expires. */
+ pj_timer_heap_callback *callback;
+
+};
+
+
+
+PJ_INLINE(void) lock_timer_heap( pj_timer_heap_t *ht )
+{
+ if (ht->mutex) {
+ pj_mutex_lock(ht->mutex);
+ }
+}
+
+PJ_INLINE(void) unlock_timer_heap( pj_timer_heap_t *ht )
+{
+ if (ht->mutex) {
+ pj_mutex_unlock(ht->mutex);
+ }
+}
+
+
+static void copy_node( pj_timer_heap_t *ht, int slot, pj_timer_entry *moved_node )
+{
+ PJ_CHECK_STACK();
+
+ // Insert <moved_node> into its new location in the heap.
+ ht->heap[slot] = moved_node;
+
+ // Update the corresponding slot in the parallel <timer_ids_> array.
+ ht->timer_ids[moved_node->_timer_id] = slot;
+}
+
+static pj_timer_id_t pop_freelist( pj_timer_heap_t *ht )
+{
+ // We need to truncate this to <int> for backwards compatibility.
+ pj_timer_id_t new_id = ht->timer_ids_freelist;
+
+ PJ_CHECK_STACK();
+
+ // The freelist values in the <timer_ids_> are negative, so we need
+ // to negate them to get the next freelist "pointer."
+ ht->timer_ids_freelist =
+ -ht->timer_ids[ht->timer_ids_freelist];
+
+ return new_id;
+
+}
+
+static void push_freelist (pj_timer_heap_t *ht, pj_timer_id_t old_id)
+{
+ PJ_CHECK_STACK();
+
+ // The freelist values in the <timer_ids_> are negative, so we need
+ // to negate them to get the next freelist "pointer."
+ ht->timer_ids[old_id] = -ht->timer_ids_freelist;
+ ht->timer_ids_freelist = old_id;
+}
+
+
+static void reheap_down(pj_timer_heap_t *ht, pj_timer_entry *moved_node,
+ size_t slot, size_t child)
+{
+ PJ_CHECK_STACK();
+
+ // Restore the heap property after a deletion.
+
+ while (child < ht->cur_size)
+ {
+ // Choose the smaller of the two children.
+ if (child + 1 < ht->cur_size
+ && PJ_TIME_VAL_LT(ht->heap[child + 1]->_timer_value, ht->heap[child]->_timer_value))
+ child++;
+
+ // Perform a <copy> if the child has a larger timeout value than
+ // the <moved_node>.
+ if (PJ_TIME_VAL_LT(ht->heap[child]->_timer_value, moved_node->_timer_value))
+ {
+ copy_node( ht, slot, ht->heap[child]);
+ slot = child;
+ child = HEAP_LEFT(child);
+ }
+ else
+ // We've found our location in the heap.
+ break;
+ }
+
+ copy_node( ht, slot, moved_node);
+}
+
+static void reheap_up( pj_timer_heap_t *ht, pj_timer_entry *moved_node,
+ size_t slot, size_t parent)
+{
+ // Restore the heap property after an insertion.
+
+ while (slot > 0)
+ {
+ // If the parent node is greater than the <moved_node> we need
+ // to copy it down.
+ if (PJ_TIME_VAL_LT(moved_node->_timer_value, ht->heap[parent]->_timer_value))
+ {
+ copy_node(ht, slot, ht->heap[parent]);
+ slot = parent;
+ parent = HEAP_PARENT(slot);
+ }
+ else
+ break;
+ }
+
+ // Insert the new node into its proper resting place in the heap and
+ // update the corresponding slot in the parallel <timer_ids> array.
+ copy_node(ht, slot, moved_node);
+}
+
+
+static pj_timer_entry * remove_node( pj_timer_heap_t *ht, size_t slot)
+{
+ pj_timer_entry *removed_node = ht->heap[slot];
+
+ // Return this timer id to the freelist.
+ push_freelist( ht, removed_node->_timer_id );
+
+ // Decrement the size of the heap by one since we're removing the
+ // "slot"th node.
+ ht->cur_size--;
+
+ // Set the ID
+ removed_node->_timer_id = -1;
+
+ // Only try to reheapify if we're not deleting the last entry.
+
+ if (slot < ht->cur_size)
+ {
+ int parent;
+ pj_timer_entry *moved_node = ht->heap[ht->cur_size];
+
+ // Move the end node to the location being removed and update
+ // the corresponding slot in the parallel <timer_ids> array.
+ copy_node( ht, slot, moved_node);
+
+ // If the <moved_node->time_value_> is greater than or equal to its
+ // parent, it needs to be moved down the heap.
+ parent = HEAP_PARENT (slot);
+
+ if (PJ_TIME_VAL_GTE(moved_node->_timer_value, ht->heap[parent]->_timer_value))
+ reheap_down( ht, moved_node, slot, HEAP_LEFT(slot));
+ else
+ reheap_up( ht, moved_node, slot, parent);
+ }
+
+ return removed_node;
+}
+
+static void grow_heap(pj_timer_heap_t *ht)
+{
+ // All the containers will double in size from max_size_
+ size_t new_size = ht->max_size * 2;
+ pj_timer_id_t *new_timer_ids;
+ pj_size_t i;
+
+ // First grow the heap itself.
+
+ pj_timer_entry **new_heap = 0;
+
+ new_heap = pj_pool_alloc(ht->pool, sizeof(pj_timer_entry*) * new_size);
+ memcpy(new_heap, ht->heap, ht->max_size * sizeof(pj_timer_entry*));
+ //delete [] this->heap_;
+ ht->heap = new_heap;
+
+ // Grow the array of timer ids.
+
+ new_timer_ids = 0;
+ new_timer_ids = pj_pool_alloc(ht->pool, new_size * sizeof(pj_timer_id_t));
+
+ memcpy( new_timer_ids, ht->timer_ids, ht->max_size * sizeof(pj_timer_id_t));
+
+ //delete [] timer_ids_;
+ ht->timer_ids = new_timer_ids;
+
+ // And add the new elements to the end of the "freelist".
+ for (i = ht->max_size; i < new_size; i++)
+ ht->timer_ids[i] = -((pj_timer_id_t) (i + 1));
+
+ ht->max_size = new_size;
+}
+
+static void insert_node(pj_timer_heap_t *ht, pj_timer_entry *new_node)
+{
+ if (ht->cur_size + 2 >= ht->max_size)
+ grow_heap(ht);
+
+ reheap_up( ht, new_node, ht->cur_size, HEAP_PARENT(ht->cur_size));
+ ht->cur_size++;
+}
+
+
+static pj_status_t schedule_entry( pj_timer_heap_t *ht,
+ pj_timer_entry *entry,
+ const pj_time_val *future_time )
+{
+ if (ht->cur_size < ht->max_size)
+ {
+ // Obtain the next unique sequence number.
+ // Set the entry
+ entry->_timer_id = pop_freelist(ht);
+ entry->_timer_value = *future_time;
+ insert_node( ht, entry);
+ return 0;
+ }
+ else
+ return -1;
+}
+
+
+static int cancel( pj_timer_heap_t *ht,
+ pj_timer_entry *entry,
+ int dont_call)
+{
+ long timer_node_slot;
+
+ PJ_CHECK_STACK();
+
+ // Check to see if the timer_id is out of range
+ if (entry->_timer_id < 0 || (pj_size_t)entry->_timer_id > ht->max_size)
+ return 0;
+
+ timer_node_slot = ht->timer_ids[entry->_timer_id];
+
+ if (timer_node_slot < 0) // Check to see if timer_id is still valid.
+ return 0;
+
+ if (entry != ht->heap[timer_node_slot])
+ {
+ pj_assert(entry == ht->heap[timer_node_slot]);
+ return 0;
+ }
+ else
+ {
+ remove_node( ht, timer_node_slot);
+
+ if (dont_call == 0)
+ // Call the close hook.
+ (*ht->callback)(ht, entry);
+ return 1;
+ }
+}
+
+
+/*
+ * Calculate memory size required to create a timer heap.
+ */
+PJ_DEF(pj_size_t) pj_timer_heap_mem_size(pj_size_t count)
+{
+ return /* size of the timer heap itself: */
+ sizeof(pj_timer_heap_t) +
+ /* size of each entry: */
+ (count+2) * (sizeof(pj_timer_entry*)+sizeof(pj_timer_id_t)) +
+ /* mutex, pool etc: */
+ 132;
+}
+
+/*
+ * Create a new timer heap.
+ */
+PJ_DEF(pj_status_t) pj_timer_heap_create( pj_pool_t *pool,
+ pj_size_t size,
+ unsigned flag,
+ pj_timer_heap_t **p_heap)
+{
+ pj_timer_heap_t *ht;
+ pj_size_t i;
+
+ PJ_ASSERT_RETURN(pool && p_heap, PJ_EINVAL);
+
+ *p_heap = NULL;
+
+ /* Reserve two extra slots, compensating for the cur_size+2 check in insert_node(). */
+ size += 2;
+
+ /* Allocate timer heap data structure from the pool */
+ ht = pj_pool_alloc(pool, sizeof(pj_timer_heap_t));
+ if (!ht)
+ return PJ_ENOMEM;
+
+ /* Initialize timer heap sizes */
+ ht->max_size = size;
+ ht->cur_size = 0;
+ ht->timer_ids_freelist = 1;
+ ht->pool = pool;
+
+ /* Mutex. */
+ if (flag & PJ_TIMER_HEAP_NO_SYNCHRONIZE) {
+ ht->mutex = NULL;
+ } else {
+ pj_status_t rc;
+
+ /* The mutex must be a recursive type.
+ * See the commented code inside pj_timer_heap_poll().
+ */
+ rc = pj_mutex_create(pool, "tmhp%p", PJ_MUTEX_RECURSE, &ht->mutex);
+ if (rc != PJ_SUCCESS)
+ return rc;
+ }
+
+ // Create the heap array.
+ ht->heap = pj_pool_alloc(pool, sizeof(pj_timer_entry*) * size);
+ if (!ht->heap)
+ return PJ_ENOMEM;
+
+ // Create the parallel array of timer IDs.
+ ht->timer_ids = pj_pool_alloc( pool, sizeof(pj_timer_id_t) * size);
+ if (!ht->timer_ids)
+ return PJ_ENOMEM;
+
+ // Initialize the "freelist," which uses negative values to
+ // distinguish freelist elements from "pointers" into the <heap_>
+ // array.
+ for (i=0; i<size; ++i)
+ ht->timer_ids[i] = -((pj_timer_id_t) (i + 1));
+
+ *p_heap = ht;
+ return PJ_SUCCESS;
+}
+
+PJ_DEF(pj_timer_entry*) pj_timer_entry_init( pj_timer_entry *entry,
+ int id,
+ void *user_data,
+ pj_timer_heap_callback *cb )
+{
+ pj_assert(entry && cb);
+
+ entry->id = id;
+ entry->user_data = user_data;
+ entry->cb = cb;
+
+ return entry;
+}
+
+PJ_DEF(pj_status_t) pj_timer_heap_schedule( pj_timer_heap_t *ht,
+ pj_timer_entry *entry,
+ const pj_time_val *delay)
+{
+ pj_status_t status;
+ pj_time_val expires;
+
+ PJ_ASSERT_RETURN(ht && entry && delay, PJ_EINVAL);
+
+ pj_gettimeofday(&expires);
+ PJ_TIME_VAL_ADD(expires, *delay);
+
+ lock_timer_heap(ht);
+ status = schedule_entry(ht, entry, &expires);
+ unlock_timer_heap(ht);
+
+ return status;
+}
+
+PJ_DEF(int) pj_timer_heap_cancel( pj_timer_heap_t *ht,
+ pj_timer_entry *entry)
+{
+ int count;
+
+ PJ_ASSERT_RETURN(ht && entry, PJ_EINVAL);
+
+ lock_timer_heap(ht);
+ count = cancel(ht, entry, 1);
+ unlock_timer_heap(ht);
+
+ return count;
+}
+
+PJ_DEF(int) pj_timer_heap_poll( pj_timer_heap_t *ht, pj_time_val *next_delay )
+{
+ pj_time_val now;
+ int count;
+
+ PJ_ASSERT_RETURN(ht, -1);
+
+ if (!ht->cur_size && next_delay) {
+ next_delay->sec = next_delay->msec = PJ_MAXINT32;
+ return 0;
+ }
+
+ count = 0;
+ pj_gettimeofday(&now);
+
+ lock_timer_heap(ht);
+ while ( ht->cur_size &&
+ PJ_TIME_VAL_LTE(ht->heap[0]->_timer_value, now) )
+ {
+ pj_timer_entry *node = remove_node(ht, 0);
+ ++count;
+
+ //Better not to temporarily release the mutex, to save some syscalls.
+ //But then the mutex must be a recursive type (PJ_MUTEX_RECURSE)!
+ //unlock_timer_heap(ht);
+ (*node->cb)(ht, node);
+ //lock_timer_heap(ht);
+ }
+ if (ht->cur_size && next_delay) {
+ *next_delay = ht->heap[0]->_timer_value;
+ PJ_TIME_VAL_SUB(*next_delay, now);
+ } else if (next_delay) {
+ next_delay->sec = next_delay->msec = PJ_MAXINT32;
+ }
+ unlock_timer_heap(ht);
+
+ return count;
+}
+
+PJ_DEF(pj_size_t) pj_timer_heap_count( pj_timer_heap_t *ht )
+{
+ return ht->cur_size;
+}
+
+PJ_DEF(pj_status_t) pj_timer_heap_earliest_time( pj_timer_heap_t * ht,
+ pj_time_val *timeval)
+{
+ pj_assert(ht->cur_size != 0);
+ if (ht->cur_size == 0)
+ return PJ_ENOTFOUND;
+
+ lock_timer_heap(ht);
+ *timeval = ht->heap[0]->_timer_value;
+ unlock_timer_heap(ht);
+
+ return PJ_SUCCESS;
+}
+
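A sketch of the usual timer heap life-cycle: create it from a pool, schedule one entry, and poll it from a loop. The callback signature follows the way this file invokes ht->callback and node->cb; the names on_timer and timer_heap_sketch are illustrative.

static void on_timer(pj_timer_heap_t *ht, pj_timer_entry *entry)
{
    PJ_UNUSED_ARG(ht);
    PJ_LOG(3,("app", "timer %d fired", entry->id));
}

static pj_status_t timer_heap_sketch(pj_pool_t *pool)
{
    pj_timer_heap_t *ht;
    pj_timer_entry entry;
    pj_time_val delay, next_delay;
    pj_status_t rc;

    /* Room for up to 128 pending entries, with the default (locked) flags. */
    rc = pj_timer_heap_create(pool, 128, 0, &ht);
    if (rc != PJ_SUCCESS)
        return rc;

    /* Fire once, two and a half seconds from now. */
    pj_timer_entry_init(&entry, 1, NULL, &on_timer);
    delay.sec = 2; delay.msec = 500;
    rc = pj_timer_heap_schedule(ht, &entry, &delay);
    if (rc != PJ_SUCCESS)
        return rc;

    /* Poll loop: run expired callbacks, sleep until the next deadline. */
    while (pj_timer_heap_count(ht) > 0) {
        pj_timer_heap_poll(ht, &next_delay);
        if (pj_timer_heap_count(ht) > 0)
            pj_thread_sleep(PJ_TIME_VAL_MSEC(next_delay));
    }
    return PJ_SUCCESS;
}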
diff --git a/pjlib/src/pj/tounix b/pjlib/src/pj/tounix
new file mode 100644
index 00000000..f4e1920b
--- /dev/null
+++ b/pjlib/src/pj/tounix
@@ -0,0 +1,4 @@
+#!/bin/sh
+cp $1 /tmp
+cat /tmp/$1 | tr -d '\r' > $1
+
diff --git a/pjlib/src/pj/types.c b/pjlib/src/pj/types.c
new file mode 100644
index 00000000..ee1a8588
--- /dev/null
+++ b/pjlib/src/pj/types.c
@@ -0,0 +1,36 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/types.c 4 9/17/05 10:37a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/types.c $
+ *
+ * 4 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ */
+#include <pj/types.h>
+#include <pj/os.h>
+
+void pj_time_val_normalize(pj_time_val *t)
+{
+ PJ_CHECK_STACK();
+
+ if (t->msec >= 1000) {
+ do {
+ t->sec++;
+ t->msec -= 1000;
+ } while (t->msec >= 1000);
+ }
+ else if (t->msec <= -1000) {
+ do {
+ t->sec--;
+ t->msec += 1000;
+ } while (t->msec <= -1000);
+ }
+
+ if (t->sec >= 1 && t->msec < 0) {
+ t->sec--;
+ t->msec += 1000;
+
+ } else if (t->sec < 0 && t->msec > 0) {
+ t->sec++;
+ t->msec -= 1000;
+ }
+}
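For example, normalization carries excess or negative milliseconds into the seconds field:

pj_time_val t;

t.sec = 1; t.msec = 2500;
pj_time_val_normalize(&t);      /* t.sec == 3, t.msec == 500 */

t.sec = 2; t.msec = -300;
pj_time_val_normalize(&t);      /* t.sec == 1, t.msec == 700 */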
diff --git a/pjlib/src/pj/xml.c b/pjlib/src/pj/xml.c
new file mode 100644
index 00000000..ff3684a8
--- /dev/null
+++ b/pjlib/src/pj/xml.c
@@ -0,0 +1,392 @@
+/* $Header: /pjproject-0.3/pjlib/src/pj/xml.c 9 10/14/05 12:26a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pj/xml.c $
+ *
+ * 9 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 8 9/21/05 1:39p Bennylp
+ * Periodic checkin for backup.
+ *
+ * 7 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ */
+#include <pj/xml.h>
+#include <pj/scanner.h>
+#include <pj/except.h>
+#include <pj/pool.h>
+#include <pj/string.h>
+#include <pj/log.h>
+#include <pj/os.h>
+
+#define EX_SYNTAX_ERROR 12
+#define THIS_FILE "xml.c"
+
+static void on_syntax_error(struct pj_scanner *scanner)
+{
+ PJ_UNUSED_ARG(scanner);
+ PJ_THROW(EX_SYNTAX_ERROR);
+}
+
+static pj_xml_node *alloc_node( pj_pool_t *pool )
+{
+ pj_xml_node *node;
+
+ node = pj_pool_calloc(pool, 1, sizeof(pj_xml_node));
+ pj_list_init( &node->attr_head );
+ pj_list_init( &node->node_head );
+
+ return node;
+}
+
+static pj_xml_attr *alloc_attr( pj_pool_t *pool )
+{
+ return pj_pool_calloc(pool, 1, sizeof(pj_xml_attr));
+}
+
+/* This is a recursive function! */
+static pj_xml_node *xml_parse_node( pj_pool_t *pool, pj_scanner *scanner)
+{
+ pj_xml_node *node;
+ pj_str_t end_name;
+
+ PJ_CHECK_STACK();
+
+ if (*scanner->curptr != '<')
+ on_syntax_error(scanner);
+
+ /* Handle Processing Instruction (PI) construct (i.e. "<?") */
+ if (*scanner->curptr == '<' && *(scanner->curptr+1) == '?') {
+ pj_scan_advance_n(scanner, 2, PJ_FALSE);
+ for (;;) {
+ pj_str_t dummy;
+ pj_scan_get_until_ch(scanner, '?', &dummy);
+ if (*scanner->curptr=='?' && *(scanner->curptr+1)=='>') {
+ pj_scan_advance_n(scanner, 2, PJ_TRUE);
+ break;
+ } else {
+ pj_scan_advance_n(scanner, 1, PJ_FALSE);
+ }
+ }
+ return xml_parse_node(pool, scanner);
+ }
+
+ /* Handle comment construct (i.e. "<!--") */
+ if (pj_scan_strcmp(scanner, "<!--", 4) == 0) {
+ pj_scan_advance_n(scanner, 4, PJ_FALSE);
+ for (;;) {
+ pj_str_t dummy;
+ pj_scan_get_until_ch(scanner, '-', &dummy);
+ if (pj_scan_strcmp(scanner, "-->", 3) == 0) {
+ pj_scan_advance_n(scanner, 3, PJ_TRUE);
+ break;
+ } else {
+ pj_scan_advance_n(scanner, 1, PJ_FALSE);
+ }
+ }
+ return xml_parse_node(pool, scanner);
+ }
+
+ /* Alloc node. */
+ node = alloc_node(pool);
+
+ /* Get '<' */
+ pj_scan_get_char(scanner);
+
+ /* Get node name. */
+ pj_scan_get_until_chr( scanner, " />\t", &node->name);
+
+ /* Get attributes. */
+ while (*scanner->curptr != '>' && *scanner->curptr != '/') {
+ pj_xml_attr *attr = alloc_attr(pool);
+
+ pj_scan_get_until_chr( scanner, "=> \t", &attr->name);
+ if (*scanner->curptr == '=') {
+ pj_scan_get_char( scanner );
+ pj_scan_get_quote(scanner, '"', '"', &attr->value);
+ /* remove quote characters */
+ ++attr->value.ptr;
+ attr->value.slen -= 2;
+ }
+
+ pj_list_insert_before( &node->attr_head, attr );
+ }
+
+ if (*scanner->curptr == '/') {
+ pj_scan_get_char(scanner);
+ if (pj_scan_get_char(scanner) != '>')
+ on_syntax_error(scanner);
+ return node;
+ }
+
+ /* Enclosing bracket. */
+ if (pj_scan_get_char(scanner) != '>')
+ on_syntax_error(scanner);
+
+ /* Sub nodes. */
+ while (*scanner->curptr == '<' && *(scanner->curptr+1) != '/') {
+ pj_xml_node *sub_node = xml_parse_node(pool, scanner);
+ pj_list_insert_before( &node->node_head, sub_node );
+ }
+
+ /* Content. */
+ if (!pj_scan_is_eof(scanner) && *scanner->curptr != '<') {
+ pj_scan_get_until_ch(scanner, '<', &node->content);
+ }
+
+ /* Enclosing node. */
+ if (pj_scan_get_char(scanner) != '<' || pj_scan_get_char(scanner) != '/')
+ on_syntax_error(scanner);
+
+ pj_scan_get_until_chr(scanner, " \t>", &end_name);
+
+ /* Compare name. */
+ if (pj_stricmp(&node->name, &end_name) != 0)
+ on_syntax_error(scanner);
+
+ /* Enclosing '>' */
+ if (pj_scan_get_char(scanner) != '>')
+ on_syntax_error(scanner);
+
+ return node;
+}
+
+PJ_DEF(pj_xml_node*) pj_xml_parse( pj_pool_t *pool, char *msg, pj_size_t len)
+{
+ pj_xml_node *node = NULL;
+ pj_scanner scanner;
+ PJ_USE_EXCEPTION;
+
+ if (!msg || !len || !pool)
+ return NULL;
+
+ pj_scan_init( &scanner, msg, len,
+ PJ_SCAN_AUTOSKIP_WS|PJ_SCAN_AUTOSKIP_NEWLINE,
+ &on_syntax_error);
+ PJ_TRY {
+ node = xml_parse_node(pool, &scanner);
+ }
+ PJ_DEFAULT {
+ PJ_LOG(4,(THIS_FILE, "Syntax error parsing XML in line %d column %d",
+ scanner.line, scanner.col));
+ }
+ PJ_END;
+ pj_scan_fini( &scanner );
+ return node;
+}
+
+/* This is a recursive function. */
+static int xml_print_node( const pj_xml_node *node, int indent,
+ char *buf, pj_size_t len )
+{
+ int i;
+ char *p = buf;
+ pj_xml_attr *attr;
+ pj_xml_node *sub_node;
+
+#define SIZE_LEFT() ((int)(len - (p-buf)))
+
+ PJ_CHECK_STACK();
+
+ /* Print name. */
+ if (SIZE_LEFT() < node->name.slen + indent + 5)
+ return -1;
+ for (i=0; i<indent; ++i)
+ *p++ = ' ';
+ *p++ = '<';
+ pj_memcpy(p, node->name.ptr, node->name.slen);
+ p += node->name.slen;
+
+ /* Print attributes. */
+ attr = node->attr_head.next;
+ while (attr != &node->attr_head) {
+
+ if (SIZE_LEFT() < attr->name.slen + attr->value.slen + 4)
+ return -1;
+
+ *p++ = ' ';
+
+ /* Attribute name. */
+ pj_memcpy(p, attr->name.ptr, attr->name.slen);
+ p += attr->name.slen;
+
+ /* Attribute value. */
+ if (attr->value.slen) {
+ *p++ = '=';
+ *p++ = '"';
+ pj_memcpy(p, attr->value.ptr, attr->value.slen);
+ p += attr->value.slen;
+ *p++ = '"';
+ }
+
+ attr = attr->next;
+ }
+
+ /* Check for empty node. */
+ if (node->content.slen==0 &&
+ node->node_head.next==(pj_xml_node*)&node->node_head)
+ {
+ *p++ = ' ';
+ *p++ = '/';
+ *p++ = '>';
+ return p-buf;
+ }
+
+ /* Enclosing '>' */
+ if (SIZE_LEFT() < 1) return -1;
+ *p++ = '>';
+
+ /* Print sub nodes. */
+ sub_node = node->node_head.next;
+ while (sub_node != (pj_xml_node*)&node->node_head) {
+ int printed;
+
+ if (SIZE_LEFT() < indent + 3)
+ return -1;
+ //*p++ = '\r';
+ *p++ = '\n';
+
+ printed = xml_print_node(sub_node, indent + 1, p, SIZE_LEFT());
+ if (printed < 0)
+ return -1;
+
+ p += printed;
+ sub_node = sub_node->next;
+ }
+
+ /* Content. */
+ if (node->content.slen) {
+ if (SIZE_LEFT() < node->content.slen) return -1;
+ pj_memcpy(p, node->content.ptr, node->content.slen);
+ p += node->content.slen;
+ }
+
+ /* Enclosing node. */
+ if (node->node_head.next != (pj_xml_node*)&node->node_head) {
+ if (SIZE_LEFT() < node->name.slen + 5 + indent)
+ return -1;
+ //*p++ = '\r';
+ *p++ = '\n';
+ for (i=0; i<indent; ++i)
+ *p++ = ' ';
+ } else {
+ if (SIZE_LEFT() < node->name.slen + 3)
+ return -1;
+ }
+ *p++ = '<';
+ *p++ = '/';
+ pj_memcpy(p, node->name.ptr, node->name.slen);
+ p += node->name.slen;
+ *p++ = '>';
+
+#undef SIZE_LEFT
+
+ return p - buf;
+}
+
+PJ_DEF(int) pj_xml_print(const pj_xml_node *node, char *buf, pj_size_t len,
+ pj_bool_t include_prolog)
+{
+ int prolog_len = 0;
+ int printed;
+
+ if (!node || !buf || !len)
+ return 0;
+
+ if (include_prolog) {
+ pj_str_t prolog = {"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n", 39};
+ if ((int)len < prolog.slen)
+ return -1;
+ pj_memcpy(buf, prolog.ptr, prolog.slen);
+ prolog_len = prolog.slen;
+ }
+
+ printed = xml_print_node(node, 0, buf+prolog_len, len-prolog_len) + prolog_len;
+ if (printed > 0 && len-printed >= 1) {
+ buf[printed++] = '\n';
+ }
+ return printed;
+}
+
+
+PJ_DEF(void) pj_xml_add_node( pj_xml_node *parent, pj_xml_node *node )
+{
+ pj_list_insert_before(&parent->node_head, node);
+}
+
+PJ_DEF(void) pj_xml_add_attr( pj_xml_node *node, pj_xml_attr *attr )
+{
+ pj_list_insert_before(&node->attr_head, attr);
+}
+
+PJ_DEF(pj_xml_node*) pj_xml_find_node(pj_xml_node *parent, const pj_str_t *name)
+{
+ pj_xml_node *node = parent->node_head.next;
+
+ PJ_CHECK_STACK();
+
+ while (node != (void*)&parent->node_head) {
+ if (pj_stricmp(&node->name, name) == 0)
+ return node;
+ node = node->next;
+ }
+ return NULL;
+}
+
+
+PJ_DEF(pj_xml_node*) pj_xml_find_next_node( pj_xml_node *parent, pj_xml_node *node,
+ const pj_str_t *name)
+{
+ PJ_CHECK_STACK();
+
+ node = node->next;
+ while (node != (void*)&parent->node_head) {
+ if (pj_stricmp(&node->name, name) == 0)
+ return node;
+ node = node->next;
+ }
+ return NULL;
+}
+
+
+PJ_DEF(pj_xml_attr*) pj_xml_find_attr( pj_xml_node *node, const pj_str_t *name,
+ const pj_str_t *value)
+{
+ pj_xml_attr *attr = node->attr_head.next;
+ while (attr != (void*)&node->attr_head) {
+ if (pj_stricmp(&attr->name, name)==0) {
+ if (value) {
+ if (pj_stricmp(&attr->value, value)==0)
+ return attr;
+ } else {
+ return attr;
+ }
+ }
+ attr = attr->next;
+ }
+ return NULL;
+}
+
+
+
+PJ_DEF(pj_xml_node*) pj_xml_find( pj_xml_node *parent, const pj_str_t *name,
+ const void *data,
+ pj_bool_t (*match)(pj_xml_node *, const void*))
+{
+ pj_xml_node *head = (void*)&parent->node_head, *node = head->next;
+
+ while (node != (void*)head) {
+ if (name && pj_stricmp(&node->name, name)==0) {
+ if (match) {
+ if (match(node, data))
+ return node;
+ } else {
+ return node;
+ }
+ }
+ node = node->next;
+ }
+ return NULL;
+}
+
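A sketch of a round trip through the parser and printer above. The parsed nodes point back into the input buffer, so the buffer must stay writable and alive while the tree is used; the element names here are made up.

static void xml_sketch(pj_pool_t *pool)
{
    char input[] = "<presence entity=\"sip:alice@example.com\">"
                   "<status>open</status></presence>";
    char output[512];
    pj_xml_node *root;
    int len;

    root = pj_xml_parse(pool, input, sizeof(input)-1);
    if (!root)
        return;     /* syntax error, already logged by pj_xml_parse() */

    /* Print it back, including the "<?xml ...?>" prolog. */
    len = pj_xml_print(root, output, sizeof(output), PJ_TRUE);
    if (len > 0 && len < (int)sizeof(output)) {
        output[len] = '\0';
        PJ_LOG(3,("app", "%s", output));
    }
}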
diff --git a/pjlib/src/pjlib-samples/except.c b/pjlib/src/pjlib-samples/except.c
new file mode 100644
index 00000000..c6b7f556
--- /dev/null
+++ b/pjlib/src/pjlib-samples/except.c
@@ -0,0 +1,79 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-samples/except.c 2 10/14/05 12:26a Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-samples/except.c $
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/10/05 3:16p Bennylp
+ * Created.
+ *
+ */
+#include <pj/except.h>
+#include <pj/rand.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+/**
+ * \page page_pjlib_samples_except_c Example: Exception Handling
+ *
+ * Below is a sample program that demonstrates how to use exception handling.
+ *
+ * \includelineno pjlib-samples/except.c
+ */
+
+static pj_exception_id_t NO_MEMORY, OTHER_EXCEPTION;
+
+static void randomly_throw_exception()
+{
+ if (pj_rand() % 2)
+ PJ_THROW(OTHER_EXCEPTION);
+}
+
+static void *my_malloc(size_t size)
+{
+ void *ptr = malloc(size);
+ if (!ptr)
+ PJ_THROW(NO_MEMORY);
+ return ptr;
+}
+
+static int test_exception()
+{
+ PJ_USE_EXCEPTION;
+
+ PJ_TRY {
+ void *data = my_malloc(200);
+ free(data);
+ randomly_throw_exception();
+ }
+ PJ_CATCH( NO_MEMORY ) {
+ puts("Can't allocate memory");
+ return 0;
+ }
+ PJ_DEFAULT {
+ pj_exception_id_t x_id;
+
+ x_id = PJ_GET_EXCEPTION();
+ printf("Caught exception %d (%s)\n",
+ x_id, pj_exception_id_name(x_id));
+ }
+ PJ_END
+ return 1;
+}
+
+int main()
+{
+ pj_status_t rc;
+
+ // Error handling is omitted for clarity.
+
+ rc = pj_init();
+
+ rc = pj_exception_id_alloc("No Memory", &NO_MEMORY);
+ rc = pj_exception_id_alloc("Other Exception", &OTHER_EXCEPTION);
+
+ return test_exception();
+}
+
diff --git a/pjlib/src/pjlib-samples/list.c b/pjlib/src/pjlib-samples/list.c
new file mode 100644
index 00000000..74e783f5
--- /dev/null
+++ b/pjlib/src/pjlib-samples/list.c
@@ -0,0 +1,66 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-samples/list.c 2 10/14/05 12:26a Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-samples/list.c $
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/10/05 5:12p Bennylp
+ * Created.
+ *
+ */
+
+#include <pj/list.h>
+#include <pj/assert.h>
+#include <pj/log.h>
+
+/**
+ * \page page_pjlib_samples_list_c Example: List Manipulation
+ *
+ * Below is a sample program that demonstrates how to manipulate a linked list.
+ *
+ * \includelineno pjlib-samples/list.c
+ */
+
+struct my_node
+{
+ // This must be the first member declared in the struct!
+ PJ_DECL_LIST_MEMBER(struct my_node)
+ int value;
+};
+
+
+int main()
+{
+ struct my_node nodes[10];
+ struct my_node list;
+ struct my_node *it;
+ int i;
+
+ // Initialize the list as empty.
+ pj_list_init(&list);
+
+ // Insert nodes.
+ for (i=0; i<10; ++i) {
+ nodes[i].value = i;
+ pj_list_insert_before(&list, &nodes[i]);
+ }
+
+ // Iterate list nodes.
+ it = list.next;
+ while (it != &list) {
+ PJ_LOG(3,("list", "value = %d", it->value));
+ it = it->next;
+ }
+
+ // Erase all nodes.
+ for (i=0; i<10; ++i) {
+ pj_list_erase(&nodes[i]);
+ }
+
+ // List must be empty by now.
+ pj_assert( pj_list_empty(&list) );
+
+ return 0;
+}
diff --git a/pjlib/src/pjlib-samples/log.c b/pjlib/src/pjlib-samples/log.c
new file mode 100644
index 00000000..b90c71d0
--- /dev/null
+++ b/pjlib/src/pjlib-samples/log.c
@@ -0,0 +1,36 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-samples/log.c 2 10/14/05 12:26a Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-samples/log.c $
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/10/05 3:16p Bennylp
+ * Created.
+ *
+ */
+#include <pj/log.h>
+
+/**
+ * \page page_pjlib_samples_log_c Example: Log, Hello World
+ *
+ * Very simple program to write log.
+ *
+ * \includelineno pjlib-samples/log.c
+ */
+
+int main()
+{
+ pj_status_t rc;
+
+ // Error handling omitted for clarity.
+
+ // Must initialize PJLIB first!
+ rc = pj_init();
+
+ PJ_LOG(3, ("main.c", "Hello world!"));
+
+ return 0;
+}
+
diff --git a/pjlib/src/pjlib-test/atomic.c b/pjlib/src/pjlib-test/atomic.c
new file mode 100644
index 00000000..0a1ebb7d
--- /dev/null
+++ b/pjlib/src/pjlib-test/atomic.c
@@ -0,0 +1,94 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/atomic.c 2 10/14/05 12:26a Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/atomic.c $
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/07/05 9:49p Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+#include <pjlib.h>
+
+/**
+ * \page page_pjlib_atomic_test Test: Atomic Variable
+ *
+ * This file provides implementation of \b atomic_test(). It tests the
+ * functionality of the atomic variable API.
+ *
+ * \section atomic_test_sec Scope of the Test
+ *
+ * API tested:
+ * - pj_atomic_create()
+ * - pj_atomic_get()
+ * - pj_atomic_inc()
+ * - pj_atomic_dec()
+ * - pj_atomic_set()
+ * - pj_atomic_destroy()
+ *
+ *
+ * This file is <b>pjlib-test/atomic.c</b>
+ *
+ * \include pjlib-test/atomic.c
+ */
+
+
+#if INCLUDE_ATOMIC_TEST
+
+int atomic_test(void)
+{
+ pj_pool_t *pool;
+ pj_atomic_t *atomic_var;
+ pj_status_t rc;
+
+ pool = pj_pool_create(mem, NULL, 4096, 0, NULL);
+ if (!pool)
+ return -10;
+
+ /* create() */
+ rc = pj_atomic_create(pool, 111, &atomic_var);
+ if (rc != 0) {
+ return -20;
+ }
+
+ /* get: check the value. */
+ if (pj_atomic_get(atomic_var) != 111)
+ return -30;
+
+ /* increment. */
+ if (pj_atomic_inc(atomic_var) != 112)
+ return -40;
+
+ /* decrement. */
+ if (pj_atomic_dec(atomic_var) != 111)
+ return -50;
+
+ /* set */
+ if (pj_atomic_set(atomic_var, 211) != 111)
+ return -60;
+
+ /* check the value again. */
+ if (pj_atomic_get(atomic_var) != 211)
+ return -70;
+
+ /* destroy */
+ rc = pj_atomic_destroy(atomic_var);
+ if (rc != 0)
+ return -80;
+
+ pj_pool_release(pool);
+
+ return 0;
+}
+
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_atomic_test;
+#endif /* INCLUDE_ATOMIC_TEST */
+
diff --git a/pjlib/src/pjlib-test/echo_clt.c b/pjlib/src/pjlib-test/echo_clt.c
new file mode 100644
index 00000000..565d5607
--- /dev/null
+++ b/pjlib/src/pjlib-test/echo_clt.c
@@ -0,0 +1,267 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/echo_clt.c 3 10/29/05 10:25p Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/echo_clt.c $
+ *
+ * 3 10/29/05 10:25p Bennylp
+ * Tested.
+ *
+ * 2 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 1 10/24/05 11:28a Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+#include <pjlib.h>
+
+#if INCLUDE_ECHO_CLIENT
+
+enum { BUF_SIZE = 512 };
+
+struct client
+{
+ int sock_type;
+ const char *server;
+ int port;
+};
+
+static pj_sem_t *sem;
+static pj_mutex_t *mutex;
+static pj_size_t total_bw;
+static unsigned total_poster;
+static pj_time_val first_report;
+
+#define MSEC_PRINT_DURATION 1000
+
+static int wait_socket(pj_sock_t sock, unsigned msec_timeout)
+{
+ pj_fd_set_t fdset;
+ pj_time_val timeout;
+
+ timeout.sec = 0;
+ timeout.msec = msec_timeout;
+ pj_time_val_normalize(&timeout);
+
+ PJ_FD_ZERO(&fdset);
+ PJ_FD_SET(sock, &fdset);
+
+ return pj_sock_select(1, &fdset, NULL, NULL, &timeout);
+}
+
+static int echo_client_thread(void *arg)
+{
+ pj_sock_t sock;
+ char send_buf[BUF_SIZE];
+ char recv_buf[BUF_SIZE];
+ pj_sockaddr_in addr;
+ pj_str_t s;
+ pj_status_t rc;
+ struct client *client = arg;
+ pj_status_t last_recv_err = PJ_SUCCESS, last_send_err = PJ_SUCCESS;
+
+ pj_time_val last_report, next_report;
+ pj_size_t thread_total;
+
+ rc = app_socket(PJ_AF_INET, client->sock_type, 0, -1, &sock);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...unable to create socket", rc);
+ return -10;
+ }
+
+ rc = pj_sockaddr_in_init( &addr, pj_cstr(&s, client->server),
+ (pj_uint16_t)client->port);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...unable to resolve server", rc);
+ return -15;
+ }
+
+ rc = pj_sock_connect(sock, &addr, sizeof(addr));
+ if (rc != PJ_SUCCESS) {
+ app_perror("...connect() error", rc);
+ pj_sock_close(sock);
+ return -20;
+ }
+
+ pj_create_random_string(send_buf, BUF_SIZE);
+ thread_total = 0;
+
+ /* Give the other threads a chance to initialize themselves. */
+ pj_thread_sleep(500);
+
+ pj_gettimeofday(&last_report);
+ next_report = first_report;
+
+ //PJ_LOG(3,("", "...thread %p running", pj_thread_this()));
+
+ for (;;) {
+ int rc;
+ pj_ssize_t bytes;
+ pj_time_val now;
+
+ /* Send a packet. */
+ bytes = BUF_SIZE;
+ rc = pj_sock_send(sock, send_buf, &bytes, 0);
+ if (rc != PJ_SUCCESS || bytes != BUF_SIZE) {
+ if (rc != last_send_err) {
+ app_perror("...send() error", rc);
+ PJ_LOG(3,("", "...ignoring subsequent error.."));
+ last_send_err = rc;
+ pj_thread_sleep(100);
+ }
+ continue;
+ }
+
+ rc = wait_socket(sock, 500);
+ if (rc == 0) {
+ PJ_LOG(3,("", "...timeout"));
+ } else {
+ /* Receive back the original packet. */
+ bytes = 0;
+ do {
+ pj_ssize_t received = BUF_SIZE - bytes;
+ rc = pj_sock_recv(sock, recv_buf+bytes, &received, 0);
+ if (rc != PJ_SUCCESS || received == 0) {
+ if (rc != last_recv_err) {
+ app_perror("...recv() error", rc);
+ PJ_LOG(3,("", "...ignoring subsequent error.."));
+ last_recv_err = rc;
+ pj_thread_sleep(100);
+ }
+ bytes = 0;
+ break;
+ }
+ bytes += received;
+ } while (bytes != BUF_SIZE);
+ }
+
+ /* Accumulate total received. */
+ thread_total = thread_total + bytes;
+
+ /* Report current bandwidth on due. */
+ pj_gettimeofday(&now);
+
+ if (PJ_TIME_VAL_GTE(now, next_report)) {
+ pj_uint32_t bw;
+ pj_bool_t signal_parent = 0;
+ pj_time_val duration;
+ pj_uint32_t msec;
+
+ duration = now;
+ PJ_TIME_VAL_SUB(duration, last_report);
+ msec = PJ_TIME_VAL_MSEC(duration);
+
+ bw = thread_total * 1000 / msec;
+
+ /* Post result to parent */
+ pj_mutex_lock(mutex);
+ total_bw += bw;
+ total_poster++;
+ //PJ_LOG(3,("", "...thread %p posting result", pj_thread_this()));
+ if (total_poster >= ECHO_CLIENT_MAX_THREADS)
+ signal_parent = 1;
+ pj_mutex_unlock(mutex);
+
+ thread_total = 0;
+ last_report = now;
+ next_report.sec++;
+
+ if (signal_parent) {
+ pj_sem_post(sem);
+ }
+
+ pj_thread_sleep(0);
+ }
+
+ if (bytes == 0)
+ continue;
+
+ if (pj_memcmp(send_buf, recv_buf, BUF_SIZE) != 0) {
+ PJ_LOG(3,("", "...error: buffer has changed!"));
+ break;
+ }
+ }
+
+ pj_sock_close(sock);
+ return 0;
+}
+
+int echo_client(int sock_type, const char *server, int port)
+{
+ pj_pool_t *pool;
+ pj_thread_t *thread[ECHO_CLIENT_MAX_THREADS];
+ pj_status_t rc;
+ struct client client;
+ int i;
+
+ client.sock_type = sock_type;
+ client.server = server;
+ client.port = port;
+
+ pool = pj_pool_create( mem, NULL, 4000, 4000, NULL );
+
+ rc = pj_sem_create(pool, NULL, 0, ECHO_CLIENT_MAX_THREADS+1, &sem);
+ if (rc != PJ_SUCCESS) {
+ PJ_LOG(3,("", "...error: unable to create semaphore", rc));
+ return -10;
+ }
+
+ rc = pj_mutex_create_simple(pool, NULL, &mutex);
+ if (rc != PJ_SUCCESS) {
+ PJ_LOG(3,("", "...error: unable to create mutex", rc));
+ return -20;
+ }
+
+ /*
+ rc = pj_atomic_create(pool, 0, &atom);
+ if (rc != PJ_SUCCESS) {
+ PJ_LOG(3,("", "...error: unable to create atomic variable", rc));
+ return -30;
+ }
+ */
+
+ PJ_LOG(3,("", "Echo client started"));
+ PJ_LOG(3,("", " Destination: %s:%d",
+ ECHO_SERVER_ADDRESS, ECHO_SERVER_START_PORT));
+ PJ_LOG(3,("", " Press Ctrl-C to exit"));
+
+ pj_gettimeofday(&first_report);
+ first_report.sec += 2;
+
+ for (i=0; i<ECHO_CLIENT_MAX_THREADS; ++i) {
+ rc = pj_thread_create( pool, NULL, &echo_client_thread, &client,
+ PJ_THREAD_DEFAULT_STACK_SIZE, 0,
+ &thread[i]);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: unable to create thread", rc);
+ return -10;
+ }
+ }
+
+ for (;;) {
+ pj_uint32_t bw;
+
+ pj_sem_wait(sem);
+
+ pj_mutex_lock(mutex);
+ bw = total_bw;
+ total_bw = 0;
+ total_poster = 0;
+ pj_mutex_unlock(mutex);
+
+ PJ_LOG(3,("", "...%d threads, total bandwidth: %d KB/s",
+ ECHO_CLIENT_MAX_THREADS, bw/1000));
+ }
+
+ for (i=0; i<ECHO_CLIENT_MAX_THREADS; ++i) {
+ pj_thread_join( thread[i] );
+ }
+
+ pj_pool_release(pool);
+ return 0;
+}
+
+
+#else
+int dummy_echo_client;
+#endif /* INCLUDE_ECHO_CLIENT */
diff --git a/pjlib/src/pjlib-test/echo_srv.c b/pjlib/src/pjlib-test/echo_srv.c
new file mode 100644
index 00000000..cee64309
--- /dev/null
+++ b/pjlib/src/pjlib-test/echo_srv.c
@@ -0,0 +1,331 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/echo_srv.c 3 10/29/05 10:23p Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/echo_srv.c $
+ *
+ * 3 10/29/05 10:23p Bennylp
+ * Changed ioqueue accept specification.
+ *
+ * 2 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 1 10/24/05 11:28a Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+#include <pjlib.h>
+#include <pj/compat/high_precision.h>
+
+#if INCLUDE_ECHO_SERVER
+
+static pj_bool_t thread_quit_flag;
+
+struct server
+{
+ pj_pool_t *pool;
+ int sock_type;
+ int thread_count;
+ pj_ioqueue_t *ioqueue;
+ pj_sock_t sock;
+ pj_sock_t client_sock;
+ pj_ioqueue_key_t *key;
+ pj_ioqueue_callback cb;
+ char *buf;
+ pj_size_t bufsize;
+ pj_sockaddr_in addr;
+ int addrlen;
+ pj_size_t bytes_recv;
+ pj_timestamp start_time;
+};
+
+static void on_read_complete(pj_ioqueue_key_t *key, pj_ssize_t bytes_read)
+{
+ struct server *server = pj_ioqueue_get_user_data(key);
+ pj_status_t rc;
+
+ if (server->sock_type == PJ_SOCK_DGRAM) {
+ if (bytes_read > 0) {
+ /* Send data back to sender. */
+ rc = pj_ioqueue_sendto( server->ioqueue, server->key,
+ server->buf, bytes_read, 0,
+ &server->addr, server->addrlen);
+ if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
+ app_perror("...sendto() error", rc);
+ }
+ } else {
+ PJ_LOG(3,("", "...read error (bytes_read=%d)", bytes_read));
+ }
+
+ /* Start next receive. */
+ rc = pj_ioqueue_recvfrom( server->ioqueue, server->key,
+ server->buf, server->bufsize, 0,
+ &server->addr, &server->addrlen);
+ if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
+ app_perror("...recvfrom() error", rc);
+ }
+
+ }
+ else if (server->sock_type == PJ_SOCK_STREAM) {
+ if (bytes_read > 0) {
+ /* Send data back to sender. */
+ rc = pj_ioqueue_send( server->ioqueue, server->key,
+ server->buf, bytes_read, 0);
+ if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
+ app_perror("...send() error", rc);
+ bytes_read = 0;
+ }
+ }
+
+ if (bytes_read <= 0) {
+ PJ_LOG(3,("", "...tcp closed"));
+ pj_ioqueue_unregister( server->ioqueue, server->key );
+ pj_sock_close( server->sock );
+ pj_pool_release( server->pool );
+ return;
+ }
+
+ /* Start next receive. */
+ rc = pj_ioqueue_recv( server->ioqueue, server->key,
+ server->buf, server->bufsize, 0);
+ if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
+ app_perror("...recv() error", rc);
+ }
+ }
+
+ /* Add counter. */
+ if (bytes_read > 0) {
+ if (server->bytes_recv == 0) {
+ pj_get_timestamp(&server->start_time);
+ server->bytes_recv += bytes_read;
+ } else {
+ enum { USECS_IN_SECOND = 1000000 };
+ pj_timestamp now;
+ pj_uint32_t usec_elapsed;
+
+ server->bytes_recv += bytes_read;
+
+ pj_get_timestamp(&now);
+ usec_elapsed = pj_elapsed_usec(&server->start_time, &now);
+ if (usec_elapsed > USECS_IN_SECOND) {
+ if (usec_elapsed < 2 * USECS_IN_SECOND) {
+ pj_highprec_t bw;
+ pj_uint32_t bw32;
+ const char *type_name;
+
+ /* bandwidth(bw) = server->bytes_recv * USECS/elapsed */
+ bw = server->bytes_recv;
+ pj_highprec_mul(bw, USECS_IN_SECOND);
+ pj_highprec_div(bw, usec_elapsed);
+
+ bw32 = (pj_uint32_t) bw;
+
+ if (server->sock_type==PJ_SOCK_STREAM)
+ type_name = "tcp";
+ else if (server->sock_type==PJ_SOCK_DGRAM)
+ type_name = "udp";
+ else
+ type_name = "???";
+
+ PJ_LOG(3,("",
+ "...[%s:%d (%d threads)] Current bandwidth=%u KBps",
+ type_name,
+ ECHO_SERVER_START_PORT+server->thread_count,
+ server->thread_count,
+ bw32/1024));
+ }
+ server->start_time = now;
+ server->bytes_recv = 0;
+ }
+ }
+ }
+}
+
+static void on_accept_complete( pj_ioqueue_key_t *key, pj_sock_t sock,
+ int status)
+{
+ struct server *server_server = pj_ioqueue_get_user_data(key);
+ pj_status_t rc;
+
+ PJ_UNUSED_ARG(sock);
+
+ if (status == 0) {
+ pj_pool_t *pool;
+ struct server *new_server;
+
+ pool = pj_pool_create(mem, NULL, 4000, 4000, NULL);
+ new_server = pj_pool_zalloc(pool, sizeof(struct server));
+
+ new_server->pool = pool;
+ new_server->ioqueue = server_server->ioqueue;
+ new_server->sock_type = server_server->sock_type;
+ new_server->thread_count = server_server->thread_count;
+ new_server->sock = server_server->client_sock;
+ new_server->bufsize = 4096;
+ new_server->buf = pj_pool_alloc(pool, new_server->bufsize);
+ new_server->cb = server_server->cb;
+
+ rc = pj_ioqueue_register_sock( new_server->pool, new_server->ioqueue,
+ new_server->sock, new_server,
+ &server_server->cb, &new_server->key);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...registering new tcp sock", rc);
+ pj_sock_close(new_server->sock);
+ pj_pool_release(pool);
+ thread_quit_flag = 1;
+ return;
+ }
+
+ rc = pj_ioqueue_recv( new_server->ioqueue, new_server->key,
+ new_server->buf, new_server->bufsize, 0);
+ if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
+ app_perror("...recv() error", rc);
+ pj_sock_close(new_server->sock);
+ pj_pool_release(pool);
+ thread_quit_flag = 1;
+ return;
+ }
+ }
+
+ rc = pj_ioqueue_accept( server_server->ioqueue, server_server->key,
+ &server_server->client_sock,
+ NULL, NULL, NULL);
+ if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
+ app_perror("...accept() error", rc);
+ thread_quit_flag = 1;
+ }
+}
+
+static int thread_proc(void *arg)
+{
+ pj_ioqueue_t *ioqueue = arg;
+
+ while (!thread_quit_flag) {
+ pj_time_val timeout;
+ int count;
+
+ timeout.sec = 0; timeout.msec = 50;
+ count = pj_ioqueue_poll( ioqueue, &timeout );
+ if (count > 0) {
+ count = 0;
+ }
+ }
+
+ return 0;
+}
+
+static int start_echo_server( int sock_type, int port, int thread_count )
+{
+ pj_pool_t *pool;
+ struct server *server;
+ int i;
+ pj_status_t rc;
+
+
+ pool = pj_pool_create(mem, NULL, 4000, 4000, NULL);
+ if (!pool)
+ return -10;
+
+ server = pj_pool_zalloc(pool, sizeof(struct server));
+
+ server->sock_type = sock_type;
+ server->thread_count = thread_count;
+ server->cb.on_read_complete = &on_read_complete;
+ server->cb.on_accept_complete = &on_accept_complete;
+
+ /* create ioqueue */
+ rc = pj_ioqueue_create( pool, 32, thread_count, &server->ioqueue);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error creating ioqueue", rc);
+ return -20;
+ }
+
+ /* create and register socket to ioqueue. */
+ rc = app_socket(PJ_AF_INET, sock_type, 0, port, &server->sock);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error initializing socket", rc);
+ return -30;
+ }
+
+ rc = pj_ioqueue_register_sock( pool, server->ioqueue,
+ server->sock,
+ server, &server->cb,
+ &server->key);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error registering socket to ioqueue", rc);
+ return -40;
+ }
+
+ /* create receive buffer. */
+ server->bufsize = 4096;
+ server->buf = pj_pool_alloc(pool, server->bufsize);
+
+ if (sock_type == PJ_SOCK_DGRAM) {
+ server->addrlen = sizeof(server->addr);
+ rc = pj_ioqueue_recvfrom( server->ioqueue, server->key,
+ server->buf, server->bufsize,
+ 0,
+ &server->addr, &server->addrlen);
+ if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
+ app_perror("...read error", rc);
+ return -50;
+ }
+ } else {
+ rc = pj_ioqueue_accept( server->ioqueue, server->key,
+ &server->client_sock, NULL, NULL, NULL );
+ if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
+ app_perror("...accept() error", rc);
+ return -60;
+ }
+ }
+
+ /* create threads. */
+
+ for (i=0; i<thread_count; ++i) {
+ pj_thread_t *thread;
+ rc = pj_thread_create(pool, NULL, &thread_proc, server->ioqueue,
+ PJ_THREAD_DEFAULT_STACK_SIZE, 0, &thread);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...unable to create thread", rc);
+ return -70;
+ }
+ }
+
+ /* Done. */
+ return PJ_SUCCESS;
+}
+
+int echo_server(void)
+{
+ enum { MAX_THREADS = 4 };
+ int sock_types[2];
+ int i, j, rc;
+
+ sock_types[0] = PJ_SOCK_DGRAM;
+ sock_types[1] = PJ_SOCK_STREAM;
+
+ for (i=0; i<2; ++i) {
+ for (j=0; j<MAX_THREADS; ++j) {
+ rc = start_echo_server(sock_types[i], ECHO_SERVER_START_PORT+j, j+1);
+ if (rc != 0)
+ return rc;
+ }
+ }
+
+ pj_thread_sleep(100);
+    PJ_LOG(3,("", "Echo server started on ports %d - %d",
+              ECHO_SERVER_START_PORT, ECHO_SERVER_START_PORT + MAX_THREADS - 1));
+
+ PJ_LOG(3,("", "Press Ctrl-C to quit"));
+
+ for (;!thread_quit_flag;) {
+ pj_thread_sleep(1000);
+ }
+
+ return 0;
+}
+
+
+#else
+int dummy_echo_server;
+#endif /* INCLUDE_ECHO_SERVER */
+
diff --git a/pjlib/src/pjlib-test/errno.c b/pjlib/src/pjlib-test/errno.c
new file mode 100644
index 00000000..44f60ec7
--- /dev/null
+++ b/pjlib/src/pjlib-test/errno.c
@@ -0,0 +1,162 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/errno.c 4 10/14/05 3:05p Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/errno.c $
+ *
+ * 4 10/14/05 3:05p Bennylp
+ * Fixed warning about strlen() on Linux.
+ *
+ * 3 14/10/05 11:30 Bennylp
+ * Verify the error message.
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/09/05 9:56p Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+#include <pj/errno.h>
+#include <pj/log.h>
+#include <pj/ctype.h>
+#include <pj/compat/socket.h>
+#include <pj/string.h>
+
+#if INCLUDE_ERRNO_TEST
+
+#define THIS_FILE "errno"
+
+#if defined(PJ_WIN32) && PJ_WIN32 != 0
+# include <windows.h>
+#endif
+
+#if defined(PJ_HAS_ERRNO_H) && PJ_HAS_ERRNO_H != 0
+# include <errno.h>
+#endif
+
+static void trim_newlines(char *s)
+{
+ while (*s) {
+ if (*s == '\r' || *s == '\n')
+ *s = ' ';
+ ++s;
+ }
+}
+
+int my_strncasecmp(const char *s1, const char *s2, int max_len)
+{
+ while (*s1 && *s2 && max_len > 0) {
+ if (pj_tolower(*s1) != pj_tolower(*s2))
+ return -1;
+ ++s1;
+ ++s2;
+ --max_len;
+ }
+ return 0;
+}
+
+const char *my_stristr(const char *whole, const char *part)
+{
+ int part_len = strlen(part);
+ while (*whole) {
+ if (my_strncasecmp(whole, part, part_len) == 0)
+ return whole;
+ ++whole;
+ }
+ return NULL;
+}
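+
+/* Minimal sketch (ours, illustrative only; not called by the test) of the
+ * translation that errno_test() below verifies: a native OS error code is
+ * folded into a pj_status_t with PJ_STATUS_FROM_OS() and rendered with
+ * pj_strerror(). The helper name sketch_show_os_error() is hypothetical.
+ */
+static void sketch_show_os_error(int os_err)
+{
+    char msg[256];
+    pj_status_t status = PJ_STATUS_FROM_OS(os_err);
+
+    pj_strerror(status, msg, sizeof(msg));
+    PJ_LOG(3,(THIS_FILE, "...os_err=%d maps to status=%d (%s)",
+              os_err, status, msg));
+}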
+
+int errno_test(void)
+{
+ enum { CUT = 6 };
+ pj_status_t rc;
+ char errbuf[256];
+
+ PJ_LOG(3,(THIS_FILE, "...errno test: check the msg carefully"));
+
+ /*
+ * Windows platform error.
+ */
+# ifdef ERROR_INVALID_DATA
+ rc = PJ_STATUS_FROM_OS(ERROR_INVALID_DATA);
+ pj_set_os_error(rc);
+
+ /* Whole */
+ pj_strerror(rc, errbuf, sizeof(errbuf));
+ trim_newlines(errbuf);
+ PJ_LOG(3,(THIS_FILE, "...msg for rc=ERROR_INVALID_DATA: '%s'", errbuf));
+ if (my_stristr(errbuf, "invalid") == NULL) {
+ PJ_LOG(3, (THIS_FILE,
+ "...error: expecting \"invalid\" string in the msg"));
+ return -20;
+ }
+
+ /* Cut version. */
+ pj_strerror(rc, errbuf, CUT);
+ PJ_LOG(3,(THIS_FILE, "...msg for rc=ERROR_INVALID_DATA (cut): '%s'", errbuf));
+# endif
+
+ /*
+ * Unix errors
+ */
+# ifdef EINVAL
+ rc = PJ_STATUS_FROM_OS(EINVAL);
+ pj_set_os_error(rc);
+
+ /* Whole */
+ pj_strerror(rc, errbuf, sizeof(errbuf));
+ trim_newlines(errbuf);
+ PJ_LOG(3,(THIS_FILE, "...msg for rc=EINVAL: '%s'", errbuf));
+ if (my_stristr(errbuf, "invalid") == NULL) {
+ PJ_LOG(3, (THIS_FILE,
+ "...error: expecting \"invalid\" string in the msg"));
+ return -30;
+ }
+
+ /* Cut */
+ pj_strerror(rc, errbuf, CUT);
+ PJ_LOG(3,(THIS_FILE, "...msg for rc=EINVAL (cut): '%s'", errbuf));
+# endif
+
+ /*
+ * Windows WSA errors
+ */
+# ifdef WSAEINVAL
+ rc = PJ_STATUS_FROM_OS(WSAEINVAL);
+ pj_set_os_error(rc);
+
+ /* Whole */
+ pj_strerror(rc, errbuf, sizeof(errbuf));
+ trim_newlines(errbuf);
+ PJ_LOG(3,(THIS_FILE, "...msg for rc=WSAEINVAL: '%s'", errbuf));
+ if (my_stristr(errbuf, "invalid") == NULL) {
+ PJ_LOG(3, (THIS_FILE,
+ "...error: expecting \"invalid\" string in the msg"));
+ return -40;
+ }
+
+ /* Cut */
+ pj_strerror(rc, errbuf, CUT);
+ PJ_LOG(3,(THIS_FILE, "...msg for rc=WSAEINVAL (cut): '%s'", errbuf));
+# endif
+
+ pj_strerror(PJ_EBUG, errbuf, sizeof(errbuf));
+ PJ_LOG(3,(THIS_FILE, "...msg for rc=PJ_EBUG: '%s'", errbuf));
+ if (my_stristr(errbuf, "BUG") == NULL) {
+ PJ_LOG(3, (THIS_FILE,
+ "...error: expecting \"BUG\" string in the msg"));
+ return -20;
+ }
+
+ pj_strerror(PJ_EBUG, errbuf, CUT);
+ PJ_LOG(3,(THIS_FILE, "...msg for rc=PJ_EBUG, cut at %d chars: '%s'",
+ CUT, errbuf));
+
+ return 0;
+}
+
+
+#endif /* INCLUDE_ERRNO_TEST */
+
+
diff --git a/pjlib/src/pjlib-test/exception.c b/pjlib/src/pjlib-test/exception.c
new file mode 100644
index 00000000..2fe62e6e
--- /dev/null
+++ b/pjlib/src/pjlib-test/exception.c
@@ -0,0 +1,156 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/exception.c 2 10/14/05 12:26a Bennylp $
+ */
+#include "test.h"
+
+
+/**
+ * \page page_pjlib_exception_test Test: Exception Handling
+ *
+ * This file provides the implementation of \b exception_test(). It tests the
+ * functionality of the exception handling API.
+ *
+ * @note This test uses static IDs that are not acquired through proper
+ * registration. This is not recommended, since it may create ID collisions.
+ *
+ * \section exception_test_sec Scope of the Test
+ *
+ * Some scenarios tested:
+ * - no exception situation
+ * - basic TRY/CATCH
+ * - multiple exception handlers
+ * - default handlers
+ *
+ *
+ * This file is <b>pjlib-test/exception.c</b>
+ *
+ * \include pjlib-test/exception.c
+ */
+
+
+#if INCLUDE_EXCEPTION_TEST
+
+#include <pjlib.h>
+
+#define ID_1 1
+#define ID_2 2
+
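+/* A minimal usage sketch (ours, illustrative only; it is not called by
+ * the test). It shows the basic PJ_TRY / PJ_CATCH / PJ_DEFAULT / PJ_END
+ * pattern that the scenarios in the page description above exercise.
+ * The macros and ID_1 are the same ones used by test() below; only the
+ * helper name sketch_try_catch() is ours.
+ */
+static int sketch_try_catch(void)
+{
+    PJ_USE_EXCEPTION;
+    int result = -1;
+
+    PJ_TRY {
+        PJ_THROW(ID_1);         /* control jumps to the matching handler */
+        result = -2;            /* never reached */
+    }
+    PJ_CATCH(ID_1) {
+        result = 0;             /* exception ID_1 is caught here */
+    }
+    PJ_DEFAULT {
+        result = -3;            /* any other exception would end up here */
+    }
+    PJ_END;
+
+    return result;              /* 0 when ID_1 is caught as expected */
+}
+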
+static int throw_id_1(void)
+{
+ PJ_THROW( ID_1 );
+ return -1;
+}
+
+static int throw_id_2(void)
+{
+ PJ_THROW( ID_2 );
+ return -1;
+}
+
+
+static int test(void)
+{
+ PJ_USE_EXCEPTION;
+ int rc = 0;
+
+ /*
+ * No exception situation.
+ */
+ PJ_TRY {
+ rc = rc;
+ }
+ PJ_CATCH( ID_1 ) {
+ rc = -2;
+ }
+ PJ_DEFAULT {
+ rc = -3;
+ }
+ PJ_END;
+
+ if (rc != 0)
+ return rc;
+
+
+ /*
+ * Basic TRY/CATCH
+ */
+ PJ_TRY {
+ rc = throw_id_1();
+
+ // should not reach here.
+ rc = -10;
+ }
+ PJ_CATCH( ID_1 ) {
+ if (!rc) rc = 0;
+ }
+ PJ_DEFAULT {
+ if (!rc) rc = -20;
+ }
+ PJ_END;
+
+ if (rc != 0)
+ return rc;
+
+ /*
+ * Multiple exceptions handlers
+ */
+ PJ_TRY {
+ rc = throw_id_2();
+ // should not reach here.
+ rc = -25;
+ }
+ PJ_CATCH( ID_1 ) {
+ if (!rc) rc = -30;
+ }
+ PJ_CATCH( ID_2 ) {
+ if (!rc) rc = 0;
+ }
+ PJ_DEFAULT {
+ if (!rc) rc = -40;
+ }
+ PJ_END;
+
+ if (rc != 0)
+ return rc;
+
+ /*
+ * Test default handler.
+ */
+ PJ_TRY {
+ rc = throw_id_1();
+ // should not reach here
+ rc = -50;
+ }
+ PJ_CATCH( ID_2 ) {
+ if (!rc) rc = -60;
+ }
+ PJ_DEFAULT {
+ if (!rc) rc = 0;
+ }
+ PJ_END;
+
+ if (rc != 0)
+ return rc;
+
+ return 0;
+}
+
+int exception_test(void)
+{
+ int i, rc;
+ enum { LOOP = 10 };
+
+ for (i=0; i<LOOP; ++i) {
+ if ((rc=test()) != 0)
+ return rc;
+ }
+ return 0;
+}
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_exception_test;
+#endif /* INCLUDE_EXCEPTION_TEST */
+
+
diff --git a/pjlib/src/pjlib-test/fifobuf.c b/pjlib/src/pjlib-test/fifobuf.c
new file mode 100644
index 00000000..9bb471b9
--- /dev/null
+++ b/pjlib/src/pjlib-test/fifobuf.c
@@ -0,0 +1,100 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/fifobuf.c 2 10/14/05 12:26a Bennylp $
+ */
+#include "test.h"
+
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_fifobuf_test;
+
+#if INCLUDE_FIFOBUF_TEST
+
+#include <pjlib.h>
+
+int fifobuf_test()
+{
+ enum { SIZE = 1024, MAX_ENTRIES = 128,
+ MIN_SIZE = 4, MAX_SIZE = 64,
+ LOOP=10000 };
+ pj_pool_t *pool;
+ pj_fifobuf_t fifo;
+ unsigned available = SIZE;
+ void *entries[MAX_ENTRIES];
+ void *buffer;
+ int i;
+
+ pool = pj_pool_create(mem, NULL, SIZE+256, 0, NULL);
+ if (!pool)
+ return -10;
+
+ buffer = pj_pool_alloc(pool, SIZE);
+ if (!buffer)
+ return -20;
+
+ pj_fifobuf_init (&fifo, buffer, SIZE);
+
+ // Test 1
+ for (i=0; i<LOOP*MAX_ENTRIES; ++i) {
+ int size;
+ int c, f;
+ c = i%2;
+ f = (i+1)%2;
+ do {
+ size = MIN_SIZE+(pj_rand() % MAX_SIZE);
+ entries[c] = pj_fifobuf_alloc (&fifo, size);
+ } while (entries[c] == 0);
+ if ( i!=0) {
+ pj_fifobuf_free(&fifo, entries[f]);
+ }
+ }
+ if (entries[(i+1)%2])
+ pj_fifobuf_free(&fifo, entries[(i+1)%2]);
+
+ if (pj_fifobuf_max_size(&fifo) < SIZE-4) {
+ pj_assert(0);
+ return -1;
+ }
+
+ // Test 2
+ entries[0] = pj_fifobuf_alloc (&fifo, MIN_SIZE);
+ if (!entries[0]) return -1;
+ for (i=0; i<LOOP*MAX_ENTRIES; ++i) {
+ int size = MIN_SIZE+(pj_rand() % MAX_SIZE);
+ entries[1] = pj_fifobuf_alloc (&fifo, size);
+ if (entries[1])
+ pj_fifobuf_unalloc(&fifo, entries[1]);
+ }
+ pj_fifobuf_unalloc(&fifo, entries[0]);
+ if (pj_fifobuf_max_size(&fifo) < SIZE-4) {
+ pj_assert(0);
+ return -2;
+ }
+
+ // Test 3
+ for (i=0; i<LOOP; ++i) {
+ int count, j;
+ for (count=0; available>=MIN_SIZE+4 && count < MAX_ENTRIES;) {
+ int size = MIN_SIZE+(pj_rand() % MAX_SIZE);
+ entries[count] = pj_fifobuf_alloc (&fifo, size);
+ if (entries[count]) {
+ available -= (size+4);
+ ++count;
+ }
+ }
+ for (j=0; j<count; ++j) {
+ pj_fifobuf_free (&fifo, entries[j]);
+ }
+ available = SIZE;
+ }
+
+ if (pj_fifobuf_max_size(&fifo) < SIZE-4) {
+ pj_assert(0);
+ return -3;
+ }
+ pj_pool_release(pool);
+ return 0;
+}
+
+#endif /* INCLUDE_FIFOBUF_TEST */
+
+
diff --git a/pjlib/src/pjlib-test/ioq_perf.c b/pjlib/src/pjlib-test/ioq_perf.c
new file mode 100644
index 00000000..344b0c96
--- /dev/null
+++ b/pjlib/src/pjlib-test/ioq_perf.c
@@ -0,0 +1,466 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/ioq_perf.c 4 10/29/05 11:51a Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/ioq_perf.c $
+ *
+ * 4 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 3 14/10/05 11:31 Bennylp
+ * More generalized test method, works for UDP too.
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/11/05 3:52p Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+#include <pjlib.h>
+#include <pj/compat/high_precision.h>
+
+/**
+ * \page page_pjlib_ioqueue_perf_test Test: I/O Queue Performance
+ *
+ * Test the performance of the I/O queue, using a typical producer-
+ * consumer test. The test examines the effect of using multiple
+ * threads on the performance.
+ *
+ * This file is <b>pjlib-test/ioq_perf.c</b>
+ *
+ * \include pjlib-test/ioq_perf.c
+ */
+
+#if INCLUDE_IOQUEUE_PERF_TEST
+
+#ifdef _MSC_VER
+# pragma warning ( disable: 4204) // non-constant aggregate initializer
+#endif
+
+#define THIS_FILE "ioq_perf"
+//#define TRACE_(expr) PJ_LOG(3,expr)
+#define TRACE_(expr)
+
+
+static pj_bool_t thread_quit_flag;
+static pj_status_t last_error;
+static unsigned last_error_counter;
+
+/* Descriptor for each producer/consumer pair. */
+typedef struct test_item
+{
+ pj_sock_t server_fd,
+ client_fd;
+ pj_ioqueue_t *ioqueue;
+ pj_ioqueue_key_t *server_key,
+ *client_key;
+ pj_size_t buffer_size;
+ char *outgoing_buffer;
+ char *incoming_buffer;
+ pj_size_t bytes_sent,
+ bytes_recv;
+} test_item;
+
+/* Callback when data has been read.
+ * Increment item->bytes_recv and start the next read.
+ */
+static void on_read_complete(pj_ioqueue_key_t *key, pj_ssize_t bytes_read)
+{
+ test_item *item = pj_ioqueue_get_user_data(key);
+ pj_status_t rc;
+
+ //TRACE_((THIS_FILE, " read complete, bytes_read=%d", bytes_read));
+
+ if (thread_quit_flag)
+ return;
+
+ if (bytes_read < 0) {
+ pj_status_t rc = -bytes_read;
+ char errmsg[128];
+
+ if (rc != last_error) {
+ last_error = rc;
+ pj_strerror(rc, errmsg, sizeof(errmsg));
+ PJ_LOG(3,(THIS_FILE, "...error: read error, bytes_read=%d (%s)",
+ bytes_read, errmsg));
+ PJ_LOG(3,(THIS_FILE,
+ ".....additional info: total read=%u, total written=%u",
+ item->bytes_recv, item->bytes_sent));
+ } else {
+ last_error_counter++;
+ }
+ bytes_read = 0;
+
+ } else if (bytes_read == 0) {
+ PJ_LOG(3,(THIS_FILE, "...socket has closed!"));
+ }
+
+ item->bytes_recv += bytes_read;
+
+    /* To ensure that the test quits, even if the main thread
+     * doesn't have time to run.
+ */
+ if (item->bytes_recv > item->buffer_size * 10000)
+ thread_quit_flag = 1;
+
+ rc = pj_ioqueue_recv( item->ioqueue, item->server_key,
+ item->incoming_buffer, item->buffer_size, 0 );
+
+ if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
+ if (rc != last_error) {
+ last_error = rc;
+ app_perror("...error: read error", rc);
+ } else {
+ last_error_counter++;
+ }
+ }
+}
+
+/* Callback when data has been written.
+ * Increment item->bytes_sent and write the next data.
+ */
+static void on_write_complete(pj_ioqueue_key_t *key, pj_ssize_t bytes_sent)
+{
+ test_item *item = pj_ioqueue_get_user_data(key);
+
+ //TRACE_((THIS_FILE, " write complete: sent = %d", bytes_sent));
+
+ if (thread_quit_flag)
+ return;
+
+ item->bytes_sent += bytes_sent;
+
+ if (bytes_sent <= 0) {
+ PJ_LOG(3,(THIS_FILE, "...error: sending stopped. bytes_sent=%d",
+ bytes_sent));
+ }
+ else {
+ pj_status_t rc;
+
+ rc = pj_ioqueue_write(item->ioqueue, item->client_key,
+ item->outgoing_buffer, item->buffer_size);
+ if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
+ app_perror("...error: write error", rc);
+ }
+ }
+}
+
+/* The worker thread. */
+static int worker_thread(void *arg)
+{
+ pj_ioqueue_t *ioqueue = arg;
+ const pj_time_val timeout = {0, 100};
+ int rc;
+
+ while (!thread_quit_flag) {
+ rc = pj_ioqueue_poll(ioqueue, &timeout);
+ //TRACE_((THIS_FILE, " thread: poll returned rc=%d", rc));
+ if (rc < 0) {
+ app_perror("...error in pj_ioqueue_poll()", pj_get_netos_error());
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/* Calculate the bandwidth for the specific test configuration.
+ * The test is simple:
+ * - create sockpair_cnt number of producer-consumer socket pair.
+ * - create thread_cnt number of worker threads.
+ *  - each producer will send buffer_size bytes of data as fast and
+ *    as soon as it can.
+ *  - each consumer will read buffer_size bytes of data as fast
+ *    as it can.
+ *  - measure the total bytes received by all consumers during a
+ *    period of time.
+ *
+ * (A short usage sketch follows the definition of perform_test() below.)
+ */
+static int perform_test(int sock_type, const char *type_name,
+ unsigned thread_cnt, unsigned sockpair_cnt,
+ pj_size_t buffer_size,
+ pj_size_t *p_bandwidth)
+{
+ enum { MSEC_DURATION = 5000 };
+ pj_pool_t *pool;
+ test_item *items;
+ pj_thread_t **thread;
+ pj_ioqueue_t *ioqueue;
+ pj_status_t rc;
+ pj_ioqueue_callback ioqueue_callback;
+ pj_uint32_t total_elapsed_usec, total_received;
+ pj_highprec_t bandwidth;
+ pj_timestamp start, stop;
+ unsigned i;
+
+ TRACE_((THIS_FILE, " starting test.."));
+
+ ioqueue_callback.on_read_complete = &on_read_complete;
+ ioqueue_callback.on_write_complete = &on_write_complete;
+
+ thread_quit_flag = 0;
+
+ pool = pj_pool_create(mem, NULL, 4096, 4096, NULL);
+ if (!pool)
+ return -10;
+
+ items = pj_pool_alloc(pool, sockpair_cnt*sizeof(test_item));
+ thread = pj_pool_alloc(pool, thread_cnt*sizeof(pj_thread_t*));
+
+ TRACE_((THIS_FILE, " creating ioqueue.."));
+ rc = pj_ioqueue_create(pool, sockpair_cnt*2, thread_cnt, &ioqueue);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: unable to create ioqueue", rc);
+ return -15;
+ }
+
+ /* Initialize each producer-consumer pair. */
+ for (i=0; i<sockpair_cnt; ++i) {
+
+ items[i].ioqueue = ioqueue;
+ items[i].buffer_size = buffer_size;
+ items[i].outgoing_buffer = pj_pool_alloc(pool, buffer_size);
+ items[i].incoming_buffer = pj_pool_alloc(pool, buffer_size);
+ items[i].bytes_recv = items[i].bytes_sent = 0;
+
+ /* randomize outgoing buffer. */
+ pj_create_random_string(items[i].outgoing_buffer, buffer_size);
+
+ /* Create socket pair. */
+ TRACE_((THIS_FILE, " calling socketpair.."));
+ rc = app_socketpair(PJ_AF_INET, sock_type, 0,
+ &items[i].server_fd, &items[i].client_fd);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: unable to create socket pair", rc);
+ return -20;
+ }
+
+ /* Register server socket to ioqueue. */
+ TRACE_((THIS_FILE, " register(1).."));
+ rc = pj_ioqueue_register_sock(pool, ioqueue,
+ items[i].server_fd,
+ &items[i], &ioqueue_callback,
+ &items[i].server_key);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: registering server socket to ioqueue", rc);
+ return -60;
+ }
+
+ /* Register client socket to ioqueue. */
+ TRACE_((THIS_FILE, " register(2).."));
+ rc = pj_ioqueue_register_sock(pool, ioqueue,
+ items[i].client_fd,
+ &items[i], &ioqueue_callback,
+ &items[i].client_key);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: registering server socket to ioqueue", rc);
+ return -70;
+ }
+
+ /* Start reading. */
+ TRACE_((THIS_FILE, " pj_ioqueue_recv.."));
+ rc = pj_ioqueue_recv(ioqueue, items[i].server_key,
+ items[i].incoming_buffer, items[i].buffer_size,
+ 0);
+ if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
+ app_perror("...error: pj_ioqueue_recv", rc);
+ return -73;
+ }
+
+ /* Start writing. */
+ TRACE_((THIS_FILE, " pj_ioqueue_write.."));
+ rc = pj_ioqueue_write(ioqueue, items[i].client_key,
+ items[i].outgoing_buffer, items[i].buffer_size);
+ if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
+ app_perror("...error: pj_ioqueue_write", rc);
+ return -76;
+ }
+
+ }
+
+ /* Create the threads. */
+ for (i=0; i<thread_cnt; ++i) {
+ rc = pj_thread_create( pool, NULL,
+ &worker_thread,
+ ioqueue,
+ PJ_THREAD_DEFAULT_STACK_SIZE,
+ PJ_THREAD_SUSPENDED, &thread[i] );
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: unable to create thread", rc);
+ return -80;
+ }
+ }
+
+ /* Mark start time. */
+ rc = pj_get_timestamp(&start);
+ if (rc != PJ_SUCCESS)
+ return -90;
+
+ /* Start the thread. */
+ TRACE_((THIS_FILE, " resuming all threads.."));
+ for (i=0; i<thread_cnt; ++i) {
+ rc = pj_thread_resume(thread[i]);
+ if (rc != 0)
+ return -100;
+ }
+
+    /* Wait for MSEC_DURATION milliseconds.
+     * This should be as simple as pj_thread_sleep(MSEC_DURATION) actually,
+     * but unfortunately that doesn't work when the system doesn't employ
+     * timeslicing for threads.
+ */
+ TRACE_((THIS_FILE, " wait for few seconds.."));
+ do {
+ pj_thread_sleep(1);
+
+ /* Mark end time. */
+ rc = pj_get_timestamp(&stop);
+
+ if (thread_quit_flag) {
+ TRACE_((THIS_FILE, " transfer limit reached.."));
+ break;
+ }
+
+        if (pj_elapsed_usec(&start,&stop) >= MSEC_DURATION * 1000) {
+ TRACE_((THIS_FILE, " time limit reached.."));
+ break;
+ }
+
+ } while (1);
+
+ /* Terminate all threads. */
+ TRACE_((THIS_FILE, " terminating all threads.."));
+ thread_quit_flag = 1;
+
+ for (i=0; i<thread_cnt; ++i) {
+ TRACE_((THIS_FILE, " join thread %d..", i));
+ pj_thread_join(thread[i]);
+ pj_thread_destroy(thread[i]);
+ }
+
+ /* Close all sockets. */
+ TRACE_((THIS_FILE, " closing all sockets.."));
+ for (i=0; i<sockpair_cnt; ++i) {
+ pj_ioqueue_unregister(ioqueue, items[i].server_key);
+ pj_ioqueue_unregister(ioqueue, items[i].client_key);
+ pj_sock_close(items[i].server_fd);
+ pj_sock_close(items[i].client_fd);
+ }
+
+ /* Destroy ioqueue. */
+ TRACE_((THIS_FILE, " destroying ioqueue.."));
+ pj_ioqueue_destroy(ioqueue);
+
+ /* Calculate actual time in usec. */
+ total_elapsed_usec = pj_elapsed_usec(&start, &stop);
+
+ /* Calculate total bytes received. */
+ total_received = 0;
+ for (i=0; i<sockpair_cnt; ++i) {
+        total_received += items[i].bytes_recv;
+ }
+
+ /* bandwidth = total_received*1000/total_elapsed_usec */
+ bandwidth = total_received;
+ pj_highprec_mul(bandwidth, 1000);
+ pj_highprec_div(bandwidth, total_elapsed_usec);
+
+ *p_bandwidth = (pj_uint32_t)bandwidth;
+
+ PJ_LOG(3,(THIS_FILE, " %.4s %d %d %3d us %8d KB/s",
+ type_name, thread_cnt, sockpair_cnt,
+ -1 /*total_elapsed_usec/sockpair_cnt*/,
+ *p_bandwidth));
+
+ /* Done. */
+ pj_pool_release(pool);
+
+ TRACE_((THIS_FILE, " done.."));
+ return 0;
+}
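+
+/* Usage sketch (ours, illustrative only; not called by the test): running
+ * a single configuration directly instead of the full matrix driven by
+ * ioqueue_perf_test() below. The helper name and the parameter values
+ * are arbitrary.
+ */
+static int sketch_single_udp_case(void)
+{
+    pj_size_t bandwidth = 0;
+    int rc;
+
+    /* two worker threads, four socket pairs, 512-byte packets */
+    rc = perform_test(PJ_SOCK_DGRAM, "udp", 2, 4, 512, &bandwidth);
+    if (rc != 0)
+        return rc;
+
+    PJ_LOG(3,(THIS_FILE, "    single udp case: %u KB/s",
+              (unsigned)bandwidth));
+    return 0;
+}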
+
+/*
+ * main test entry.
+ */
+int ioqueue_perf_test(void)
+{
+ enum { BUF_SIZE = 512 };
+ int i, rc;
+ struct {
+ int type;
+ const char *type_name;
+ int thread_cnt;
+ int sockpair_cnt;
+ } test_param[] =
+ {
+ { PJ_SOCK_DGRAM, "udp", 1, 1},
+ { PJ_SOCK_DGRAM, "udp", 1, 2},
+ { PJ_SOCK_DGRAM, "udp", 1, 4},
+ { PJ_SOCK_DGRAM, "udp", 1, 8},
+ { PJ_SOCK_DGRAM, "udp", 2, 1},
+ { PJ_SOCK_DGRAM, "udp", 2, 2},
+ { PJ_SOCK_DGRAM, "udp", 2, 4},
+ { PJ_SOCK_DGRAM, "udp", 2, 8},
+ { PJ_SOCK_DGRAM, "udp", 4, 1},
+ { PJ_SOCK_DGRAM, "udp", 4, 2},
+ { PJ_SOCK_DGRAM, "udp", 4, 4},
+ { PJ_SOCK_DGRAM, "udp", 4, 8},
+ { PJ_SOCK_STREAM, "tcp", 1, 1},
+ { PJ_SOCK_STREAM, "tcp", 1, 2},
+ { PJ_SOCK_STREAM, "tcp", 1, 4},
+ { PJ_SOCK_STREAM, "tcp", 1, 8},
+ { PJ_SOCK_STREAM, "tcp", 2, 1},
+ { PJ_SOCK_STREAM, "tcp", 2, 2},
+ { PJ_SOCK_STREAM, "tcp", 2, 4},
+ { PJ_SOCK_STREAM, "tcp", 2, 8},
+ { PJ_SOCK_STREAM, "tcp", 4, 1},
+ { PJ_SOCK_STREAM, "tcp", 4, 2},
+ { PJ_SOCK_STREAM, "tcp", 4, 4},
+ { PJ_SOCK_STREAM, "tcp", 4, 8},
+ };
+ pj_size_t best_bandwidth;
+ int best_index = 0;
+
+ PJ_LOG(3,(THIS_FILE, " Benchmarking ioqueue:"));
+ PJ_LOG(3,(THIS_FILE, " ==============================================="));
+ PJ_LOG(3,(THIS_FILE, " Type Threads Skt.Pairs Avg.Time Bandwidth"));
+ PJ_LOG(3,(THIS_FILE, " ==============================================="));
+
+ best_bandwidth = 0;
+ for (i=0; i<sizeof(test_param)/sizeof(test_param[0]); ++i) {
+ pj_size_t bandwidth;
+
+ rc = perform_test(test_param[i].type,
+ test_param[i].type_name,
+ test_param[i].thread_cnt,
+ test_param[i].sockpair_cnt,
+ BUF_SIZE,
+ &bandwidth);
+ if (rc != 0)
+ return rc;
+
+ if (bandwidth > best_bandwidth)
+ best_bandwidth = bandwidth, best_index = i;
+
+ /* Give it a rest before next test. */
+ pj_thread_sleep(500);
+ }
+
+ PJ_LOG(3,(THIS_FILE,
+ " Best: Type=%s Threads=%d, Skt.Pairs=%d, Bandwidth=%u KB/s",
+ test_param[best_index].type_name,
+ test_param[best_index].thread_cnt,
+ test_param[best_index].sockpair_cnt,
+ best_bandwidth));
+ PJ_LOG(3,(THIS_FILE, " (Note: packet size=%d, total errors=%u)",
+ BUF_SIZE, last_error_counter));
+ return 0;
+}
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_uiq_perf_test;
+#endif /* INCLUDE_IOQUEUE_PERF_TEST */
+
+
diff --git a/pjlib/src/pjlib-test/ioq_tcp.c b/pjlib/src/pjlib-test/ioq_tcp.c
new file mode 100644
index 00000000..434c25ae
--- /dev/null
+++ b/pjlib/src/pjlib-test/ioq_tcp.c
@@ -0,0 +1,474 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/ioq_tcp.c 4 10/29/05 10:23p Bennylp $
+ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/ioq_tcp.c $
+ *
+ * 4 10/29/05 10:23p Bennylp
+ * Fixed no-memory exception.
+ *
+ * 3 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ */
+#include "test.h"
+
+/**
+ * \page page_pjlib_ioqueue_tcp_test Test: I/O Queue (TCP)
+ *
+ * This file provides the implementation to test the
+ * functionality of the I/O queue when a TCP socket is used.
+ *
+ *
+ * This file is <b>pjlib-test/ioq_tcp.c</b>
+ *
+ * \include pjlib-test/ioq_tcp.c
+ */
+
+
+#if INCLUDE_TCP_IOQUEUE_TEST
+
+#include <pjlib.h>
+
+#if PJ_HAS_TCP
+
+#define THIS_FILE "test_tcp"
+#define PORT 50000
+#define NON_EXISTANT_PORT 50123
+#define LOOP 100
+#define BUF_MIN_SIZE 32
+#define BUF_MAX_SIZE 2048
+#define SOCK_INACTIVE_MIN (4-2)
+#define SOCK_INACTIVE_MAX (PJ_IOQUEUE_MAX_HANDLES - 2)
+#define POOL_SIZE (2*BUF_MAX_SIZE + SOCK_INACTIVE_MAX*128 + 2048)
+
+static pj_ssize_t callback_read_size,
+ callback_write_size,
+ callback_accept_status,
+ callback_connect_status;
+static pj_ioqueue_key_t *callback_read_key,
+ *callback_write_key,
+ *callback_accept_key,
+ *callback_connect_key;
+
+static void on_ioqueue_read(pj_ioqueue_key_t *key, pj_ssize_t bytes_read)
+{
+ callback_read_key = key;
+ callback_read_size = bytes_read;
+}
+
+static void on_ioqueue_write(pj_ioqueue_key_t *key, pj_ssize_t bytes_written)
+{
+ callback_write_key = key;
+ callback_write_size = bytes_written;
+}
+
+static void on_ioqueue_accept(pj_ioqueue_key_t *key, pj_sock_t sock,
+ int status)
+{
+ PJ_UNUSED_ARG(sock);
+
+ callback_accept_key = key;
+ callback_accept_status = status;
+}
+
+static void on_ioqueue_connect(pj_ioqueue_key_t *key, int status)
+{
+ callback_connect_key = key;
+ callback_connect_status = status;
+}
+
+static pj_ioqueue_callback test_cb =
+{
+ &on_ioqueue_read,
+ &on_ioqueue_write,
+ &on_ioqueue_accept,
+ &on_ioqueue_connect,
+};
+
+static int send_recv_test(pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *skey,
+ pj_ioqueue_key_t *ckey,
+ void *send_buf,
+ void *recv_buf,
+ pj_ssize_t bufsize,
+ pj_timestamp *t_elapsed)
+{
+ int rc;
+ pj_ssize_t bytes;
+ pj_timestamp t1, t2;
+ int pending_op = 0;
+
+ // Start reading on the server side.
+ rc = pj_ioqueue_read(ioque, skey, recv_buf, bufsize);
+ if (rc != 0 && rc != PJ_EPENDING) {
+ return -100;
+ }
+
+ ++pending_op;
+
+ // Randomize send buffer.
+ pj_create_random_string((char*)send_buf, bufsize);
+
+ // Starts send on the client side.
+ bytes = pj_ioqueue_write(ioque, ckey, send_buf, bufsize);
+ if (bytes != bufsize && bytes != PJ_EPENDING) {
+ return -120;
+ }
+ if (bytes == PJ_EPENDING) {
+ ++pending_op;
+ }
+
+ // Begin time.
+ pj_get_timestamp(&t1);
+
+ // Reset indicators
+ callback_read_size = callback_write_size = 0;
+ callback_read_key = callback_write_key = NULL;
+
+ // Poll the queue until we've got completion event in the server side.
+ rc = 0;
+ while (pending_op > 0) {
+ rc = pj_ioqueue_poll(ioque, NULL);
+ if (rc > 0) {
+ if (callback_read_size) {
+ if (callback_read_size != bufsize) {
+ return -160;
+ }
+ if (callback_read_key != skey)
+ return -161;
+ }
+ if (callback_write_size) {
+ if (callback_write_key != ckey)
+ return -162;
+ }
+ pending_op -= rc;
+ }
+ if (rc < 0) {
+ return -170;
+ }
+ }
+
+ // End time.
+ pj_get_timestamp(&t2);
+ t_elapsed->u32.lo += (t2.u32.lo - t1.u32.lo);
+
+ if (rc < 0) {
+ return -150;
+ }
+
+ // Compare recv buffer with send buffer.
+ if (pj_memcmp(send_buf, recv_buf, bufsize) != 0) {
+ return -180;
+ }
+
+ // Success
+ return 0;
+}
+
+
+/*
+ * Compliance test for success scenario.
+ */
+static int compliance_test_0(void)
+{
+ pj_sock_t ssock=-1, csock0=-1, csock1=-1;
+ pj_sockaddr_in addr, client_addr, rmt_addr;
+ int client_addr_len;
+ pj_pool_t *pool = NULL;
+ char *send_buf, *recv_buf;
+ pj_ioqueue_t *ioque = NULL;
+ pj_ioqueue_key_t *skey, *ckey0, *ckey1;
+ int bufsize = BUF_MIN_SIZE;
+ pj_ssize_t status = -1;
+ int pending_op = 0;
+ pj_timestamp t_elapsed;
+ pj_str_t s;
+ pj_status_t rc;
+
+ // Create pool.
+ pool = pj_pool_create(mem, NULL, POOL_SIZE, 4000, NULL);
+
+ // Allocate buffers for send and receive.
+ send_buf = (char*)pj_pool_alloc(pool, bufsize);
+ recv_buf = (char*)pj_pool_alloc(pool, bufsize);
+
+ // Create server socket and client socket for connecting
+ rc = pj_sock_socket(PJ_AF_INET, PJ_SOCK_STREAM, 0, &ssock);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error creating socket", rc);
+ status=-1; goto on_error;
+ }
+
+ rc = pj_sock_socket(PJ_AF_INET, PJ_SOCK_STREAM, 0, &csock1);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error creating socket", rc);
+ status=-1; goto on_error;
+ }
+
+ // Bind server socket.
+ memset(&addr, 0, sizeof(addr));
+ addr.sin_family = PJ_AF_INET;
+ addr.sin_port = pj_htons(PORT);
+    if ((rc=pj_sock_bind(ssock, &addr, sizeof(addr))) != 0) {
+ app_perror("...bind error", rc);
+ status=-10; goto on_error;
+ }
+
+ // Create I/O Queue.
+ rc = pj_ioqueue_create(pool, PJ_IOQUEUE_MAX_HANDLES, 0, &ioque);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...ERROR in pj_ioqueue_create()", rc);
+ status=-20; goto on_error;
+ }
+
+ // Register server socket and client socket.
+ rc = pj_ioqueue_register_sock(pool, ioque, ssock, NULL, &test_cb, &skey);
+ if (rc == PJ_SUCCESS)
+ rc = pj_ioqueue_register_sock(pool, ioque, csock1, NULL, &test_cb,
+ &ckey1);
+ else
+ ckey1 = NULL;
+ if (rc != PJ_SUCCESS) {
+ app_perror("...ERROR in pj_ioqueue_register_sock()", rc);
+ status=-23; goto on_error;
+ }
+
+ // Server socket listen().
+    if ((rc=pj_sock_listen(ssock, 5)) != 0) {
+ app_perror("...ERROR in pj_sock_listen()", rc);
+ status=-25; goto on_error;
+ }
+
+ // Server socket accept()
+ client_addr_len = sizeof(pj_sockaddr_in);
+ status = pj_ioqueue_accept(ioque, skey, &csock0, &client_addr, &rmt_addr, &client_addr_len);
+ if (status != PJ_EPENDING) {
+        app_perror("...ERROR in pj_ioqueue_accept()", (pj_status_t)status);
+ status=-30; goto on_error;
+ }
+ if (status==PJ_EPENDING) {
+ ++pending_op;
+ }
+
+ // Initialize remote address.
+ memset(&addr, 0, sizeof(addr));
+ addr.sin_family = PJ_AF_INET;
+ addr.sin_port = pj_htons(PORT);
+ addr.sin_addr = pj_inet_addr(pj_cstr(&s, "127.0.0.1"));
+
+ // Client socket connect()
+ status = pj_ioqueue_connect(ioque, ckey1, &addr, sizeof(addr));
+ if (status!=PJ_SUCCESS && status != PJ_EPENDING) {
+        app_perror("...ERROR in pj_ioqueue_connect()", (pj_status_t)status);
+ status=-40; goto on_error;
+ }
+ if (status==PJ_EPENDING) {
+ ++pending_op;
+ }
+
+ // Poll until connected
+ callback_read_size = callback_write_size = 0;
+ callback_accept_status = callback_connect_status = -2;
+
+ callback_read_key = callback_write_key =
+ callback_accept_key = callback_connect_key = NULL;
+
+ while (pending_op) {
+ pj_time_val timeout = {1, 0};
+
+ status=pj_ioqueue_poll(ioque, &timeout);
+ if (status > 0) {
+ if (callback_accept_status != -2) {
+ if (callback_accept_status != 0) {
+ status=-41; goto on_error;
+ }
+ if (callback_accept_key != skey) {
+ status=-41; goto on_error;
+ }
+ }
+
+ if (callback_connect_status != -2) {
+ if (callback_connect_status != 0) {
+ status=-50; goto on_error;
+ }
+ if (callback_connect_key != ckey1) {
+ status=-51; goto on_error;
+ }
+ }
+
+ pending_op -= status;
+
+ if (pending_op == 0) {
+ status = 0;
+ }
+ }
+ }
+
+ // Check accepted socket.
+ if (csock0 == PJ_INVALID_SOCKET) {
+ status = -69;
+ app_perror("...accept() error", pj_get_os_error());
+ goto on_error;
+ }
+
+ // Register newly accepted socket.
+ rc = pj_ioqueue_register_sock(pool, ioque, csock0, NULL,
+ &test_cb, &ckey0);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...ERROR in pj_ioqueue_register_sock", rc);
+ status = -70;
+ goto on_error;
+ }
+
+ // Test send and receive.
+ t_elapsed.u32.lo = 0;
+ status = send_recv_test(ioque, ckey0, ckey1, send_buf, recv_buf, bufsize, &t_elapsed);
+ if (status != 0) {
+ goto on_error;
+ }
+
+ // Success
+ status = 0;
+
+on_error:
+ if (ssock != PJ_INVALID_SOCKET)
+ pj_sock_close(ssock);
+ if (csock1 != PJ_INVALID_SOCKET)
+ pj_sock_close(csock1);
+ if (csock0 != PJ_INVALID_SOCKET)
+ pj_sock_close(csock0);
+ if (ioque != NULL)
+ pj_ioqueue_destroy(ioque);
+ pj_pool_release(pool);
+ return status;
+
+}
+
+/*
+ * Compliance test for failed scenario.
+ * In this case, the client connects to a non-existent service.
+ */
+static int compliance_test_1(void)
+{
+ pj_sock_t csock1=-1;
+ pj_sockaddr_in addr;
+ pj_pool_t *pool = NULL;
+ pj_ioqueue_t *ioque = NULL;
+ pj_ioqueue_key_t *ckey1;
+ pj_ssize_t status = -1;
+ int pending_op = 0;
+ pj_str_t s;
+ pj_status_t rc;
+
+ // Create pool.
+ pool = pj_pool_create(mem, NULL, POOL_SIZE, 4000, NULL);
+
+ // Create I/O Queue.
+ rc = pj_ioqueue_create(pool, PJ_IOQUEUE_MAX_HANDLES, 0, &ioque);
+ if (!ioque) {
+ status=-20; goto on_error;
+ }
+
+ // Create client socket
+ rc = pj_sock_socket(PJ_AF_INET, PJ_SOCK_STREAM, 0, &csock1);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...ERROR in pj_sock_socket()", rc);
+ status=-1; goto on_error;
+ }
+
+ // Register client socket.
+ rc = pj_ioqueue_register_sock(pool, ioque, csock1, NULL,
+ &test_cb, &ckey1);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...ERROR in pj_ioqueue_register_sock()", rc);
+ status=-23; goto on_error;
+ }
+
+ // Initialize remote address.
+ memset(&addr, 0, sizeof(addr));
+ addr.sin_family = PJ_AF_INET;
+ addr.sin_port = pj_htons(NON_EXISTANT_PORT);
+ addr.sin_addr = pj_inet_addr(pj_cstr(&s, "127.0.0.1"));
+
+ // Client socket connect()
+ status = pj_ioqueue_connect(ioque, ckey1, &addr, sizeof(addr));
+ if (status==PJ_SUCCESS) {
+        // unexpected immediate success!
+ status = -30;
+ goto on_error;
+ }
+ if (status != PJ_EPENDING) {
+        // immediate failure: the expected result for a dead port
+ } else {
+ ++pending_op;
+ }
+
+ callback_connect_status = -2;
+ callback_connect_key = NULL;
+
+ // Poll until we've got result
+ while (pending_op) {
+ pj_time_val timeout = {1, 0};
+
+ status=pj_ioqueue_poll(ioque, &timeout);
+ if (status > 0) {
+ if (callback_connect_key==ckey1) {
+ if (callback_connect_status == 0) {
+ // unexpectedly connected!
+ status = -50;
+ goto on_error;
+ }
+ }
+
+ pending_op -= status;
+ if (pending_op == 0) {
+ status = 0;
+ }
+ }
+ }
+
+ // Success
+ status = 0;
+
+on_error:
+ if (csock1 != PJ_INVALID_SOCKET)
+ pj_sock_close(csock1);
+ if (ioque != NULL)
+ pj_ioqueue_destroy(ioque);
+ pj_pool_release(pool);
+ return status;
+}
+
+int tcp_ioqueue_test()
+{
+ int status;
+
+ PJ_LOG(3, (THIS_FILE, "..compliance test 0 (success scenario)"));
+ if ((status=compliance_test_0()) != 0) {
+ PJ_LOG(1, (THIS_FILE, "....FAILED (status=%d)\n", status));
+ return status;
+ }
+ PJ_LOG(3, (THIS_FILE, "..compliance test 1 (failed scenario)"));
+ if ((status=compliance_test_1()) != 0) {
+ PJ_LOG(1, (THIS_FILE, "....FAILED (status=%d)\n", status));
+ return status;
+ }
+
+ return 0;
+}
+
+#endif /* PJ_HAS_TCP */
+
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_uiq_tcp;
+#endif /* INCLUDE_TCP_IOQUEUE_TEST */
+
+
diff --git a/pjlib/src/pjlib-test/ioq_udp.c b/pjlib/src/pjlib-test/ioq_udp.c
new file mode 100644
index 00000000..8b95782a
--- /dev/null
+++ b/pjlib/src/pjlib-test/ioq_udp.c
@@ -0,0 +1,664 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/ioq_udp.c 4 10/29/05 10:23p Bennylp $
+ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/ioq_udp.c $
+ *
+ * 4 10/29/05 10:23p Bennylp
+ * Fixed no-memory exception.
+ *
+ * 3 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ */
+#include "test.h"
+
+
+/**
+ * \page page_pjlib_ioqueue_udp_test Test: I/O Queue (UDP)
+ *
+ * This file provides the implementation to test the
+ * functionality of the I/O queue when a UDP socket is used.
+ *
+ *
+ * This file is <b>pjlib-test/ioq_udp.c</b>
+ *
+ * \include pjlib-test/ioq_udp.c
+ */
+
+
+#if INCLUDE_UDP_IOQUEUE_TEST
+
+#include <pjlib.h>
+
+#include <pj/compat/socket.h>
+
+#define THIS_FILE "test_udp"
+#define PORT 51233
+#define LOOP 100
+#define BUF_MIN_SIZE 32
+#define BUF_MAX_SIZE 2048
+#define SOCK_INACTIVE_MIN (1)
+#define SOCK_INACTIVE_MAX (PJ_IOQUEUE_MAX_HANDLES - 2)
+#define POOL_SIZE (2*BUF_MAX_SIZE + SOCK_INACTIVE_MAX*128 + 2048)
+
+#undef TRACE_
+#define TRACE_(msg) PJ_LOG(3,(THIS_FILE,"....." msg))
+
+static pj_ssize_t callback_read_size,
+ callback_write_size,
+ callback_accept_status,
+ callback_connect_status;
+static pj_ioqueue_key_t *callback_read_key,
+ *callback_write_key,
+ *callback_accept_key,
+ *callback_connect_key;
+
+static void on_ioqueue_read(pj_ioqueue_key_t *key, pj_ssize_t bytes_read)
+{
+ callback_read_key = key;
+ callback_read_size = bytes_read;
+}
+
+static void on_ioqueue_write(pj_ioqueue_key_t *key, pj_ssize_t bytes_written)
+{
+ callback_write_key = key;
+ callback_write_size = bytes_written;
+}
+
+static void on_ioqueue_accept(pj_ioqueue_key_t *key, pj_sock_t sock, int status)
+{
+ PJ_UNUSED_ARG(sock);
+ callback_accept_key = key;
+ callback_accept_status = status;
+}
+
+static void on_ioqueue_connect(pj_ioqueue_key_t *key, int status)
+{
+ callback_connect_key = key;
+ callback_connect_status = status;
+}
+
+static pj_ioqueue_callback test_cb =
+{
+ &on_ioqueue_read,
+ &on_ioqueue_write,
+ &on_ioqueue_accept,
+ &on_ioqueue_connect,
+};
+
+#ifdef PJ_WIN32
+# define S_ADDR S_un.S_addr
+#else
+# define S_ADDR s_addr
+#endif
+
+/*
+ * native_format_test()
+ * This is just a simple test to verify that various structures in sock.h
+ * are really compatible with operating system's definitions.
+ */
+static int native_format_test(void)
+{
+ pj_status_t rc;
+
+ // Test that PJ_INVALID_SOCKET is working.
+ {
+ pj_sock_t sock;
+ rc = pj_sock_socket(PJ_AF_INET, PJ_SOCK_STREAM, -1, &sock);
+ if (rc == PJ_SUCCESS)
+ return -1020;
+ }
+
+    // The previous call will have set the OS error variable; clear it.
+ pj_set_os_error(PJ_SUCCESS);
+
+ return 0;
+}
+
+/*
+ * compliance_test()
+ * To test that the basic IOQueue functionality works. It will just exchange
+ * data between two sockets.
+ */
+static int compliance_test(void)
+{
+ pj_sock_t ssock=-1, csock=-1;
+ pj_sockaddr_in addr;
+ int addrlen;
+ pj_pool_t *pool = NULL;
+ char *send_buf, *recv_buf;
+ pj_ioqueue_t *ioque = NULL;
+ pj_ioqueue_key_t *skey, *ckey;
+ int bufsize = BUF_MIN_SIZE;
+ pj_ssize_t bytes, status = -1;
+ pj_str_t temp;
+ pj_bool_t send_pending, recv_pending;
+ pj_status_t rc;
+
+ pj_set_os_error(PJ_SUCCESS);
+
+ // Create pool.
+ pool = pj_pool_create(mem, NULL, POOL_SIZE, 4000, NULL);
+
+ // Allocate buffers for send and receive.
+ send_buf = (char*)pj_pool_alloc(pool, bufsize);
+ recv_buf = (char*)pj_pool_alloc(pool, bufsize);
+
+ // Allocate sockets for sending and receiving.
+ TRACE_("creating sockets...");
+ rc = pj_sock_socket(PJ_AF_INET, PJ_SOCK_DGRAM, 0, &ssock);
+ if (rc==PJ_SUCCESS)
+ rc = pj_sock_socket(PJ_AF_INET, PJ_SOCK_DGRAM, 0, &csock);
+ else
+ csock = PJ_INVALID_SOCKET;
+ if (rc != PJ_SUCCESS) {
+ app_perror("...ERROR in pj_sock_socket()", rc);
+ status=-1; goto on_error;
+ }
+
+ // Bind server socket.
+ TRACE_("bind socket...");
+ memset(&addr, 0, sizeof(addr));
+ addr.sin_family = PJ_AF_INET;
+ addr.sin_port = pj_htons(PORT);
+ if (pj_sock_bind(ssock, &addr, sizeof(addr))) {
+ status=-10; goto on_error;
+ }
+
+ // Create I/O Queue.
+ TRACE_("create ioqueue...");
+ rc = pj_ioqueue_create(pool, PJ_IOQUEUE_MAX_HANDLES,
+ PJ_IOQUEUE_DEFAULT_THREADS, &ioque);
+ if (rc != PJ_SUCCESS) {
+ status=-20; goto on_error;
+ }
+
+    // Register the server and client sockets to the I/O queue.
+ TRACE_("registering first sockets...");
+ rc = pj_ioqueue_register_sock(pool, ioque, ssock, NULL,
+ &test_cb, &skey);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error(10): ioqueue_register error", rc);
+ status=-25; goto on_error;
+ }
+ TRACE_("registering second sockets...");
+ rc = pj_ioqueue_register_sock( pool, ioque, csock, NULL,
+ &test_cb, &ckey);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error(11): ioqueue_register error", rc);
+ status=-26; goto on_error;
+ }
+
+ // Set destination address to send the packet.
+ TRACE_("set destination address...");
+ temp = pj_str("127.0.0.1");
+ if ((rc=pj_sockaddr_in_init(&addr, &temp, PORT)) != 0) {
+ app_perror("...error: unable to resolve 127.0.0.1", rc);
+ status=-26; goto on_error;
+ }
+
+ // Randomize send_buf.
+ pj_create_random_string(send_buf, bufsize);
+
+ // Register reading from ioqueue.
+ TRACE_("start recvfrom...");
+ addrlen = sizeof(addr);
+ bytes = pj_ioqueue_recvfrom(ioque, skey, recv_buf, bufsize, 0,
+ &addr, &addrlen);
+ if (bytes < 0 && bytes != PJ_EPENDING) {
+ status=-28; goto on_error;
+ } else if (bytes == PJ_EPENDING) {
+ recv_pending = 1;
+ PJ_LOG(3, (THIS_FILE,
+ "......ok: recvfrom returned pending"));
+ } else {
+ PJ_LOG(3, (THIS_FILE,
+ "......error: recvfrom returned immediate ok!"));
+ status=-29; goto on_error;
+ }
+
+ // Write must return the number of bytes.
+ TRACE_("start sendto...");
+ bytes = pj_ioqueue_sendto(ioque, ckey, send_buf, bufsize, 0, &addr,
+ sizeof(addr));
+ if (bytes != bufsize && bytes != PJ_EPENDING) {
+ PJ_LOG(1,(THIS_FILE,
+ "......error: sendto returned %d", bytes));
+ status=-30; goto on_error;
+ } else if (bytes == PJ_EPENDING) {
+ send_pending = 1;
+ PJ_LOG(3, (THIS_FILE,
+ "......ok: sendto returned pending"));
+ } else {
+ send_pending = 0;
+ PJ_LOG(3, (THIS_FILE,
+ "......ok: sendto returned immediate success"));
+ }
+
+ // reset callback variables.
+ callback_read_size = callback_write_size = 0;
+ callback_accept_status = callback_connect_status = -2;
+ callback_read_key = callback_write_key =
+ callback_accept_key = callback_connect_key = NULL;
+
+ // Poll if pending.
+    while (send_pending || recv_pending) {
+ int rc;
+ pj_time_val timeout = { 5, 0 };
+
+ TRACE_("poll...");
+ rc = pj_ioqueue_poll(ioque, &timeout);
+
+ if (rc == 0) {
+ PJ_LOG(1,(THIS_FILE, "...ERROR: timed out..."));
+ status=-45; goto on_error;
+ } else if (rc < 0) {
+ app_perror("...ERROR in ioqueue_poll()", rc);
+ status=-50; goto on_error;
+ }
+
+ if (callback_read_key != NULL) {
+ if (callback_read_size != bufsize) {
+ status=-61; goto on_error;
+ }
+
+ if (callback_read_key != skey) {
+ status=-65; goto on_error;
+ }
+
+ if (memcmp(send_buf, recv_buf, bufsize) != 0) {
+ status=-70; goto on_error;
+ }
+
+
+ recv_pending = 0;
+ }
+
+ if (callback_write_key != NULL) {
+ if (callback_write_size != bufsize) {
+ status=-73; goto on_error;
+ }
+
+ if (callback_write_key != ckey) {
+ status=-75; goto on_error;
+ }
+
+ send_pending = 0;
+ }
+ }
+
+ // Success
+ status = 0;
+
+on_error:
+ if (status != 0) {
+ char errbuf[128];
+ PJ_LOG(1, (THIS_FILE,
+ "...compliance test error: status=%d, os_err=%d (%s)",
+ status, pj_get_netos_error(),
+ pj_strerror(pj_get_netos_error(), errbuf, sizeof(errbuf))));
+ }
+ if (ssock)
+ pj_sock_close(ssock);
+ if (csock)
+ pj_sock_close(csock);
+ if (ioque != NULL)
+ pj_ioqueue_destroy(ioque);
+ pj_pool_release(pool);
+ return status;
+
+}
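+
+/* Distilled sketch (ours, illustrative only; not called by the test) of
+ * the register/post/poll pattern that compliance_test() above exercises
+ * in full. Most error checking is omitted for brevity; it reuses the
+ * test_cb callbacks and the callback_read_key global defined above.
+ */
+static void sketch_udp_roundtrip(void)
+{
+    pj_pool_t *pool = pj_pool_create(mem, NULL, 4096, 4096, NULL);
+    pj_ioqueue_t *ioq;
+    pj_ioqueue_key_t *skey, *ckey;
+    pj_sock_t ssock, csock;
+    pj_sockaddr_in addr;
+    int addrlen = sizeof(addr);
+    char snd[32], rcv[32];
+    pj_str_t loopback = pj_str("127.0.0.1");
+
+    /* One receiver bound to PORT on loopback, and one sender. */
+    pj_sock_socket(PJ_AF_INET, PJ_SOCK_DGRAM, 0, &ssock);
+    pj_sock_socket(PJ_AF_INET, PJ_SOCK_DGRAM, 0, &csock);
+    pj_sockaddr_in_init(&addr, &loopback, PORT);
+    pj_sock_bind(ssock, &addr, sizeof(addr));
+
+    /* Register both sockets, then post the asynchronous operations. */
+    pj_ioqueue_create(pool, PJ_IOQUEUE_MAX_HANDLES,
+                      PJ_IOQUEUE_DEFAULT_THREADS, &ioq);
+    pj_ioqueue_register_sock(pool, ioq, ssock, NULL, &test_cb, &skey);
+    pj_ioqueue_register_sock(pool, ioq, csock, NULL, &test_cb, &ckey);
+
+    pj_create_random_string(snd, sizeof(snd));
+    pj_ioqueue_recvfrom(ioq, skey, rcv, sizeof(rcv), 0, &addr, &addrlen);
+    pj_ioqueue_sendto(ioq, ckey, snd, sizeof(snd), 0, &addr, sizeof(addr));
+
+    /* Poll until the read completion callback has fired on skey. */
+    callback_read_key = NULL;
+    while (callback_read_key != skey) {
+        pj_time_val timeout = { 5, 0 };
+        if (pj_ioqueue_poll(ioq, &timeout) <= 0)
+            break;
+    }
+
+    pj_sock_close(ssock);
+    pj_sock_close(csock);
+    pj_ioqueue_destroy(ioq);
+    pj_pool_release(pool);
+}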
+
+/*
+ * Testing with many handles.
+ * This will just test registering PJ_IOQUEUE_MAX_HANDLES count
+ * of sockets to the ioqueue.
+ */
+static int many_handles_test(void)
+{
+ enum { MAX = PJ_IOQUEUE_MAX_HANDLES };
+ pj_pool_t *pool;
+ pj_ioqueue_t *ioqueue;
+ pj_sock_t *sock;
+ pj_ioqueue_key_t **key;
+ pj_status_t rc;
+ int count, i;
+
+    PJ_LOG(3,(THIS_FILE,"...testing with many handles"));
+
+ pool = pj_pool_create(mem, NULL, 4000, 4000, NULL);
+ if (!pool)
+ return PJ_ENOMEM;
+
+ key = pj_pool_alloc(pool, MAX*sizeof(pj_ioqueue_key_t*));
+ sock = pj_pool_alloc(pool, MAX*sizeof(pj_sock_t));
+
+ /* Create IOQueue */
+ rc = pj_ioqueue_create(pool, MAX,
+ PJ_IOQUEUE_DEFAULT_THREADS,
+ &ioqueue);
+ if (rc != PJ_SUCCESS || ioqueue == NULL) {
+ app_perror("...error in pj_ioqueue_create", rc);
+ return -10;
+ }
+
+ /* Register as many sockets. */
+ for (count=0; count<MAX; ++count) {
+ sock[count] = PJ_INVALID_SOCKET;
+ rc = pj_sock_socket(PJ_AF_INET, PJ_SOCK_DGRAM, 0, &sock[count]);
+ if (rc != PJ_SUCCESS || sock[count] == PJ_INVALID_SOCKET) {
+ PJ_LOG(3,(THIS_FILE, "....unable to create %d-th socket, rc=%d",
+ count, rc));
+ break;
+ }
+ key[count] = NULL;
+ rc = pj_ioqueue_register_sock(pool, ioqueue, sock[count],
+ NULL, &test_cb, &key[count]);
+ if (rc != PJ_SUCCESS || key[count] == NULL) {
+ PJ_LOG(3,(THIS_FILE, "....unable to register %d-th socket, rc=%d",
+ count, rc));
+ return -30;
+ }
+ }
+
+ /* Test complete. */
+
+ /* Now deregister and close all handles. */
+
+ for (i=0; i<count; ++i) {
+ rc = pj_ioqueue_unregister(ioqueue, key[i]);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error in pj_ioqueue_unregister", rc);
+ }
+ rc = pj_sock_close(sock[i]);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error in pj_sock_close", rc);
+ }
+ }
+
+ rc = pj_ioqueue_destroy(ioqueue);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error in pj_ioqueue_destroy", rc);
+ }
+
+ pj_pool_release(pool);
+
+ PJ_LOG(3,(THIS_FILE,"....many_handles_test() ok"));
+
+ return 0;
+}
+
+/*
+ * Multi-operation test.
+ */
+
+/*
+ * Benchmarking IOQueue
+ */
+static int bench_test(int bufsize, int inactive_sock_count)
+{
+ pj_sock_t ssock=-1, csock=-1;
+ pj_sockaddr_in addr;
+ pj_pool_t *pool = NULL;
+ pj_sock_t *inactive_sock=NULL;
+ char *send_buf, *recv_buf;
+ pj_ioqueue_t *ioque = NULL;
+ pj_ioqueue_key_t *skey, *ckey, *key;
+ pj_timestamp t1, t2, t_elapsed;
+ int rc=0, i;
+ pj_str_t temp;
+ char errbuf[128];
+
+ // Create pool.
+ pool = pj_pool_create(mem, NULL, POOL_SIZE, 4000, NULL);
+
+ // Allocate buffers for send and receive.
+ send_buf = (char*)pj_pool_alloc(pool, bufsize);
+ recv_buf = (char*)pj_pool_alloc(pool, bufsize);
+
+ // Allocate sockets for sending and receiving.
+ rc = pj_sock_socket(PJ_AF_INET, PJ_SOCK_DGRAM, 0, &ssock);
+ if (rc == PJ_SUCCESS) {
+ rc = pj_sock_socket(PJ_AF_INET, PJ_SOCK_DGRAM, 0, &csock);
+ } else
+ csock = PJ_INVALID_SOCKET;
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: pj_sock_socket()", rc);
+ goto on_error;
+ }
+
+ // Bind server socket.
+ memset(&addr, 0, sizeof(addr));
+ addr.sin_family = PJ_AF_INET;
+ addr.sin_port = pj_htons(PORT);
+ if (pj_sock_bind(ssock, &addr, sizeof(addr)))
+ goto on_error;
+
+ pj_assert(inactive_sock_count+2 <= PJ_IOQUEUE_MAX_HANDLES);
+
+ // Create I/O Queue.
+ rc = pj_ioqueue_create(pool, PJ_IOQUEUE_MAX_HANDLES,
+ PJ_IOQUEUE_DEFAULT_THREADS, &ioque);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: pj_ioqueue_create()", rc);
+ goto on_error;
+ }
+
+ // Allocate inactive sockets, and bind them to some arbitrary address.
+ // Then register them to the I/O queue, and start a read operation.
+ inactive_sock = (pj_sock_t*)pj_pool_alloc(pool,
+ inactive_sock_count*sizeof(pj_sock_t));
+ memset(&addr, 0, sizeof(addr));
+ addr.sin_family = PJ_AF_INET;
+ for (i=0; i<inactive_sock_count; ++i) {
+ rc = pj_sock_socket(PJ_AF_INET, PJ_SOCK_DGRAM, 0, &inactive_sock[i]);
+ if (rc != PJ_SUCCESS || inactive_sock[i] < 0) {
+ app_perror("...error: pj_sock_socket()", rc);
+ goto on_error;
+ }
+ if ((rc=pj_sock_bind(inactive_sock[i], &addr, sizeof(addr))) != 0) {
+ pj_sock_close(inactive_sock[i]);
+ inactive_sock[i] = PJ_INVALID_SOCKET;
+ app_perror("...error: pj_sock_bind()", rc);
+ goto on_error;
+ }
+ rc = pj_ioqueue_register_sock(pool, ioque, inactive_sock[i],
+ NULL, &test_cb, &key);
+ if (rc != PJ_SUCCESS) {
+ pj_sock_close(inactive_sock[i]);
+ inactive_sock[i] = PJ_INVALID_SOCKET;
+ app_perror("...error(1): pj_ioqueue_register_sock()", rc);
+ PJ_LOG(3,(THIS_FILE, "....i=%d", i));
+ goto on_error;
+ }
+ rc = pj_ioqueue_read(ioque, key, recv_buf, bufsize);
+ if ( rc < 0 && rc != PJ_EPENDING) {
+ pj_sock_close(inactive_sock[i]);
+ inactive_sock[i] = PJ_INVALID_SOCKET;
+ app_perror("...error: pj_ioqueue_read()", rc);
+ goto on_error;
+ }
+ }
+
+    // Register the server and client sockets.
+    // We put these after the inactive sockets, so polling hopefully
+    // represents the worst-case waiting time.
+ rc = pj_ioqueue_register_sock(pool, ioque, ssock, NULL,
+ &test_cb, &skey);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error(2): pj_ioqueue_register_sock()", rc);
+ goto on_error;
+ }
+
+ rc = pj_ioqueue_register_sock(pool, ioque, csock, NULL,
+ &test_cb, &ckey);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error(3): pj_ioqueue_register_sock()", rc);
+ goto on_error;
+ }
+
+ // Set destination address to send the packet.
+ pj_sockaddr_in_init(&addr, pj_cstr(&temp, "127.0.0.1"), PORT);
+
+ // Test loop.
+ t_elapsed.u64 = 0;
+ for (i=0; i<LOOP; ++i) {
+ pj_ssize_t bytes;
+
+ // Randomize send buffer.
+ pj_create_random_string(send_buf, bufsize);
+
+ // Start reading on the server side.
+ rc = pj_ioqueue_read(ioque, skey, recv_buf, bufsize);
+ if (rc < 0 && rc != PJ_EPENDING) {
+ app_perror("...error: pj_ioqueue_read()", rc);
+ break;
+ }
+
+ // Starts send on the client side.
+ bytes = pj_ioqueue_sendto(ioque, ckey, send_buf, bufsize, 0,
+ &addr, sizeof(addr));
+ if (bytes != bufsize && bytes != PJ_EPENDING) {
+ app_perror("...error: pj_ioqueue_write()", bytes);
+ rc = -1;
+ break;
+ }
+
+ // Begin time.
+ pj_get_timestamp(&t1);
+
+ // Poll the queue until we've got completion event in the server side.
+ callback_read_key = NULL;
+ callback_read_size = 0;
+ do {
+ rc = pj_ioqueue_poll(ioque, NULL);
+ } while (rc >= 0 && callback_read_key != skey);
+
+ // End time.
+ pj_get_timestamp(&t2);
+ t_elapsed.u64 += (t2.u64 - t1.u64);
+
+ if (rc < 0)
+ break;
+
+ // Compare recv buffer with send buffer.
+ if (callback_read_size != bufsize ||
+ memcmp(send_buf, recv_buf, bufsize))
+ {
+ rc = -1;
+ break;
+ }
+
+ // Poll until all events are exhausted, before we start the next loop.
+ do {
+ pj_time_val timeout = { 0, 10 };
+ rc = pj_ioqueue_poll(ioque, &timeout);
+ } while (rc>0);
+
+ rc = 0;
+ }
+
+ // Print results
+ if (rc == 0) {
+ pj_timestamp tzero;
+ pj_uint32_t usec_delay;
+
+ tzero.u32.hi = tzero.u32.lo = 0;
+ usec_delay = pj_elapsed_usec( &tzero, &t_elapsed);
+
+ PJ_LOG(3, (THIS_FILE, "...%10d %15d % 9d",
+ bufsize, inactive_sock_count, usec_delay));
+
+ } else {
+ PJ_LOG(2, (THIS_FILE, "...ERROR (buf:%d, fds:%d)",
+ bufsize, inactive_sock_count+2));
+ }
+
+ // Cleaning up.
+ for (i=0; i<inactive_sock_count; ++i)
+ pj_sock_close(inactive_sock[i]);
+ pj_sock_close(ssock);
+ pj_sock_close(csock);
+
+ pj_ioqueue_destroy(ioque);
+ pj_pool_release( pool);
+ return 0;
+
+on_error:
+ PJ_LOG(1,(THIS_FILE, "...ERROR: %s",
+ pj_strerror(pj_get_netos_error(), errbuf, sizeof(errbuf))));
+ if (ssock)
+ pj_sock_close(ssock);
+ if (csock)
+ pj_sock_close(csock);
+ for (i=0; i<inactive_sock_count && inactive_sock &&
+ inactive_sock[i]!=PJ_INVALID_SOCKET; ++i)
+ {
+ pj_sock_close(inactive_sock[i]);
+ }
+ if (ioque != NULL)
+ pj_ioqueue_destroy(ioque);
+ pj_pool_release( pool);
+ return -1;
+}
+
+int udp_ioqueue_test()
+{
+ int status;
+ int bufsize, sock_count;
+
+ PJ_LOG(3, (THIS_FILE, "...format test"));
+ if ((status = native_format_test()) != 0)
+ return status;
+ PJ_LOG(3, (THIS_FILE, "....native format test ok"));
+
+ PJ_LOG(3, (THIS_FILE, "...compliance test"));
+ if ((status=compliance_test()) != 0) {
+ return status;
+ }
+ PJ_LOG(3, (THIS_FILE, "....compliance test ok"));
+
+ if ((status=many_handles_test()) != 0) {
+ return status;
+ }
+
+    PJ_LOG(4, (THIS_FILE, "...benchmarking different buffer sizes:"));
+    PJ_LOG(4, (THIS_FILE, "... note: buf=bytes sent, fds=# of fds, "
+                          "elapsed=in microseconds"));
+
+ PJ_LOG(3, (THIS_FILE, "...Benchmarking poll times:"));
+ PJ_LOG(3, (THIS_FILE, "...====================================="));
+ PJ_LOG(3, (THIS_FILE, "...Buf.size #inactive-socks Time/poll"));
+ PJ_LOG(3, (THIS_FILE, "... (bytes) (usec)"));
+ PJ_LOG(3, (THIS_FILE, "...====================================="));
+
+ for (bufsize=BUF_MIN_SIZE; bufsize <= BUF_MAX_SIZE; bufsize *= 2) {
+ if (bench_test(bufsize, SOCK_INACTIVE_MIN))
+ return -1;
+ }
+ bufsize = 512;
+ for (sock_count=SOCK_INACTIVE_MIN+2;
+ sock_count<=SOCK_INACTIVE_MAX+2;
+ sock_count *= 2)
+ {
+ //PJ_LOG(3,(THIS_FILE, "...testing with %d fds", sock_count));
+ if (bench_test(bufsize, sock_count-2))
+ return -1;
+ }
+ return 0;
+}
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_uiq_udp;
+#endif /* INCLUDE_UDP_IOQUEUE_TEST */
+
+
diff --git a/pjlib/src/pjlib-test/list.c b/pjlib/src/pjlib-test/list.c
new file mode 100644
index 00000000..8390fe70
--- /dev/null
+++ b/pjlib/src/pjlib-test/list.c
@@ -0,0 +1,209 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/list.c 2 10/14/05 12:26a Bennylp $
+ */
+#include "test.h"
+
+/**
+ * \page page_pjlib_list_test Test: Linked List
+ *
+ * This file provides implementation of \b list_test(). It tests the
+ * functionality of the linked-list API.
+ *
+ * \section list_test_sec Scope of the Test
+ *
+ * API tested:
+ * - pj_list_init()
+ * - pj_list_insert_before()
+ * - pj_list_insert_after()
+ * - pj_list_merge_last()
+ * - pj_list_empty()
+ * - pj_list_insert_nodes_before()
+ * - pj_list_erase()
+ * - pj_list_find_node()
+ * - pj_list_search()
+ *
+ *
+ * This file is <b>pjlib-test/list.c</b>
+ *
+ * \include pjlib-test/list.c
+ */
+
+#if INCLUDE_LIST_TEST
+
+#include <pjlib.h>
+
+typedef struct list_node
+{
+ PJ_DECL_LIST_MEMBER(struct list_node)
+ int value;
+} list_node;
+
+static int compare_node(void *value, const pj_list_type *nd)
+{
+ list_node *node = (list_node*)nd;
+ return ((int)value == node->value) ? 0 : -1;
+}
+
+#define PJ_SIGNED_ARRAY_SIZE(a) ((int)PJ_ARRAY_SIZE(a))
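+
+/* A minimal illustration (sketch only, not part of the test) of the list
+ * API exercised below. The root element is itself a node, and inserting
+ * before the root appends at the tail:
+ *
+ *   list_node head, node;
+ *   pj_list_init(&head);                  // empty list; head is the root
+ *   node.value = 42;
+ *   pj_list_insert_before(&head, &node);  // append node at the tail
+ *   // pj_list_empty(&head) is now false
+ *   pj_list_erase(&node);                 // unlink node; head is empty again
+ */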
+
+int list_test()
+{
+ list_node nodes[4]; // must be even number of nodes
+ list_node list;
+ list_node list2;
+ list_node *p;
+ int i; // don't change to unsigned!
+
+ //
+ // Test insert_before().
+ //
+ list.value = (unsigned)-1;
+ pj_list_init(&list);
+ for (i=0; i<PJ_SIGNED_ARRAY_SIZE(nodes); ++i) {
+ nodes[i].value = i;
+ pj_list_insert_before(&list, &nodes[i]);
+ }
+ // check.
+ for (i=0, p=list.next; i<PJ_SIGNED_ARRAY_SIZE(nodes); ++i, p=p->next) {
+ pj_assert(p->value == i);
+ if (p->value != i) {
+ return -1;
+ }
+ }
+
+ //
+ // Test insert_after()
+ //
+ pj_list_init(&list);
+ for (i=PJ_SIGNED_ARRAY_SIZE(nodes)-1; i>=0; --i) {
+ pj_list_insert_after(&list, &nodes[i]);
+ }
+ // check.
+ for (i=0, p=list.next; i<PJ_SIGNED_ARRAY_SIZE(nodes); ++i, p=p->next) {
+ pj_assert(p->value == i);
+ if (p->value != i) {
+ return -1;
+ }
+ }
+
+ //
+ // Test merge_last()
+ //
+ // Init lists
+ pj_list_init(&list);
+ pj_list_init(&list2);
+ for (i=0; i<PJ_SIGNED_ARRAY_SIZE(nodes)/2; ++i) {
+ pj_list_insert_before(&list, &nodes[i]);
+ }
+ for (i=PJ_SIGNED_ARRAY_SIZE(nodes)/2; i<PJ_SIGNED_ARRAY_SIZE(nodes); ++i) {
+ pj_list_insert_before(&list2, &nodes[i]);
+ }
+ // merge
+ pj_list_merge_last(&list, &list2);
+ // check.
+ for (i=0, p=list.next; i<PJ_SIGNED_ARRAY_SIZE(nodes); ++i, p=p->next) {
+ pj_assert(p->value == i);
+ if (p->value != i) {
+ return -1;
+ }
+ }
+ // check list is empty
+ pj_assert( pj_list_empty(&list2) );
+ if (!pj_list_empty(&list2)) {
+ return -1;
+ }
+
+ //
+ // Check merge_first()
+ //
+ pj_list_init(&list);
+ pj_list_init(&list2);
+ for (i=0; i<PJ_SIGNED_ARRAY_SIZE(nodes)/2; ++i) {
+ pj_list_insert_before(&list, &nodes[i]);
+ }
+ for (i=PJ_SIGNED_ARRAY_SIZE(nodes)/2; i<PJ_SIGNED_ARRAY_SIZE(nodes); ++i) {
+ pj_list_insert_before(&list2, &nodes[i]);
+ }
+ // merge
+ pj_list_merge_first(&list2, &list);
+ // check (list2).
+ for (i=0, p=list2.next; i<PJ_SIGNED_ARRAY_SIZE(nodes); ++i, p=p->next) {
+ pj_assert(p->value == i);
+ if (p->value != i) {
+ return -1;
+ }
+ }
+ // check list is empty
+ pj_assert( pj_list_empty(&list) );
+ if (!pj_list_empty(&list)) {
+ return -1;
+ }
+
+ //
+ // Test insert_nodes_before()
+ //
+ // init list
+ pj_list_init(&list);
+ for (i=0; i<PJ_SIGNED_ARRAY_SIZE(nodes)/2; ++i) {
+ pj_list_insert_before(&list, &nodes[i]);
+ }
+ // chain remaining nodes
+ pj_list_init(&nodes[PJ_SIGNED_ARRAY_SIZE(nodes)/2]);
+ for (i=PJ_SIGNED_ARRAY_SIZE(nodes)/2+1; i<PJ_SIGNED_ARRAY_SIZE(nodes); ++i) {
+ pj_list_insert_before(&nodes[PJ_SIGNED_ARRAY_SIZE(nodes)/2], &nodes[i]);
+ }
+ // insert nodes
+ pj_list_insert_nodes_before(&list, &nodes[PJ_SIGNED_ARRAY_SIZE(nodes)/2]);
+ // check
+ for (i=0, p=list.next; i<PJ_SIGNED_ARRAY_SIZE(nodes); ++i, p=p->next) {
+ pj_assert(p->value == i);
+ if (p->value != i) {
+ return -1;
+ }
+ }
+
+ // erase test.
+ pj_list_init(&list);
+ for (i=0; i<PJ_SIGNED_ARRAY_SIZE(nodes); ++i) {
+ nodes[i].value = i;
+ pj_list_insert_before(&list, &nodes[i]);
+ }
+ for (i=PJ_SIGNED_ARRAY_SIZE(nodes)-1; i>=0; --i) {
+ int j;
+ pj_list_erase(&nodes[i]);
+ for (j=0, p=list.next; j<i; ++j, p=p->next) {
+ pj_assert(p->value == j);
+ if (p->value != j) {
+ return -1;
+ }
+ }
+ }
+
+ // find and search
+ pj_list_init(&list);
+ for (i=0; i<PJ_SIGNED_ARRAY_SIZE(nodes); ++i) {
+ nodes[i].value = i;
+ pj_list_insert_before(&list, &nodes[i]);
+ }
+ for (i=0; i<PJ_SIGNED_ARRAY_SIZE(nodes); ++i) {
+ p = (list_node*) pj_list_find_node(&list, &nodes[i]);
+ pj_assert( p == &nodes[i] );
+ if (p != &nodes[i]) {
+ return -1;
+ }
+ p = (list_node*) pj_list_search(&list, (void*)i, &compare_node);
+ pj_assert( p == &nodes[i] );
+ if (p != &nodes[i]) {
+ return -1;
+ }
+ }
+ return 0;
+}
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_list_test;
+#endif /* INCLUDE_LIST_TEST */
+
+
diff --git a/pjlib/src/pjlib-test/main.c b/pjlib/src/pjlib-test/main.c
new file mode 100644
index 00000000..96055100
--- /dev/null
+++ b/pjlib/src/pjlib-test/main.c
@@ -0,0 +1,73 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/main.c 4 29/10/05 21:32 Bennylp $
+ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/main.c $
+ *
+ * 4 29/10/05 21:32 Bennylp
+ * Boost process priority in Win32
+ *
+ * 3 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ */
+#include "test.h"
+
+#include <pj/string.h>
+#include <pj/sock.h>
+#include <pj/log.h>
+
+extern int param_echo_sock_type;
+extern const char *param_echo_server;
+extern int param_echo_port;
+
+
+#if defined(PJ_WIN32) && PJ_WIN32!=0
+#include <windows.h>
+static void boost(void)
+{
+ SetPriorityClass(GetCurrentProcess(), REALTIME_PRIORITY_CLASS);
+}
+#else
+#define boost()
+#endif
+
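+/* Illustrative invocation (the binary name is hypothetical and depends on
+ * the build). Note that the loop below scans argv from the end and takes
+ * each option's value from the slot that precedes the flag, so the value
+ * must come before its option, e.g.:
+ *
+ *   ./pjlib-test 65000 -p echo.example.com -s udp -t
+ */
+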
+int main(int argc, char *argv[])
+{
+ int rc;
+
+ boost();
+
+ while (argc > 1) {
+ char *arg = argv[--argc];
+
+ if (*arg=='-' && *(arg+1)=='p') {
+ pj_str_t port = pj_str(argv[--argc]);
+
+ param_echo_port = pj_strtoul(&port);
+
+ } else if (*arg=='-' && *(arg+1)=='s') {
+ param_echo_server = argv[--argc];
+
+ } else if (*arg=='-' && *(arg+1)=='t') {
+ pj_str_t type = pj_str(argv[--argc]);
+
+ if (pj_stricmp2(&type, "tcp")==0)
+ param_echo_sock_type = PJ_SOCK_STREAM;
+ else if (pj_stricmp2(&type, "udp")==0)
+ param_echo_sock_type = PJ_SOCK_DGRAM;
+ else {
+ PJ_LOG(3,("", "error: unknown socket type %s", type.ptr));
+ return 1;
+ }
+ }
+ }
+
+ rc = test_main();
+
+ return rc;
+}
+
diff --git a/pjlib/src/pjlib-test/main_mod.c b/pjlib/src/pjlib-test/main_mod.c
new file mode 100644
index 00000000..45410184
--- /dev/null
+++ b/pjlib/src/pjlib-test/main_mod.c
@@ -0,0 +1,33 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/main_mod.c 2 10/29/05 11:51a Bennylp $
+ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/main_mod.c $
+ *
+ * 2 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 1 10/05/05 5:12p Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+int init_module(void)
+{
+ printk(KERN_INFO "PJLIB test module loaded. Starting tests...\n");
+
+ test_main();
+
+ /* Prevent the module from loading; we have finished the test anyway. */
+ return 1;
+}
+
+void cleanup_module(void)
+{
+ printk(KERN_INFO "PJLIB test module unloading...\n");
+}
+
+MODULE_LICENSE("GPL");
+
diff --git a/pjlib/src/pjlib-test/mutex.c b/pjlib/src/pjlib-test/mutex.c
new file mode 100644
index 00000000..b6609b8d
--- /dev/null
+++ b/pjlib/src/pjlib-test/mutex.c
@@ -0,0 +1,164 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/mutex.c 1 10/23/05 12:52p Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/mutex.c $
+ *
+ * 1 10/23/05 12:52p Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+#include <pjlib.h>
+
+#if INCLUDE_MUTEX_TEST
+
+#undef TRACE_
+//#define TRACE_(x) PJ_LOG(3,x)
+#define TRACE_(x)
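+
+/* The two tests below exercise the locking contract: with PJ_MUTEX_SIMPLE,
+ * pj_mutex_trylock() on a mutex already held by the same thread is expected
+ * to fail (or merely to reveal that the underlying native mutex happens to
+ * be recursive), while with PJ_MUTEX_RECURSE it must succeed, and every
+ * successful lock must be balanced by an unlock.
+ */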
+
+/* Test with non-recursive mutex. */
+static int simple_mutex_test(pj_pool_t *pool)
+{
+ pj_status_t rc;
+ pj_mutex_t *mutex;
+
+ PJ_LOG(3,("", "...testing simple mutex"));
+
+ /* Create mutex. */
+ TRACE_(("", "....create mutex"));
+ rc = pj_mutex_create( pool, "", PJ_MUTEX_SIMPLE, &mutex);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: pj_mutex_create", rc);
+ return -10;
+ }
+
+ /* Normal lock/unlock cycle. */
+ TRACE_(("", "....lock mutex"));
+ rc = pj_mutex_lock(mutex);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: pj_mutex_lock", rc);
+ return -20;
+ }
+ TRACE_(("", "....unlock mutex"));
+ rc = pj_mutex_unlock(mutex);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: pj_mutex_unlock", rc);
+ return -30;
+ }
+
+ /* Lock again. */
+ TRACE_(("", "....lock mutex"));
+ rc = pj_mutex_lock(mutex);
+ if (rc != PJ_SUCCESS) return -40;
+
+ /* Try-lock should fail, but it must not deadlock. */
+ TRACE_(("", "....trylock mutex"));
+ rc = pj_mutex_trylock(mutex);
+ if (rc == PJ_SUCCESS)
+ PJ_LOG(3,("", "...info: looks like simple mutex is recursive"));
+
+ /* Unlock and done. */
+ TRACE_(("", "....unlock mutex"));
+ rc = pj_mutex_unlock(mutex);
+ if (rc != PJ_SUCCESS) return -50;
+
+ TRACE_(("", "....destroy mutex"));
+ rc = pj_mutex_destroy(mutex);
+ if (rc != PJ_SUCCESS) return -60;
+
+ TRACE_(("", "....done"));
+ return PJ_SUCCESS;
+}
+
+
+/* Test with recursive mutex. */
+static int recursive_mutex_test(pj_pool_t *pool)
+{
+ pj_status_t rc;
+ pj_mutex_t *mutex;
+
+ PJ_LOG(3,("", "...testing recursive mutex"));
+
+ /* Create mutex. */
+ TRACE_(("", "....create mutex"));
+ rc = pj_mutex_create( pool, "", PJ_MUTEX_RECURSE, &mutex);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: pj_mutex_create", rc);
+ return -10;
+ }
+
+ /* Normal lock/unlock cycle. */
+ TRACE_(("", "....lock mutex"));
+ rc = pj_mutex_lock(mutex);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: pj_mutex_lock", rc);
+ return -20;
+ }
+ TRACE_(("", "....unlock mutex"));
+ rc = pj_mutex_unlock(mutex);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: pj_mutex_unlock", rc);
+ return -30;
+ }
+
+ /* Lock again. */
+ TRACE_(("", "....lock mutex"));
+ rc = pj_mutex_lock(mutex);
+ if (rc != PJ_SUCCESS) return -40;
+
+ /* Try-lock should NOT fail. */
+ TRACE_(("", "....trylock mutex"));
+ rc = pj_mutex_trylock(mutex);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: recursive mutex is not recursive!", rc);
+ return -40;
+ }
+
+ /* Locking again should not fail. */
+ TRACE_(("", "....lock mutex"));
+ rc = pj_mutex_lock(mutex);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: recursive mutex is not recursive!", rc);
+ return -45;
+ }
+
+ /* Unlock several times and done. */
+ TRACE_(("", "....unlock mutex 3x"));
+ rc = pj_mutex_unlock(mutex);
+ if (rc != PJ_SUCCESS) return -50;
+ rc = pj_mutex_unlock(mutex);
+ if (rc != PJ_SUCCESS) return -51;
+ rc = pj_mutex_unlock(mutex);
+ if (rc != PJ_SUCCESS) return -52;
+
+ TRACE_(("", "....destroy mutex"));
+ rc = pj_mutex_destroy(mutex);
+ if (rc != PJ_SUCCESS) return -60;
+
+ TRACE_(("", "....done"));
+ return PJ_SUCCESS;
+}
+
+int mutex_test(void)
+{
+ pj_pool_t *pool;
+ int rc;
+
+ pool = pj_pool_create(mem, "", 4000, 4000, NULL);
+
+ rc = simple_mutex_test(pool);
+ if (rc != 0)
+ return rc;
+
+ rc = recursive_mutex_test(pool);
+ if (rc != 0)
+ return rc;
+
+ pj_pool_release(pool);
+
+ return 0;
+}
+
+#else
+int dummy_mutex_test;
+#endif
+
diff --git a/pjlib/src/pjlib-test/os.c b/pjlib/src/pjlib-test/os.c
new file mode 100644
index 00000000..893cfc69
--- /dev/null
+++ b/pjlib/src/pjlib-test/os.c
@@ -0,0 +1,10 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/os.c 2 10/14/05 12:26a Bennylp $
+ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/os.c $
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ */
diff --git a/pjlib/src/pjlib-test/pool.c b/pjlib/src/pjlib-test/pool.c
new file mode 100644
index 00000000..8b9d1ff0
--- /dev/null
+++ b/pjlib/src/pjlib-test/pool.c
@@ -0,0 +1,164 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/pool.c 2 10/14/05 12:26a Bennylp $
+ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/pool.c $
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ */
+#include <pj/pool.h>
+#include <pj/rand.h>
+#include <pj/log.h>
+#include "test.h"
+
+/**
+ * \page page_pjlib_pool_test Test: Pool
+ *
+ * This file provides implementation of \b pool_test(). It tests the
+ * functionality of the memory pool.
+ *
+ *
+ * This file is <b>pjlib-test/pool.c</b>
+ *
+ * \include pjlib-test/pool.c
+ */
+
+
+#if INCLUDE_POOL_TEST
+
+#define SIZE 4096
+
+/* Normally we would throw an exception when memory allocation fails.
+ * Here we do nothing, so that control returns to the original caller,
+ * which tests the result with a NULL comparison. Normally the caller
+ * would catch the exception instead of checking for NULL.
+ */
+static void null_callback(pj_pool_t *pool, pj_size_t size)
+{
+ PJ_UNUSED_ARG(pool);
+ PJ_UNUSED_ARG(size);
+}
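+
+/* For contrast, a sketch (under the same PJLIB exception macros used in
+ * test.c) of the default, exception-based handling that the comment above
+ * refers to:
+ *
+ *   PJ_USE_EXCEPTION;
+ *   PJ_TRY {
+ *       void *p = pj_pool_alloc(pool, SIZE);
+ *       // use p here; on failure the pool's callback raises an exception
+ *   }
+ *   PJ_DEFAULT {
+ *       // allocation failed
+ *   }
+ *   PJ_END;
+ */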
+
+#define GET_FREE(p) (pj_pool_get_capacity(p)-pj_pool_get_used_size(p))
+
+/* Test that the capacity and used size reported by the pool is correct.
+ */
+static int capacity_test(void)
+{
+ pj_pool_t *pool = pj_pool_create(mem, NULL, SIZE, 0, &null_callback);
+ pj_size_t freesize;
+
+ PJ_LOG(3,("test", "...capacity_test()"));
+
+ if (!pool)
+ return -200;
+
+ freesize = GET_FREE(pool);
+
+ if (pj_pool_alloc(pool, freesize) == NULL) {
+ PJ_LOG(3,("test", "...error: wrong freesize %u reported",
+ freesize));
+ pj_pool_release(pool);
+ return -210;
+ }
+
+ pj_pool_release(pool);
+ return 0;
+}
+
+/* Test function to drain the pool's space.
+ */
+static int drain_test(pj_size_t size, pj_size_t increment)
+{
+ pj_pool_t *pool = pj_pool_create(mem, NULL, size, increment,
+ &null_callback);
+ pj_size_t freesize;
+ void *p;
+ int status = 0;
+
+ PJ_LOG(3,("test", "...drain_test(%d,%d)", size, increment));
+
+ if (!pool)
+ return -10;
+
+ /* Get free size */
+ freesize = GET_FREE(pool);
+ if (freesize < 1) {
+ status=-15;
+ goto on_error;
+ }
+
+ /* Drain the pool until there's nothing left. */
+ while (freesize > 0) {
+ int size;
+
+ if (freesize > 255)
+ size = ((pj_rand() & 0x000000FF) + 4) & ~0x03L;
+ else
+ size = freesize;
+
+ p = pj_pool_alloc(pool, size);
+ if (!p) {
+ status=-20; goto on_error;
+ }
+
+ freesize -= size;
+ }
+
+ /* Check that capacity is zero. */
+ if (GET_FREE(pool) != 0) {
+ PJ_LOG(3,("test", "....error: returned free=%u (expecting 0)",
+ GET_FREE(pool)));
+ status=-30; goto on_error;
+ }
+
+ /* Try to allocate once more */
+ p = pj_pool_alloc(pool, 257);
+ if (!p) {
+ status=-40; goto on_error;
+ }
+
+ /* Check that capacity is NOT zero. */
+ if (GET_FREE(pool) == 0) {
+ status=-50; goto on_error;
+ }
+
+
+on_error:
+ pj_pool_release(pool);
+ return status;
+}
+
+int pool_test(void)
+{
+ enum { LOOP = 2 };
+ int loop;
+ int rc;
+
+ rc = capacity_test();
+ if (rc) return rc;
+
+ for (loop=0; loop<LOOP; ++loop) {
+ /* Test that the pool grows automatically. */
+ rc = drain_test(SIZE, SIZE);
+ if (rc != 0) return rc;
+
+ /* Test situation where pool is not allowed to grow.
+ * We expect the test to return correct error.
+ */
+ rc = drain_test(SIZE, 0);
+ if (rc != -40) return rc;
+ }
+
+ return 0;
+}
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_pool_test;
+#endif /* INCLUDE_POOL_TEST */
+
diff --git a/pjlib/src/pjlib-test/pool_perf.c b/pjlib/src/pjlib-test/pool_perf.c
new file mode 100644
index 00000000..76e45606
--- /dev/null
+++ b/pjlib/src/pjlib-test/pool_perf.c
@@ -0,0 +1,134 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/pool_perf.c 2 10/14/05 12:26a Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/pool_perf.c $
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/05/05 5:13p Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+
+#if INCLUDE_POOL_PERF_TEST
+
+#include <pjlib.h>
+#include <pj/compat/malloc.h>
+
+#if !PJ_HAS_HIGH_RES_TIMER
+# error Need high resolution timer for this test.
+#endif
+
+#define THIS_FILE "test"
+
+#define LOOP 10
+#define COUNT 1024
+static unsigned sizes[COUNT];
+#define MIN_SIZE 4
+#define MAX_SIZE 512
+static unsigned total_size;
+
+static int pool_test_pool()
+{
+ int i;
+ pj_pool_t *pool = pj_pool_create(mem, NULL, total_size + 4*COUNT, 0, NULL);
+ if (!pool)
+ return -1;
+
+ for (i=0; i<COUNT; ++i) {
+ char *p;
+ if ( (p=(char*)pj_pool_alloc(pool, sizes[i])) == NULL)
+ return -1;
+ *p = '\0';
+ }
+
+ pj_pool_release(pool);
+ return 0;
+}
+
+static int pool_test_malloc_free()
+{
+ char *p[COUNT];
+ int i;
+
+ for (i=0; i<COUNT; ++i) {
+ p[i] = (char*)malloc(sizes[i]);
+ if (!p[i]) {
+ // Don't care for memory leak in this test
+ return -1;
+ }
+ *p[i] = '\0';
+ }
+
+ for (i=0; i<COUNT; ++i) {
+ free(p[i]);
+ }
+
+ return 0;
+}
+
+int pool_perf_test()
+{
+ unsigned i;
+ pj_uint32_t pool_time=0, malloc_time=0, pool_time2=0;
+ pj_timestamp start, end;
+ pj_uint32_t best, worst;
+
+ // Initialize sizes.
+ for (i=0; i<COUNT; ++i) {
+ sizes[i] = MIN_SIZE + pj_rand() % MAX_SIZE;
+ total_size += sizes[i];
+ }
+
+ PJ_LOG(3, (THIS_FILE, "Benchmarking pool.."));
+
+ // Warmup
+ pool_test_pool();
+ pool_test_malloc_free();
+
+ for (i=0; i<LOOP; ++i) {
+ pj_get_timestamp(&start);
+ if (pool_test_pool()) {
+ return 1;
+ }
+ pj_get_timestamp(&end);
+ pool_time += (end.u32.lo - start.u32.lo);
+
+ pj_get_timestamp(&start);
+ if (pool_test_malloc_free()) {
+ return 2;
+ }
+ pj_get_timestamp(&end);
+ malloc_time += (end.u32.lo - start.u32.lo);
+
+ pj_get_timestamp(&start);
+ if (pool_test_pool()) {
+ return 4;
+ }
+ pj_get_timestamp(&end);
+ pool_time2 += (end.u32.lo - start.u32.lo);
+ }
+
+ PJ_LOG(4, (THIS_FILE, "..LOOP count: %u", LOOP));
+ PJ_LOG(4, (THIS_FILE, "..number of alloc/dealloc per loop: %u", COUNT));
+ PJ_LOG(4, (THIS_FILE, "..pool allocation/deallocation time: %u", pool_time));
+ PJ_LOG(4, (THIS_FILE, "..malloc/free time: %u", malloc_time));
+ PJ_LOG(4, (THIS_FILE, "..pool again, second invocation: %u", pool_time2));
+
+ if (pool_time2==0) pool_time2=1;
+ if (pool_time < pool_time2)
+ best = pool_time, worst = pool_time2;
+ else
+ best = pool_time2, worst = pool_time;
+
+ PJ_LOG(3, (THIS_FILE, "..malloc Speedup best=%dx, worst=%dx",
+ (int)(malloc_time/best),
+ (int)(malloc_time/worst)));
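+
+ /* Example reading of the figures above (illustrative numbers only): with
+ * malloc_time=2000 ticks and pool times of 400 and 500 ticks, the report
+ * would show best=5x and worst=4x.
+ */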
+ return 0;
+}
+
+
+#endif /* INCLUDE_POOL_PERF_TEST */
+
diff --git a/pjlib/src/pjlib-test/rand.c b/pjlib/src/pjlib-test/rand.c
new file mode 100644
index 00000000..25f7a47e
--- /dev/null
+++ b/pjlib/src/pjlib-test/rand.c
@@ -0,0 +1,43 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/rand.c 1 10/05/05 5:13p Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/rand.c $
+ *
+ * 1 10/05/05 5:13p Bennylp
+ * Created.
+ *
+ */
+#include <pj/rand.h>
+#include <pj/log.h>
+#include "test.h"
+
+#if INCLUDE_RAND_TEST
+
+#define COUNT 1024
+static int values[COUNT];
+
+/*
+ * rand_test() simply generates COUNT random numbers and checks that
+ * there are no duplicates.
+ */
+int rand_test(void)
+{
+ int i;
+
+ for (i=0; i<COUNT; ++i) {
+ int j;
+
+ values[i] = pj_rand();
+ for (j=0; j<i; ++j) {
+ if (values[i] == values[j]) {
+ PJ_LOG(3,("test", "error: duplicate value %d at %d-th index",
+ values[i], i));
+ return -10;
+ }
+ }
+ }
+
+ return 0;
+}
+
+#endif /* INCLUDE_RAND_TEST */
+
diff --git a/pjlib/src/pjlib-test/rbtree.c b/pjlib/src/pjlib-test/rbtree.c
new file mode 100644
index 00000000..4b1fd4a4
--- /dev/null
+++ b/pjlib/src/pjlib-test/rbtree.c
@@ -0,0 +1,150 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/rbtree.c 2 10/14/05 12:26a Bennylp $ */
+#include "test.h"
+
+#if INCLUDE_RBTREE_TEST
+
+#include <pjlib.h>
+
+#define LOOP 32
+#define MIN_COUNT 64
+#define MAX_COUNT (LOOP * MIN_COUNT)
+#define STRSIZE 16
+#define THIS_FILE "rbtree_test"
+
+typedef struct node_key
+{
+ pj_uint32_t hash;
+ char str[STRSIZE];
+} node_key;
+
+static int compare_node(const node_key *k1, const node_key *k2)
+{
+ if (k1->hash == k2->hash) {
+ return strcmp(k1->str, k2->str);
+ } else {
+ return k1->hash < k2->hash ? -1 : 1;
+ }
+}
+
+void randomize_string(char *str, int len)
+{
+ int i;
+ for (i=0; i<len-1; ++i)
+ str[i] = (char)('a' + pj_rand() % 26);
+ str[len-1] = '\0';
+}
+
+static int test(void)
+{
+ pj_rbtree rb;
+ node_key *key;
+ pj_rbtree_node *node;
+ pj_pool_t *pool;
+ int err=0;
+ int count = MIN_COUNT;
+ int i;
+ unsigned size;
+
+ pj_rbtree_init(&rb, (pj_rbtree_comp*)&compare_node);
+ size = MAX_COUNT*(sizeof(*key)+PJ_RBTREE_NODE_SIZE) +
+ PJ_RBTREE_SIZE + PJ_POOL_SIZE;
+ pool = pj_pool_create( mem, "pool", size, 0, NULL);
+ if (!pool) {
+ PJ_LOG(3,("test", "...error: creating pool of %u bytes", size));
+ return -10;
+ }
+
+ key = (node_key *)pj_pool_alloc(pool, MAX_COUNT*sizeof(*key));
+ if (!key)
+ return -20;
+
+ node = (pj_rbtree_node*)pj_pool_alloc(pool, MAX_COUNT*sizeof(*node));
+ if (!node)
+ return -30;
+
+ for (i=0; i<LOOP; ++i) {
+ int j;
+ pj_rbtree_node *prev, *it;
+ pj_timestamp t1, t2, t_setup, t_insert, t_search, t_erase;
+
+ pj_assert(rb.size == 0);
+
+ t_setup.u32.lo = t_insert.u32.lo = t_search.u32.lo = t_erase.u32.lo = 0;
+
+ for (j=0; j<count; j++) {
+ randomize_string(key[j].str, STRSIZE);
+
+ pj_get_timestamp(&t1);
+ node[j].key = &key[j];
+ node[j].user_data = key[j].str;
+ key[j].hash = pj_hash_calc(0, key[j].str, PJ_HASH_KEY_STRING);
+ pj_get_timestamp(&t2);
+ t_setup.u32.lo += (t2.u32.lo - t1.u32.lo);
+
+ pj_get_timestamp(&t1);
+ pj_rbtree_insert(&rb, &node[j]);
+ pj_get_timestamp(&t2);
+ t_insert.u32.lo += (t2.u32.lo - t1.u32.lo);
+ }
+
+ pj_assert(rb.size == (unsigned)count);
+
+ // Iterate the keys and make sure they're sorted.
+ prev = NULL;
+ it = pj_rbtree_first(&rb);
+ while (it) {
+ if (prev) {
+ if (compare_node((node_key*)prev->key,(node_key*)it->key)>=0) {
+ ++err;
+ PJ_LOG(3, (THIS_FILE, "Error: %s >= %s",
+ (char*)prev->user_data, (char*)it->user_data));
+ }
+ }
+ prev = it;
+ it = pj_rbtree_next(&rb, it);
+ }
+
+ // Search.
+ for (j=0; j<count; j++) {
+ pj_get_timestamp(&t1);
+ it = pj_rbtree_find(&rb, &key[j]);
+ pj_get_timestamp(&t2);
+ t_search.u32.lo += (t2.u32.lo - t1.u32.lo);
+
+ pj_assert(it != NULL);
+ if (it == NULL)
+ ++err;
+ }
+
+ // Erase node.
+ for (j=0; j<count; j++) {
+ pj_get_timestamp(&t1);
+ it = pj_rbtree_erase(&rb, &node[j]);
+ pj_get_timestamp(&t2);
+ t_erase.u32.lo += (t2.u32.lo - t1.u32.lo);
+ }
+
+ PJ_LOG(4, (THIS_FILE,
+ "...count:%d, setup:%d, insert:%d, search:%d, erase:%d",
+ count,
+ t_setup.u32.lo / count, t_insert.u32.lo / count,
+ t_search.u32.lo / count, t_erase.u32.lo / count));
+
+ count = 2 * count;
+ if (count > MAX_COUNT)
+ break;
+ }
+
+ pj_pool_release(pool);
+ return err;
+}
+
+
+int rbtree_test()
+{
+ return test();
+}
+
+#endif /* INCLUDE_RBTREE_TEST */
+
+
diff --git a/pjlib/src/pjlib-test/select.c b/pjlib/src/pjlib-test/select.c
new file mode 100644
index 00000000..e6562d2c
--- /dev/null
+++ b/pjlib/src/pjlib-test/select.c
@@ -0,0 +1,208 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/select.c 2 10/14/05 12:26a Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/select.c $
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ */
+#include "test.h"
+
+/**
+ * \page page_pjlib_select_test Test: Socket Select()
+ *
+ * This file provides implementation of \b select_test(). It tests the
+ * functionality of the pj_sock_select() API.
+ *
+ *
+ * This file is <b>pjlib-test/select.c</b>
+ *
+ * \include pjlib-test/select.c
+ */
+
+
+#if INCLUDE_SELECT_TEST
+
+#include <pj/sock.h>
+#include <pj/sock_select.h>
+#include <pj/log.h>
+#include <pj/string.h>
+#include <pj/assert.h>
+#include <pj/os.h>
+#include <pj/errno.h>
+
+enum
+{
+ READ_FDS,
+ WRITE_FDS,
+ EXCEPT_FDS
+};
+
+#define UDP_PORT 51232
+#define THIS_FILE "select_test"
+
+/*
+ * do_select()
+ *
+ * Perform pj_sock_select() and find out which sockets
+ * are signalled.
+ */
+static int do_select( pj_sock_t sock1, pj_sock_t sock2,
+ int setcount[])
+{
+ pj_fd_set_t fds[3];
+ pj_time_val timeout;
+ int i, n;
+
+ for (i=0; i<3; ++i) {
+ PJ_FD_ZERO(&fds[i]);
+ PJ_FD_SET(sock1, &fds[i]);
+ PJ_FD_SET(sock2, &fds[i]);
+ setcount[i] = 0;
+ }
+
+ timeout.sec = 1;
+ timeout.msec = 0;
+
+ n = pj_sock_select(FD_SETSIZE, &fds[0], &fds[1], &fds[2],
+ &timeout);
+ if (n < 0)
+ return n;
+ if (n == 0)
+ return 0;
+
+ for (i=0; i<3; ++i) {
+ if (PJ_FD_ISSET(sock1, &fds[i]))
+ setcount[i]++;
+ if (PJ_FD_ISSET(sock2, &fds[i]))
+ setcount[i]++;
+ }
+
+ return n;
+}
+
+/*
+ * select_test()
+ *
+ * Test main entry.
+ */
+int select_test()
+{
+ pj_sock_t udp1=PJ_INVALID_SOCKET, udp2=PJ_INVALID_SOCKET;
+ pj_sockaddr_in udp_addr;
+ int status;
+ int setcount[3];
+ pj_str_t s;
+ const char data[] = "hello";
+ const int datalen = 5;
+ pj_ssize_t sent, received;
+ char buf[10];
+ pj_status_t rc;
+
+ PJ_LOG(3, (THIS_FILE, "...Testing simple UDP select()"));
+
+ // Create two UDP sockets.
+ rc = pj_sock_socket( PJ_AF_INET, PJ_SOCK_DGRAM, 0, &udp1);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: unable to create socket", rc);
+ status=-10; goto on_return;
+ }
+ rc = pj_sock_socket( PJ_AF_INET, PJ_SOCK_DGRAM, 0, &udp2);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: unable to create socket", rc);
+ status=-20; goto on_return;
+ }
+
+ // Bind one of the UDP socket.
+ pj_memset(&udp_addr, 0, sizeof(udp_addr));
+ udp_addr.sin_family = PJ_AF_INET;
+ udp_addr.sin_port = UDP_PORT;
+ udp_addr.sin_addr = pj_inet_addr(pj_cstr(&s, "127.0.0.1"));
+
+ if (pj_sock_bind(udp2, &udp_addr, sizeof(udp_addr))) {
+ status=-30; goto on_return;
+ }
+
+ // Send data.
+ sent = datalen;
+ rc = pj_sock_sendto(udp1, data, &sent, 0, &udp_addr, sizeof(udp_addr));
+ if (rc != PJ_SUCCESS || sent != datalen) {
+ app_perror("...error: sendto() error", rc);
+ status=-40; goto on_return;
+ }
+
+ // Check that the socket is marked as readable.
+ // Note that select() may also report that sockets are writable.
+ status = do_select(udp1, udp2, setcount);
+ if (status < 0) {
+ char errbuf[128];
+ pj_strerror(pj_get_netos_error(), errbuf, sizeof(errbuf));
+ PJ_LOG(1,(THIS_FILE, "...error: %s", errbuf));
+ status=-50; goto on_return;
+ }
+ if (status == 0) {
+ status=-60; goto on_return;
+ }
+
+ if (setcount[READ_FDS] != 1) {
+ status=-70; goto on_return;
+ }
+ if (setcount[WRITE_FDS] != 0) {
+ if (setcount[WRITE_FDS] == 2) {
+ PJ_LOG(3,(THIS_FILE, "...info: system reports writable sockets"));
+ } else {
+ status=-80; goto on_return;
+ }
+ } else {
+ PJ_LOG(3,(THIS_FILE,
+ "...info: system doesn't report writable sockets"));
+ }
+ if (setcount[EXCEPT_FDS] != 0) {
+ status=-90; goto on_return;
+ }
+
+ // Read the socket to clear readable sockets.
+ received = sizeof(buf);
+ rc = pj_sock_recv(udp2, buf, &received, 0);
+ if (rc != PJ_SUCCESS || received != 5) {
+ status=-100; goto on_return;
+ }
+
+ status = 0;
+
+ // Test timeout on the read part.
+ // This won't necessarily return zero, as select() may report that
+ // sockets are writable.
+ setcount[0] = setcount[1] = setcount[2] = 0;
+ status = do_select(udp1, udp2, setcount);
+ if (status != 0 && status != setcount[WRITE_FDS]) {
+ PJ_LOG(3,(THIS_FILE, "...error: expecting timeout but got %d sks set",
+ status));
+ PJ_LOG(3,(THIS_FILE, " rdset: %d, wrset: %d, exset: %d",
+ setcount[0], setcount[1], setcount[2]));
+ status = -110; goto on_return;
+ }
+ if (setcount[READ_FDS] != 0) {
+ PJ_LOG(3,(THIS_FILE, "...error: readable socket not expected"));
+ status = -120; goto on_return;
+ }
+
+ status = 0;
+
+on_return:
+ if (udp1 != PJ_INVALID_SOCKET)
+ pj_sock_close(udp1);
+ if (udp2 != PJ_INVALID_SOCKET)
+ pj_sock_close(udp2);
+ return status;
+}
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_select_test;
+#endif /* INCLUDE_SELECT_TEST */
+
+
diff --git a/pjlib/src/pjlib-test/sleep.c b/pjlib/src/pjlib-test/sleep.c
new file mode 100644
index 00000000..95fa3bac
--- /dev/null
+++ b/pjlib/src/pjlib-test/sleep.c
@@ -0,0 +1,198 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/sleep.c 3 10/29/05 11:51a Bennylp $
+ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/sleep.c $
+ *
+ * 3 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/11/05 12:53a Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+
+/**
+ * \page page_pjlib_sleep_test Test: Sleep, Time, and Timestamp
+ *
+ * This file provides implementation of \b sleep_test().
+ *
+ * \section sleep_test_sec Scope of the Test
+ *
+ * This tests:
+ * - whether pj_thread_sleep() works.
+ * - whether pj_gettimeofday() works.
+ * - whether pj_get_timestamp() and friends work.
+ *
+ * API tested:
+ * - pj_thread_sleep()
+ * - pj_gettimeofday()
+ * - PJ_TIME_VAL_SUB()
+ * - PJ_TIME_VAL_LTE()
+ * - pj_get_timestamp()
+ * - pj_get_timestamp_freq() (implicitly)
+ * - pj_elapsed_time()
+ * - pj_elapsed_usec()
+ *
+ *
+ * This file is <b>pjlib-test/sleep.c</b>
+ *
+ * \include pjlib-test/sleep.c
+ */
+
+#if INCLUDE_SLEEP_TEST
+
+#include <pjlib.h>
+
+#define THIS_FILE "sleep_test"
+
+static int simple_sleep_test(void)
+{
+ enum { COUNT = 5 };
+ int i;
+ pj_status_t rc;
+
+ PJ_LOG(3,(THIS_FILE, "..will write messages every 1 second:"));
+
+ for (i=0; i<COUNT; ++i) {
+ rc = pj_thread_sleep(1000);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: pj_thread_sleep()", rc);
+ return -10;
+ }
+ PJ_LOG(3,(THIS_FILE, "...wake up.."));
+ }
+
+ return 0;
+}
+
+static int sleep_duration_test(void)
+{
+ enum { MIS = 20, DURATION = 1000, DURATION2 = 500 };
+ pj_status_t rc;
+
+ PJ_LOG(3,(THIS_FILE, "..running sleep duration test"));
+
+ /* Test pj_thread_sleep() and pj_gettimeofday() */
+ {
+ pj_time_val start, stop;
+ pj_uint32_t msec;
+
+ /* Mark start of test. */
+ rc = pj_gettimeofday(&start);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: pj_gettimeofday()", rc);
+ return -10;
+ }
+
+ /* Sleep */
+ rc = pj_thread_sleep(DURATION);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: pj_thread_sleep()", rc);
+ return -20;
+ }
+
+ /* Mark end of test. */
+ rc = pj_gettimeofday(&stop);
+
+ /* Calculate duration (store in stop). */
+ PJ_TIME_VAL_SUB(stop, start);
+
+ /* Convert to msec. */
+ msec = PJ_TIME_VAL_MSEC(stop);
+
+ /* Check if it's within range. */
+ if (msec < DURATION * (100-MIS)/100 ||
+ msec > DURATION * (100+MIS)/100)
+ {
+ PJ_LOG(3,(THIS_FILE,
+ "...error: slept for %d ms instead of %d ms "
+ "(outside %d%% err window)",
+ msec, DURATION, MIS));
+ return -30;
+ }
+ }
+
+
+ /* Test pj_thread_sleep() and pj_get_timestamp() and friends */
+ {
+ pj_time_val t1, t2;
+ pj_timestamp start, stop;
+ pj_time_val elapsed;
+ pj_uint32_t msec;
+
+ /* Mark start of test. */
+ rc = pj_get_timestamp(&start);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: pj_get_timestamp()", rc);
+ return -60;
+ }
+
+ /* ..also with gettimeofday() */
+ pj_gettimeofday(&t1);
+
+ /* Sleep */
+ rc = pj_thread_sleep(DURATION2);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: pj_thread_sleep()", rc);
+ return -70;
+ }
+
+ /* Mark end of test. */
+ pj_get_timestamp(&stop);
+
+ /* ..also with gettimeofday() */
+ pj_gettimeofday(&t2);
+
+ /* Compare t1 and t2. */
+ if (PJ_TIME_VAL_LTE(t2, t1)) {
+ PJ_LOG(3,(THIS_FILE, "...error: t2 is less than t1!!"));
+ return -75;
+ }
+
+ /* Get elapsed time in time_val */
+ elapsed = pj_elapsed_time(&start, &stop);
+
+ msec = PJ_TIME_VAL_MSEC(elapsed);
+
+ /* Check if it's within range. */
+ if (msec < DURATION2 * (100-MIS)/100 ||
+ msec > DURATION2 * (100+MIS)/100)
+ {
+ PJ_LOG(3,(THIS_FILE,
+ "...error: slept for %d ms instead of %d ms "
+ "(outside %d%% err window)",
+ msec, DURATION2, MIS));
+ return -30;
+ }
+ }
+
+ /* All done. */
+ return 0;
+}
+
+int sleep_test()
+{
+ int rc;
+
+ rc = simple_sleep_test();
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ rc = sleep_duration_test();
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ return 0;
+}
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_sleep_test;
+#endif /* INCLUDE_SLEEP_TEST */
diff --git a/pjlib/src/pjlib-test/sock.c b/pjlib/src/pjlib-test/sock.c
new file mode 100644
index 00000000..9135a8bb
--- /dev/null
+++ b/pjlib/src/pjlib-test/sock.c
@@ -0,0 +1,459 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/sock.c 4 10/29/05 11:51a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pjlib-test/sock.c $
+ *
+ * 4 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 3 14/10/05 11:31 Bennylp
+ * Fixed bug when TCP data is received in chunks.
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 9/21/05 1:38p Bennylp
+ * Renamed from *.cpp
+ *
+ * 2 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ * 1 9/15/05 8:41p Bennylp
+ */
+#include <pjlib.h>
+#include "test.h"
+
+
+/**
+ * \page page_pjlib_sock_test Test: Socket
+ *
+ * This file provides implementation of \b sock_test(). It tests the
+ * various aspects of the socket API.
+ *
+ * \section sock_test_scope_sec Scope of the Test
+ *
+ * The scope of the test:
+ * - verify the validity of the address structs.
+ * - verify that address manipulation API works.
+ * - simple socket creation and destruction.
+ * - simple socket send/recv and sendto/recvfrom.
+ * - UDP connect()
+ * - send/recv big data.
+ * - all for both UDP and TCP.
+ *
+ * The APIs tested in this test:
+ * - pj_inet_aton()
+ * - pj_inet_ntoa()
+ * - pj_gethostname()
+ * - pj_sock_socket()
+ * - pj_sock_close()
+ * - pj_sock_send()
+ * - pj_sock_sendto()
+ * - pj_sock_recv()
+ * - pj_sock_recvfrom()
+ * - pj_sock_bind()
+ * - pj_sock_connect()
+ * - pj_sock_listen()
+ * - pj_sock_accept()
+ *
+ *
+ * This file is <b>pjlib-test/sock.c</b>
+ *
+ * \include pjlib-test/sock.c
+ */
+
+#if INCLUDE_SOCK_TEST
+
+#define UDP_PORT 51234
+#define TCP_PORT (UDP_PORT+10)
+#define BIG_DATA_LEN 9000
+
+static char bigdata[BIG_DATA_LEN];
+static char bigbuffer[BIG_DATA_LEN];
+
+static int format_test(void)
+{
+ pj_str_t s = pj_str("127.0.0.1");
+ char *p;
+ pj_in_addr addr;
+ const pj_str_t *hostname;
+
+ PJ_LOG(3,("test", "...format_test()"));
+
+ /* pj_inet_aton() */
+ if (pj_inet_aton(&s, &addr) != 1)
+ return -10;
+
+ /* Check the result. */
+ p = (char*)&addr;
+ if (p[0]!=127 || p[1]!=0 || p[2]!=0 || p[3]!=1)
+ return -15;
+
+ /* pj_inet_ntoa() */
+ p = pj_inet_ntoa(addr);
+ if (!p)
+ return -20;
+
+ if (pj_strcmp2(&s, p) != 0)
+ return -30;
+
+ /* pj_gethostname() */
+ hostname = pj_gethostname();
+ if (!hostname || !hostname->ptr || !hostname->slen)
+ return -40;
+
+ /* pj_gethostaddr() */
+
+ return 0;
+}
+
+static int simple_sock_test(void)
+{
+ int types[2];
+ pj_sock_t sock;
+ int i;
+ pj_status_t rc = PJ_SUCCESS;
+
+ types[0] = PJ_SOCK_STREAM;
+ types[1] = PJ_SOCK_DGRAM;
+
+ PJ_LOG(3,("test", "...simple_sock_test()"));
+
+ for (i=0; i<sizeof(types)/sizeof(types[0]); ++i) {
+
+ rc = pj_sock_socket(PJ_AF_INET, types[i], 0, &sock);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: unable to create socket", rc);
+ break;
+ } else {
+ rc = pj_sock_close(sock);
+ if (rc != 0) {
+ app_perror("...error: close socket", rc);
+ break;
+ }
+ }
+ }
+ return rc;
+}
+
+
+static int send_recv_test(int sock_type,
+ pj_sock_t ss, pj_sock_t cs,
+ pj_sockaddr_in *dstaddr, pj_sockaddr_in *srcaddr,
+ int addrlen)
+{
+ enum { DATA_LEN = 16 };
+ char senddata[DATA_LEN+4], recvdata[DATA_LEN+4];
+ pj_ssize_t sent, received, total_received;
+ pj_status_t rc;
+
+ TRACE_(("test", "....create_random_string()"));
+ pj_create_random_string(senddata, DATA_LEN);
+ senddata[DATA_LEN-1] = '\0';
+
+ /*
+ * Test send/recv small data.
+ */
+ TRACE_(("test", "....sendto()"));
+ if (dstaddr) {
+ sent = DATA_LEN;
+ rc = pj_sock_sendto(cs, senddata, &sent, 0, dstaddr, addrlen);
+ if (rc != PJ_SUCCESS || sent != DATA_LEN) {
+ app_perror("...sendto error", rc);
+ rc = -140; goto on_error;
+ }
+ } else {
+ sent = DATA_LEN;
+ rc = pj_sock_send(cs, senddata, &sent, 0);
+ if (rc != PJ_SUCCESS || sent != DATA_LEN) {
+ app_perror("...send error", rc);
+ rc = -145; goto on_error;
+ }
+ }
+
+ TRACE_(("test", "....recv()"));
+ if (srcaddr) {
+ pj_sockaddr_in addr;
+ int srclen = sizeof(addr);
+
+ pj_memset(&addr, 0, sizeof(addr));
+
+ received = DATA_LEN;
+ rc = pj_sock_recvfrom(ss, recvdata, &received, 0, &addr, &srclen);
+ if (rc != PJ_SUCCESS || received != DATA_LEN) {
+ app_perror("...recvfrom error", rc);
+ rc = -150; goto on_error;
+ }
+ if (srclen != addrlen)
+ return -151;
+ if (pj_memcmp(&addr, srcaddr, srclen) != 0) {
+ char srcaddr_str[32], addr_str[32];
+ strcpy(srcaddr_str, pj_inet_ntoa(srcaddr->sin_addr));
+ strcpy(addr_str, pj_inet_ntoa(addr.sin_addr));
+ PJ_LOG(3,("test", "...error: src address mismatch (original=%s, "
+ "recvfrom addr=%s)",
+ srcaddr_str, addr_str));
+ return -152;
+ }
+
+ } else {
+ /* Repeat recv() until all data is received.
+ * This applies only for non-UDP of course, since for UDP
+ * we would expect all data to be received in one packet.
+ */
+ total_received = 0;
+ do {
+ received = DATA_LEN-total_received;
+ rc = pj_sock_recv(ss, recvdata+total_received, &received, 0);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...recv error", rc);
+ rc = -155; goto on_error;
+ }
+ if (received <= 0) {
+ PJ_LOG(3,("", "...error: socket has closed! (received=%d)",
+ received));
+ rc = -156; goto on_error;
+ }
+ if (received != DATA_LEN-total_received) {
+ if (sock_type != PJ_SOCK_STREAM) {
+ PJ_LOG(3,("", "...error: expecting %u bytes, got %u bytes",
+ DATA_LEN-total_received, received));
+ rc = -157; goto on_error;
+ }
+ }
+ total_received += received;
+ } while (total_received < DATA_LEN);
+ }
+
+ TRACE_(("test", "....memcmp()"));
+ if (pj_memcmp(senddata, recvdata, DATA_LEN) != 0) {
+ PJ_LOG(3,("","...error: received data mismatch "
+ "(got:'%s', expecting:'%s')",
+ recvdata, senddata));
+ rc = -160; goto on_error;
+ }
+
+ /*
+ * Test send/recv big data.
+ */
+ TRACE_(("test", "....sendto()"));
+ if (dstaddr) {
+ sent = BIG_DATA_LEN;
+ rc = pj_sock_sendto(cs, bigdata, &sent, 0, dstaddr, addrlen);
+ if (rc != PJ_SUCCESS || sent != BIG_DATA_LEN) {
+ app_perror("...sendto error", rc);
+ rc = -161; goto on_error;
+ }
+ } else {
+ sent = BIG_DATA_LEN;
+ rc = pj_sock_send(cs, bigdata, &sent, 0);
+ if (rc != PJ_SUCCESS || sent != BIG_DATA_LEN) {
+ app_perror("...send error", rc);
+ rc = -165; goto on_error;
+ }
+ }
+
+ TRACE_(("test", "....recv()"));
+
+ /* Repeat recv() until all data is received.
+ * This applies only for non-UDP of course, since for UDP
+ * we would expect all data to be received in one packet.
+ */
+ total_received = 0;
+ do {
+ received = BIG_DATA_LEN-total_received;
+ rc = pj_sock_recv(ss, bigbuffer+total_received, &received, 0);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...recv error", rc);
+ rc = -170; goto on_error;
+ }
+ if (received <= 0) {
+ PJ_LOG(3,("", "...error: socket has closed! (received=%d)",
+ received));
+ rc = -173; goto on_error;
+ }
+ if (received != BIG_DATA_LEN-total_received) {
+ if (sock_type != PJ_SOCK_STREAM) {
+ PJ_LOG(3,("", "...error: expecting %u bytes, got %u bytes",
+ BIG_DATA_LEN-total_received, received));
+ rc = -176; goto on_error;
+ }
+ }
+ total_received += received;
+ } while (total_received < BIG_DATA_LEN);
+
+ TRACE_(("test", "....memcmp()"));
+ if (pj_memcmp(bigdata, bigbuffer, BIG_DATA_LEN) != 0) {
+ PJ_LOG(3,("", "...error: received data has been altered!"));
+ rc = -180; goto on_error;
+ }
+
+ rc = 0;
+
+on_error:
+ return rc;
+}
+
+static int udp_test(void)
+{
+ pj_sock_t cs = PJ_INVALID_SOCKET, ss = PJ_INVALID_SOCKET;
+ pj_sockaddr_in dstaddr, srcaddr;
+ pj_str_t s;
+ pj_status_t rc = 0, retval;
+
+ PJ_LOG(3,("test", "...udp_test()"));
+
+ rc = pj_sock_socket(PJ_AF_INET, PJ_SOCK_DGRAM, 0, &ss);
+ if (rc != 0) {
+ app_perror("...error: unable to create socket", rc);
+ return -100;
+ }
+
+ rc = pj_sock_socket(PJ_AF_INET, PJ_SOCK_DGRAM, 0, &cs);
+ if (rc != 0)
+ return -110;
+
+ /* Bind server socket. */
+ pj_memset(&dstaddr, 0, sizeof(dstaddr));
+ dstaddr.sin_family = PJ_AF_INET;
+ dstaddr.sin_port = pj_htons(UDP_PORT);
+ dstaddr.sin_addr = pj_inet_addr(pj_cstr(&s, "127.0.0.1"));
+
+ if ((rc=pj_sock_bind(ss, &dstaddr, sizeof(dstaddr))) != 0) {
+ app_perror("...bind error", rc);
+ rc = -120; goto on_error;
+ }
+
+ /* Bind client socket. */
+ pj_memset(&srcaddr, 0, sizeof(srcaddr));
+ srcaddr.sin_family = PJ_AF_INET;
+ srcaddr.sin_port = pj_htons(UDP_PORT-1);
+ srcaddr.sin_addr = pj_inet_addr(pj_cstr(&s, "127.0.0.1"));
+
+ if ((rc=pj_sock_bind(cs, &srcaddr, sizeof(srcaddr))) != 0) {
+ app_perror("...bind error", rc);
+ rc = -121; goto on_error;
+ }
+
+ /* Test send/recv, with sendto */
+ rc = send_recv_test(PJ_SOCK_DGRAM, ss, cs, &dstaddr, NULL,
+ sizeof(dstaddr));
+ if (rc != 0)
+ goto on_error;
+
+ /* Test send/recv, with sendto and recvfrom */
+ rc = send_recv_test(PJ_SOCK_DGRAM, ss, cs, &dstaddr,
+ &srcaddr, sizeof(dstaddr));
+ if (rc != 0)
+ goto on_error;
+
+ /* connect() the sockets. */
+ rc = pj_sock_connect(cs, &dstaddr, sizeof(dstaddr));
+ if (rc != 0) {
+ app_perror("...connect() error", rc);
+ rc = -122; goto on_error;
+ }
+
+ /* Test send/recv with send() */
+ rc = send_recv_test(PJ_SOCK_DGRAM, ss, cs, NULL, NULL, 0);
+ if (rc != 0)
+ goto on_error;
+
+ /* Test send/recv with send() and recvfrom */
+ rc = send_recv_test(PJ_SOCK_DGRAM, ss, cs, NULL, &srcaddr,
+ sizeof(srcaddr));
+ if (rc != 0)
+ goto on_error;
+
+on_error:
+ retval = rc;
+ if (cs != PJ_INVALID_SOCKET) {
+ rc = pj_sock_close(cs);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error in closing socket", rc);
+ return -1000;
+ }
+ }
+ if (ss != PJ_INVALID_SOCKET) {
+ rc = pj_sock_close(ss);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error in closing socket", rc);
+ return -1010;
+ }
+ }
+
+ return retval;
+}
+
+static int tcp_test(void)
+{
+ pj_sock_t cs, ss;
+ pj_status_t rc = 0, retval;
+
+ PJ_LOG(3,("test", "...tcp_test()"));
+
+ rc = app_socketpair(PJ_AF_INET, PJ_SOCK_STREAM, 0, &ss, &cs);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: app_socketpair():", rc);
+ return -2000;
+ }
+
+ /* Test send/recv with send() and recv() */
+ retval = send_recv_test(PJ_SOCK_STREAM, ss, cs, NULL, NULL, 0);
+
+ rc = pj_sock_close(cs);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error in closing socket", rc);
+ return -2000;
+ }
+
+ rc = pj_sock_close(ss);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error in closing socket", rc);
+ return -2010;
+ }
+
+ return retval;
+}
+
+static int ioctl_test(void)
+{
+ return 0;
+}
+
+int sock_test()
+{
+ int rc;
+
+ pj_create_random_string(bigdata, BIG_DATA_LEN);
+
+ rc = format_test();
+ if (rc != 0)
+ return rc;
+
+ rc = simple_sock_test();
+ if (rc != 0)
+ return rc;
+
+ rc = ioctl_test();
+ if (rc != 0)
+ return rc;
+
+ rc = udp_test();
+ if (rc != 0)
+ return rc;
+
+ rc = tcp_test();
+ if (rc != 0)
+ return rc;
+
+ return 0;
+}
+
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_sock_test;
+#endif /* INCLUDE_SOCK_TEST */
+
diff --git a/pjlib/src/pjlib-test/sock_perf.c b/pjlib/src/pjlib-test/sock_perf.c
new file mode 100644
index 00000000..9e800432
--- /dev/null
+++ b/pjlib/src/pjlib-test/sock_perf.c
@@ -0,0 +1,183 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/sock_perf.c 4 10/29/05 11:51a Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/sock_perf.c $
+ *
+ * 4 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 3 14/10/05 11:31 Bennylp
+ * Fixed bug when TCP data is received in chunks.
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/11/05 11:18p Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+#include <pjlib.h>
+#include <pj/compat/high_precision.h>
+
+
+/**
+ * \page page_pjlib_sock_perf_test Test: Socket Performance
+ *
+ * Test the performance of socket communication. This performs a simple
+ * producer-consumer test, measuring how long it takes to send a certain
+ * number of packets from the producer to the consumer.
+ *
+ * This file is <b>pjlib-test/sock_perf.c</b>
+ *
+ * \include pjlib-test/sock_perf.c
+ */
+
+#if INCLUDE_SOCK_PERF_TEST
+
+/*
+ * sock_producer_consumer()
+ *
+ * Simple producer-consumer benchmark. Send 'loop' packets of 'buf_size'
+ * bytes each as fast as possible.
+ */
+static int sock_producer_consumer(int sock_type,
+ unsigned buf_size,
+ unsigned loop,
+ unsigned *p_bandwidth)
+{
+ pj_sock_t consumer, producer;
+ pj_pool_t *pool;
+ char *outgoing_buffer, *incoming_buffer;
+ pj_timestamp start, stop;
+ unsigned i;
+ pj_highprec_t elapsed, bandwidth;
+ pj_size_t total_received;
+ pj_status_t rc;
+
+ /* Create pool. */
+ pool = pj_pool_create(mem, NULL, 4096, 4096, NULL);
+ if (!pool)
+ return -10;
+
+ /* Create producer-consumer pair. */
+ rc = app_socketpair(PJ_AF_INET, sock_type, 0, &consumer, &producer);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: create socket pair", rc);
+ return -20;
+ }
+
+ /* Create buffers. */
+ outgoing_buffer = pj_pool_alloc(pool, buf_size);
+ incoming_buffer = pj_pool_alloc(pool, buf_size);
+
+ /* Start loop. */
+ pj_get_timestamp(&start);
+ total_received = 0;
+ for (i=0; i<loop; ++i) {
+ pj_ssize_t sent, part_received, received;
+ pj_time_val delay;
+
+ sent = buf_size;
+ rc = pj_sock_send(producer, outgoing_buffer, &sent, 0);
+ if (rc != PJ_SUCCESS || sent != (pj_ssize_t)buf_size) {
+ app_perror("...error: send()", rc);
+ return -61;
+ }
+
+ /* Repeat recv() until all data is received.
+ * This applies only for non-UDP of course, since for UDP
+ * we would expect all data to be received in one packet.
+ */
+ received = 0;
+ do {
+ part_received = buf_size-received;
+ rc = pj_sock_recv(consumer, incoming_buffer+received,
+ &part_received, 0);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...recv error", rc);
+ return -70;
+ }
+ if (part_received <= 0) {
+ PJ_LOG(3,("", "...error: socket has closed (part_received=%d)!",
+ part_received));
+ return -73;
+ }
+ if ((pj_size_t)part_received != buf_size-received) {
+ if (sock_type != PJ_SOCK_STREAM) {
+ PJ_LOG(3,("", "...error: expecting %u bytes, got %u bytes",
+ buf_size-received, part_received));
+ return -76;
+ }
+ }
+ received += part_received;
+ } while ((pj_size_t)received < buf_size);
+
+ total_received += received;
+
+ /* Stop the test if it has been running for more than 10 secs. */
+ pj_get_timestamp(&stop);
+ delay = pj_elapsed_time(&start, &stop);
+ if (delay.sec > 10)
+ break;
+ }
+
+ /* Stop timer. */
+ pj_get_timestamp(&stop);
+
+ elapsed = pj_elapsed_usec(&start, &stop);
+
+ /* bandwidth = total_received * 1000 / elapsed */
+ bandwidth = total_received;
+ pj_highprec_mul(bandwidth, 1000);
+ pj_highprec_div(bandwidth, elapsed);
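+
+ /* Unit check: total_received is in bytes and elapsed is in microseconds,
+ * so bytes * 1000 / usec is (approximately) kilobytes per second, which
+ * is what the caller reports as KB/s.
+ */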
+
+ *p_bandwidth = (pj_uint32_t)bandwidth;
+
+ /* Close sockets. */
+ pj_sock_close(consumer);
+ pj_sock_close(producer);
+
+ /* Done */
+ pj_pool_release(pool);
+
+ return 0;
+}
+
+/*
+ * sock_perf_test()
+ *
+ * Main test entry.
+ */
+int sock_perf_test(void)
+{
+ enum { LOOP = 64 * 1024 };
+ int rc;
+ unsigned bandwidth;
+
+ PJ_LOG(3,("", "...benchmarking socket "
+ "(2 sockets, packet=512, single threaded):"));
+
+ /* Benchmarking UDP */
+ rc = sock_producer_consumer(PJ_SOCK_DGRAM, 512, LOOP, &bandwidth);
+ if (rc != 0) return rc;
+ PJ_LOG(3,("", "....bandwidth UDP = %d KB/s", bandwidth));
+
+ /* Benchmarking TCP */
+ rc = sock_producer_consumer(PJ_SOCK_STREAM, 512, LOOP, &bandwidth);
+ if (rc != 0) return rc;
+ PJ_LOG(3,("", "....bandwidth TCP = %d KB/s", bandwidth));
+
+ return rc;
+}
+
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_sock_perf_test;
+#endif /* INCLUDE_SOCK_PERF_TEST */
+
+
diff --git a/pjlib/src/pjlib-test/string.c b/pjlib/src/pjlib-test/string.c
new file mode 100644
index 00000000..1a3de325
--- /dev/null
+++ b/pjlib/src/pjlib-test/string.c
@@ -0,0 +1,168 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/string.c 2 10/14/05 12:26a Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/string.c $
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/05/05 5:13p Bennylp
+ * Created.
+ *
+ */
+#include <pj/string.h>
+#include <pj/pool.h>
+#include <pj/log.h>
+#include "test.h"
+
+/**
+ * \page page_pjlib_string_test Test: String
+ *
+ * This file provides implementation of \b string_test(). It tests the
+ * functionality of the string API.
+ *
+ * \section string_test_sec Scope of the Test
+ *
+ * API tested:
+ * - pj_str()
+ * - pj_strcmp()
+ * - pj_strcmp2()
+ * - pj_stricmp()
+ * - pj_strlen()
+ * - pj_strncmp()
+ * - pj_strnicmp()
+ * - pj_strchr()
+ * - pj_strdup()
+ * - pj_strdup2()
+ * - pj_strcpy()
+ * - pj_strcat()
+ * - pj_strtrim()
+ * - pj_utoa()
+ * - pj_strtoul()
+ * - pj_create_random_string()
+ *
+ *
+ * This file is <b>pjlib-test/string.c</b>
+ *
+ * \include pjlib-test/string.c
+ */
+
+#if INCLUDE_STRING_TEST
+
+#ifdef _MSC_VER
+# pragma warning(disable: 4204)
+#endif
+
+#define HELLO_WORLD "Hello World"
+#define JUST_HELLO "Hello"
+#define UL_VALUE 3456789012UL
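+
+/* Note: a pj_str_t is just a {ptr, slen} pair and is not required to be
+ * NUL terminated, which is why the lengths are filled in explicitly with
+ * strlen() below.
+ */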
+
+int string_test(void)
+{
+ const pj_str_t hello_world = { HELLO_WORLD, strlen(HELLO_WORLD) };
+ const pj_str_t just_hello = { JUST_HELLO, strlen(JUST_HELLO) };
+ pj_str_t s1, s2, s3, s4, s5;
+ enum { RCOUNT = 10, RLEN = 16 };
+ pj_str_t random[RCOUNT];
+ pj_pool_t *pool;
+ int i;
+
+ pool = pj_pool_create(mem, NULL, 4096, 0, NULL);
+ if (!pool) return -5;
+
+ /*
+ * pj_str(), pj_strcmp(), pj_stricmp(), pj_strlen(),
+ * pj_strncmp(), pj_strchr()
+ */
+ s1 = pj_str(HELLO_WORLD);
+ if (pj_strcmp(&s1, &hello_world) != 0)
+ return -10;
+ if (pj_stricmp(&s1, &hello_world) != 0)
+ return -20;
+ if (pj_strcmp(&s1, &just_hello) <= 0)
+ return -30;
+ if (pj_stricmp(&s1, &just_hello) <= 0)
+ return -40;
+ if (pj_strlen(&s1) != strlen(HELLO_WORLD))
+ return -50;
+ if (pj_strncmp(&s1, &hello_world, 5) != 0)
+ return -60;
+ if (pj_strnicmp(&s1, &hello_world, 5) != 0)
+ return -70;
+ if (pj_strchr(&s1, HELLO_WORLD[1]) != s1.ptr+1)
+ return -80;
+
+ /*
+ * pj_strdup()
+ */
+ if (!pj_strdup(pool, &s2, &s1))
+ return -100;
+ if (pj_strcmp(&s1, &s2) != 0)
+ return -110;
+
+ /*
+ * pj_strcpy(), pj_strcat()
+ */
+ s3.ptr = pj_pool_alloc(pool, 256);
+ if (!s3.ptr)
+ return -200;
+ pj_strcpy(&s3, &s2);
+ pj_strcat(&s3, &just_hello);
+
+ if (pj_strcmp2(&s3, HELLO_WORLD JUST_HELLO) != 0)
+ return -210;
+
+ /*
+ * pj_strdup2(), pj_strtrim().
+ */
+ pj_strdup2(pool, &s4, " " HELLO_WORLD "\t ");
+ pj_strtrim(&s4);
+ if (pj_strcmp2(&s4, HELLO_WORLD) != 0)
+ return -250;
+
+ /*
+ * pj_utoa()
+ */
+ s5.ptr = pj_pool_alloc(pool, 16);
+ if (!s5.ptr)
+ return -270;
+ s5.slen = pj_utoa(UL_VALUE, s5.ptr);
+
+ /*
+ * pj_strtoul()
+ */
+ if (pj_strtoul(&s5) != UL_VALUE)
+ return -280;
+
+ /*
+ * pj_create_random_string()
+ * Check that no duplicate strings are returned.
+ */
+ for (i=0; i<RCOUNT; ++i) {
+ int j;
+
+ random[i].ptr = pj_pool_alloc(pool, RLEN);
+ if (!random[i].ptr)
+ return -320;
+
+ random[i].slen = RLEN;
+ pj_create_random_string(random[i].ptr, RLEN);
+
+ for (j=0; j<i; ++j) {
+ if (pj_strcmp(&random[i], &random[j])==0)
+ return -330;
+ }
+ }
+
+ /* Done. */
+ pj_pool_release(pool);
+ return 0;
+}
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_string_test;
+#endif /* INCLUDE_STRING_TEST */
+
diff --git a/pjlib/src/pjlib-test/test.c b/pjlib/src/pjlib-test/test.c
new file mode 100644
index 00000000..44b89c40
--- /dev/null
+++ b/pjlib/src/pjlib-test/test.c
@@ -0,0 +1,196 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/test.c 4 29/10/05 21:33 Bennylp $
+ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/test.c $
+ *
+ * 4 29/10/05 21:33 Bennylp
+ * Changed echo_server() to echo_srv_sync()
+ *
+ * 3 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/05/05 5:13p Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+#include <pjlib.h>
+#ifdef _MSC_VER
+# pragma warning(disable:4127)
+#endif
+
+#define DO_TEST(test) do { \
+ PJ_LOG(3, ("test", "Running %s...", #test)); \
+ rc = test; \
+ PJ_LOG(3, ("test", \
+ "%s(%d)", \
+ (rc ? "..ERROR" : "..success"), rc)); \
+ if (rc!=0) goto on_return; \
+ } while (0)
+
+
+pj_pool_factory *mem;
+
+int param_echo_sock_type;
+const char *param_echo_server = ECHO_SERVER_ADDRESS;
+int param_echo_port = ECHO_SERVER_START_PORT;
+
+int test_inner(void)
+{
+ pj_caching_pool caching_pool;
+ const char *filename;
+ int line;
+ int rc = 0;
+
+ mem = &caching_pool.factory;
+
+ rc = pj_init();
+ if (rc != 0) {
+ app_perror("pj_init() error!!", rc);
+ return rc;
+ }
+
+ pj_log_set_level(3);
+ pj_log_set_decor(PJ_LOG_HAS_NEWLINE);
+ pj_dump_config();
+ pj_caching_pool_init( &caching_pool, &pj_pool_factory_default_policy, 0 );
+
+#if INCLUDE_ERRNO_TEST
+ DO_TEST( errno_test() );
+#endif
+
+#if INCLUDE_TIMESTAMP_TEST
+ DO_TEST( timestamp_test() );
+#endif
+
+#if INCLUDE_EXCEPTION_TEST
+ DO_TEST( exception_test() );
+#endif
+
+#if INCLUDE_RAND_TEST
+ DO_TEST( rand_test() );
+#endif
+
+#if INCLUDE_LIST_TEST
+ DO_TEST( list_test() );
+#endif
+
+#if INCLUDE_POOL_TEST
+ DO_TEST( pool_test() );
+#endif
+
+#if INCLUDE_POOL_PERF_TEST
+ DO_TEST( pool_perf_test() );
+#endif
+
+#if INCLUDE_STRING_TEST
+ DO_TEST( string_test() );
+#endif
+
+#if INCLUDE_FIFOBUF_TEST
+ DO_TEST( fifobuf_test() );
+#endif
+
+#if INCLUDE_RBTREE_TEST
+ DO_TEST( rbtree_test() );
+#endif
+
+#if INCLUDE_ATOMIC_TEST
+ DO_TEST( atomic_test() );
+#endif
+
+#if INCLUDE_MUTEX_TEST
+ DO_TEST( mutex_test() );
+#endif
+
+#if INCLUDE_TIMER_TEST
+ DO_TEST( timer_test() );
+#endif
+
+#if INCLUDE_SLEEP_TEST
+ DO_TEST( sleep_test() );
+#endif
+
+#if INCLUDE_THREAD_TEST
+ DO_TEST( thread_test() );
+#endif
+
+#if INCLUDE_SOCK_TEST
+ DO_TEST( sock_test() );
+#endif
+
+#if INCLUDE_SOCK_PERF_TEST
+ DO_TEST( sock_perf_test() );
+#endif
+
+#if INCLUDE_SELECT_TEST
+ DO_TEST( select_test() );
+#endif
+
+#if INCLUDE_UDP_IOQUEUE_TEST
+ DO_TEST( udp_ioqueue_test() );
+#endif
+
+#if PJ_HAS_TCP && INCLUDE_TCP_IOQUEUE_TEST
+ DO_TEST( tcp_ioqueue_test() );
+#endif
+
+#if INCLUDE_IOQUEUE_PERF_TEST
+ DO_TEST( ioqueue_perf_test() );
+#endif
+
+#if INCLUDE_XML_TEST
+ DO_TEST( xml_test() );
+#endif
+
+#if INCLUDE_ECHO_SERVER
+ //echo_server();
+ echo_srv_sync();
+#elif INCLUDE_ECHO_CLIENT
+ if (param_echo_sock_type == 0)
+ param_echo_sock_type = PJ_SOCK_DGRAM;
+
+ echo_client( param_echo_sock_type,
+ param_echo_server,
+ param_echo_port);
+#endif
+
+ goto on_return;
+
+on_return:
+
+ pj_caching_pool_destroy( &caching_pool );
+
+ PJ_LOG(3,("test", ""));
+
+ pj_thread_get_stack_info(pj_thread_this(), &filename, &line);
+ PJ_LOG(3,("test", "Stack max usage: %u, deepest: %s:%u",
+ pj_thread_get_stack_max_usage(pj_thread_this()),
+ filename, line));
+ if (rc == 0)
+ PJ_LOG(3,("test", "Looks like everything is okay!.."));
+ else
+ PJ_LOG(3,("test", "Test completed with error(s)"));
+ return 0;
+}
+
+int test_main(void)
+{
+ PJ_USE_EXCEPTION;
+
+ PJ_TRY {
+ return test_inner();
+ }
+ PJ_DEFAULT {
+ int id = PJ_GET_EXCEPTION();
+ PJ_LOG(3,("test", "FATAL: unhandled exception id %d (%s)",
+ id, pj_exception_id_name(id)));
+ }
+ PJ_END;
+
+ return -1;
+}
diff --git a/pjlib/src/pjlib-test/test.h b/pjlib/src/pjlib-test/test.h
new file mode 100644
index 00000000..475cdff6
--- /dev/null
+++ b/pjlib/src/pjlib-test/test.h
@@ -0,0 +1,90 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/test.h 4 10/29/05 10:28p Bennylp $ */
+#ifndef __PJLIB_TEST_H__
+#define __PJLIB_TEST_H__
+
+#include <pj/types.h>
+
+#define GROUP_LIBC 1
+#define GROUP_OS 1
+#define GROUP_DATA_STRUCTURE 1
+#define GROUP_NETWORK 1
+#define GROUP_EXTRA 1
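+
+/* Set a GROUP_xxx macro to 0 to skip every test in that group; for example,
+ * setting GROUP_NETWORK to 0 disables the socket, select and ioqueue tests
+ * defined below.
+ */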
+
+#define INCLUDE_ERRNO_TEST GROUP_LIBC
+#define INCLUDE_TIMESTAMP_TEST GROUP_OS
+#define INCLUDE_EXCEPTION_TEST GROUP_LIBC
+#define INCLUDE_RAND_TEST GROUP_LIBC
+#define INCLUDE_LIST_TEST GROUP_DATA_STRUCTURE
+#define INCLUDE_POOL_TEST GROUP_LIBC
+#define INCLUDE_POOL_PERF_TEST (PJ_HAS_MALLOC && GROUP_LIBC)
+#define INCLUDE_STRING_TEST GROUP_DATA_STRUCTURE
+#define INCLUDE_FIFOBUF_TEST GROUP_DATA_STRUCTURE
+#define INCLUDE_RBTREE_TEST GROUP_DATA_STRUCTURE
+#define INCLUDE_TIMER_TEST GROUP_DATA_STRUCTURE
+#define INCLUDE_ATOMIC_TEST GROUP_OS
+#define INCLUDE_MUTEX_TEST GROUP_OS
+#define INCLUDE_SLEEP_TEST GROUP_OS
+#define INCLUDE_THREAD_TEST GROUP_OS
+#define INCLUDE_SOCK_TEST GROUP_NETWORK
+#define INCLUDE_SOCK_PERF_TEST GROUP_NETWORK
+#define INCLUDE_SELECT_TEST GROUP_NETWORK
+#define INCLUDE_UDP_IOQUEUE_TEST GROUP_NETWORK
+#define INCLUDE_TCP_IOQUEUE_TEST GROUP_NETWORK
+#define INCLUDE_IOQUEUE_PERF_TEST GROUP_NETWORK
+#define INCLUDE_XML_TEST GROUP_EXTRA
+
+
+#define INCLUDE_ECHO_SERVER 0
+#define INCLUDE_ECHO_CLIENT 0
+
+#define ECHO_SERVER_MAX_THREADS 4
+#define ECHO_SERVER_START_PORT 65000
+#define ECHO_SERVER_ADDRESS "compaq.home"
+#define ECHO_SERVER_DURATION_MSEC (60*60*1000)
+
+#define ECHO_CLIENT_MAX_THREADS 10
+
+PJ_BEGIN_DECL
+
+extern int errno_test(void);
+extern int timestamp_test(void);
+extern int exception_test(void);
+extern int rand_test(void);
+extern int list_test(void);
+extern int pool_test(void);
+extern int pool_perf_test(void);
+extern int string_test(void);
+extern int fifobuf_test(void);
+extern int timer_test(void);
+extern int rbtree_test(void);
+extern int atomic_test(void);
+extern int mutex_test(void);
+extern int sleep_test(void);
+extern int thread_test(void);
+extern int sock_test(void);
+extern int sock_perf_test(void);
+extern int select_test(void);
+extern int udp_ioqueue_test(void);
+extern int tcp_ioqueue_test(void);
+extern int ioqueue_perf_test(void);
+extern int xml_test(void);
+
+extern int echo_server(void);
+extern int echo_srv_sync(void);
+extern int echo_client(int sock_type, const char *server, int port);
+
+extern pj_pool_factory *mem;
+
+extern int test_main(void);
+extern void app_perror(const char *msg, pj_status_t err);
+extern pj_status_t app_socket(int family, int type, int proto, int port,
+ pj_sock_t *ptr_sock);
+extern pj_status_t app_socketpair(int family, int type, int protocol,
+ pj_sock_t *server, pj_sock_t *client);
+
+//#define TRACE_(expr) PJ_LOG(3,expr)
+#define TRACE_(expr)
+
+PJ_END_DECL
+
+#endif /* __PJLIB_TEST_H__ */
+
diff --git a/pjlib/src/pjlib-test/thread.c b/pjlib/src/pjlib-test/thread.c
new file mode 100644
index 00000000..f41ec16e
--- /dev/null
+++ b/pjlib/src/pjlib-test/thread.c
@@ -0,0 +1,290 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/thread.c 4 10/29/05 11:51a Bennylp $
+ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/thread.c $
+ *
+ * 4 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 3 14/10/05 11:32 Bennylp
+ * More lenient with timeslice difference.
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/11/05 12:39a Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+
+/**
+ * \page page_pjlib_thread_test Test: Thread Test
+ *
+ * This file contains the definition of \a thread_test().
+ *
+ * \section thread_test_scope_sec Scope of Test
+ * This tests:
+ * - whether PJ_THREAD_SUSPENDED flag works.
+ * - whether multithreading works.
+ * - whether thread timeslicing works, and whether threads get an
+ * equal share of the time slices.
+ *
+ * APIs tested:
+ * - pj_thread_create()
+ * - pj_thread_register()
+ * - pj_thread_this()
+ * - pj_thread_get_name()
+ * - pj_thread_destroy()
+ * - pj_thread_resume()
+ * - pj_thread_sleep()
+ * - pj_thread_join()
+ *
+ *
+ * This file is <b>pjlib-test/thread.c</b>
+ *
+ * \include pjlib-test/thread.c
+ */
+#if INCLUDE_THREAD_TEST
+
+#include <pjlib.h>
+
+#define THIS_FILE "thread_test"
+
+static int quit_flag=0;
+
+/*
+ * The thread's entry point.
+ *
+ * Each thread simply executes a loop that increments a variable.
+ */
+static void* thread_proc(pj_uint32_t *pcounter)
+{
+ /* Test that pj_thread_register() works. */
+ pj_thread_desc desc;
+ pj_thread_t *this_thread;
+ pj_status_t rc;
+
+ rc = pj_thread_register("thread", desc, &this_thread);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error in pj_thread_register", rc);
+ return NULL;
+ }
+
+ /* Test that pj_thread_this() works */
+ this_thread = pj_thread_this();
+ if (this_thread == NULL) {
+ PJ_LOG(3,(THIS_FILE, "...error: pj_thread_this() returns NULL!"));
+ return NULL;
+ }
+
+ /* Test that pj_thread_get_name() works */
+ if (pj_thread_get_name(this_thread) == NULL) {
+ PJ_LOG(3,(THIS_FILE, "...error: pj_thread_get_name() returns NULL!"));
+ return NULL;
+ }
+
+ /* Main loop */
+ for (;!quit_flag;) {
+ (*pcounter)++;
+ //Must sleep if platform doesn't do time-slicing.
+ pj_thread_sleep(0);
+ }
+
+ return NULL;
+}
+
+/*
+ * simple_thread()
+ */
+static int simple_thread(const char *title, unsigned flags)
+{
+ pj_pool_t *pool;
+ pj_thread_t *thread;
+ pj_status_t rc;
+ pj_uint32_t counter = 0;
+
+ PJ_LOG(3,(THIS_FILE, "..%s", title));
+
+ pool = pj_pool_create(mem, NULL, 4000, 4000, NULL);
+ if (!pool)
+ return -1000;
+
+ quit_flag = 0;
+
+ rc = pj_thread_create(pool, "thread", (pj_thread_proc*)&thread_proc,
+ &counter,
+ PJ_THREAD_DEFAULT_STACK_SIZE,
+ flags,
+ &thread);
+
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: unable to create thread", rc);
+ return -1010;
+ }
+
+ if (flags & PJ_THREAD_SUSPENDED) {
+ rc = pj_thread_resume(thread);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: resume thread error", rc);
+ return -1020;
+ }
+ }
+
+ PJ_LOG(3,(THIS_FILE, "..waiting for thread to quit.."));
+
+ quit_flag = 1;
+ pj_thread_join(thread);
+
+ pj_pool_release(pool);
+
+ PJ_LOG(3,(THIS_FILE, "...%s success", title));
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * timeslice_test()
+ */
+static int timeslice_test(void)
+{
+ enum { NUM_THREADS = 4 };
+ pj_pool_t *pool;
+ pj_uint32_t counter[NUM_THREADS], lowest, highest, diff;
+ pj_thread_t *thread[NUM_THREADS];
+ int i;
+ pj_status_t rc;
+
+ quit_flag = 0;
+
+ pool = pj_pool_create(mem, NULL, 4096, 0, NULL);
+ if (!pool)
+ return -10;
+
+ PJ_LOG(3,(THIS_FILE, "..timeslice testing with %d threads", NUM_THREADS));
+
+ /* Create all threads in suspended mode. */
+ for (i=0; i<NUM_THREADS; ++i) {
+ counter[i] = 0;
+ rc = pj_thread_create(pool, "thread", (pj_thread_proc*)&thread_proc,
+ &counter[i],
+ PJ_THREAD_DEFAULT_STACK_SIZE,
+ PJ_THREAD_SUSPENDED,
+ &thread[i]);
+ if (rc!=PJ_SUCCESS) {
+ app_perror("...ERROR in pj_thread_create()", rc);
+ return -20;
+ }
+ }
+
+ /* Sleep for 1 second.
+ * The purpose of this is to test whether all threads are suspended.
+ */
+ pj_thread_sleep(1000);
+
+ /* Check that all counters are still zero. */
+ for (i=0; i<NUM_THREADS; ++i) {
+ if (counter[i] != 0) {
+ PJ_LOG(3,(THIS_FILE, "....ERROR! Thread %d-th is not suspended!",
+ i));
+ return -30;
+ }
+ }
+
+ /* Now resume all threads. */
+ for (i=0; i<NUM_THREADS; ++i) {
+ rc = pj_thread_resume(thread[i]);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...ERROR in pj_thread_resume()", rc);
+ return -40;
+ }
+ }
+
+ /* Main thread sleeps for some time to allow threads to run.
+ * The longer we sleep, the more accurate the calculation will be,
+ * but the user will have to wait longer for the test to finish.
+ */
+ pj_thread_sleep(5000);
+
+ /* Signal all threads to quit. */
+ quit_flag = 1;
+
+ /* Wait until all threads quit, then destroy. */
+ for (i=0; i<NUM_THREADS; ++i) {
+ rc = pj_thread_join(thread[i]);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...ERROR in pj_thread_join()", rc);
+ return -50;
+ }
+ rc = pj_thread_destroy(thread[i]);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...ERROR in pj_thread_destroy()", rc);
+ return -60;
+ }
+ }
+
+ /* Now examine the values of the counters.
+ * Check that all threads got a roughly equal share of processing.
+ */
+ lowest = 0xFFFFFFFF;
+ highest = 0;
+ for (i=0; i<NUM_THREADS; ++i) {
+ if (counter[i] < lowest)
+ lowest = counter[i];
+ if (counter[i] > highest)
+ highest = counter[i];
+ }
+
+ /* Check that all threads are running. */
+ if (lowest < 2) {
+ PJ_LOG(3,(THIS_FILE, "...ERROR: not all threads were running!"));
+ return -70;
+ }
+
+ /* The difference between the lowest and highest counters should be lower than 50%.
+ */
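+ /* Example with hypothetical numbers: lowest=400, highest=600 gives
+ * diff = (600-400)*100 / ((600+400)/2) = 20000/500 = 40%, which passes.
+ */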
+ diff = (highest-lowest)*100 / ((highest+lowest)/2);
+ if ( diff >= 50) {
+ PJ_LOG(3,(THIS_FILE, "...ERROR: thread didn't have equal timeslice!"));
+ PJ_LOG(3,(THIS_FILE, ".....lowest counter=%u, highest counter=%u, diff=%u%%",
+ lowest, highest, diff));
+ return -80;
+ } else {
+ PJ_LOG(3,(THIS_FILE,
+ "...info: timeslice diff between lowest & highest=%u%%",
+ diff));
+ }
+
+ return 0;
+}
+
+int thread_test(void)
+{
+ int rc;
+
+ rc = simple_thread("simple thread test", 0);
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ rc = simple_thread("suspended thread test", PJ_THREAD_SUSPENDED);
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ rc = timeslice_test();
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ return rc;
+}
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_thread_test;
+#endif /* INCLUDE_THREAD_TEST */
+
+
diff --git a/pjlib/src/pjlib-test/timer.c b/pjlib/src/pjlib-test/timer.c
new file mode 100644
index 00000000..1aaa208d
--- /dev/null
+++ b/pjlib/src/pjlib-test/timer.c
@@ -0,0 +1,169 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/timer.c 3 10/29/05 10:23p Bennylp $ */
+#include "test.h"
+
+/**
+ * \page page_pjlib_timer_test Test: Timer
+ *
+ * This file provides the implementation of \b timer_test(). It tests the
+ * functionality of the timer heap.
+ *
+ *
+ * This file is <b>pjlib-test/timer.c</b>
+ *
+ * \include pjlib-test/timer.c
+ */
+
+
+#if INCLUDE_TIMER_TEST
+
+#include <pjlib.h>
+
+#define LOOP 16
+#define MIN_COUNT 250
+#define MAX_COUNT (LOOP * MIN_COUNT)
+#define MIN_DELAY 2
+#define D (MAX_COUNT / 32000)
+#define DELAY (D < MIN_DELAY ? MIN_DELAY : D)
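+/* With the values above (an illustrative walk-through): MAX_COUNT = 16*250 =
+ * 4000 timer entries, D = 4000/32000 = 0 by integer division, so DELAY falls
+ * back to MIN_DELAY = 2 seconds.
+ */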
+#define THIS_FILE "timer_test"
+
+
+static void timer_callback(pj_timer_heap_t *ht, pj_timer_entry *e)
+{
+ PJ_UNUSED_ARG(ht);
+ PJ_UNUSED_ARG(e);
+}
+
+static int test_timer_heap(void)
+{
+ int i, j;
+ pj_timer_entry *entry;
+ pj_pool_t *pool;
+ pj_timer_heap_t *timer;
+ pj_time_val delay;
+ pj_status_t rc;
+ int err = 0;
+ unsigned size, count;
+
+ size = pj_timer_heap_mem_size(MAX_COUNT)+MAX_COUNT*sizeof(pj_timer_entry);
+ pool = pj_pool_create( mem, NULL, size, 4000, NULL);
+ if (!pool) {
+ PJ_LOG(3,("test", "...error: unable to create pool of %u bytes",
+ size));
+ return -10;
+ }
+
+ entry = (pj_timer_entry*)pj_pool_calloc(pool, MAX_COUNT, sizeof(*entry));
+ if (!entry)
+ return -20;
+
+ for (i=0; i<MAX_COUNT; ++i) {
+ entry[i].cb = &timer_callback;
+ }
+ rc = pj_timer_heap_create(pool, MAX_COUNT, 0, &timer);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: unable to create timer heap", rc);
+ return -30;
+ }
+
+ count = MIN_COUNT;
+ for (i=0; i<LOOP; ++i) {
+ int early = 0;
+ int done=0;
+ int cancelled=0;
+ int rc;
+ pj_timestamp t1, t2, t_sched, t_cancel, t_poll;
+ pj_time_val now, expire;
+
+ pj_gettimeofday(&now);
+ pj_srand(now.sec);
+ t_sched.u32.lo = t_cancel.u32.lo = t_poll.u32.lo = 0;
+
+ // Register timers
+ for (j=0; j<(int)count; ++j) {
+ delay.sec = pj_rand() % DELAY;
+ delay.msec = pj_rand() % 1000;
+
+ // Schedule timer
+ pj_get_timestamp(&t1);
+ rc = pj_timer_heap_schedule(timer, &entry[j], &delay);
+ if (rc != 0)
+ return -40;
+ pj_get_timestamp(&t2);
+
+ t_sched.u32.lo += (t2.u32.lo - t1.u32.lo);
+
+ // Poll timers.
+ pj_get_timestamp(&t1);
+ rc = pj_timer_heap_poll(timer, NULL);
+ pj_get_timestamp(&t2);
+ if (rc > 0) {
+ t_poll.u32.lo += (t2.u32.lo - t1.u32.lo);
+ early += rc;
+ }
+ }
+
+ // Set the time by which all timers should have finished.
+ pj_gettimeofday(&expire);
+ delay.sec = DELAY;
+ delay.msec = 0;
+ PJ_TIME_VAL_ADD(expire, delay);
+
+ // Wait until all timers finish, cancelling some of them along the way.
+ do {
+ int index = pj_rand() % count;
+ pj_get_timestamp(&t1);
+ rc = pj_timer_heap_cancel(timer, &entry[index]);
+ pj_get_timestamp(&t2);
+ if (rc > 0) {
+ cancelled += rc;
+ t_cancel.u32.lo += (t2.u32.lo - t1.u32.lo);
+ }
+
+ pj_gettimeofday(&now);
+
+ pj_get_timestamp(&t1);
+ rc = pj_timer_heap_poll(timer, NULL);
+ pj_get_timestamp(&t2);
+ if (rc > 0) {
+ done += rc;
+ t_poll.u32.lo += (t2.u32.lo - t1.u32.lo);
+ }
+
+ } while (PJ_TIME_VAL_LTE(now, expire)&&pj_timer_heap_count(timer) > 0);
+
+ if (pj_timer_heap_count(timer)) {
+ PJ_LOG(3, (THIS_FILE, "ERROR: %d timers left",
+ pj_timer_heap_count(timer)));
+ ++err;
+ }
+ t_sched.u32.lo /= count;
+ t_cancel.u32.lo /= count;
+ t_poll.u32.lo /= count;
+ PJ_LOG(4, (THIS_FILE,
+ "...ok (count:%d, early:%d, cancelled:%d, "
+ "sched:%d, cancel:%d poll:%d)",
+ count, early, cancelled, t_sched.u32.lo, t_cancel.u32.lo,
+ t_poll.u32.lo));
+
+ count = count * 2;
+ if (count > MAX_COUNT)
+ break;
+ }
+
+ pj_pool_release(pool);
+ return err;
+}
+
+
+int timer_test()
+{
+ return test_timer_heap();
+}
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_timer_test;
+#endif /* INCLUDE_TIMER_TEST */
+
+
diff --git a/pjlib/src/pjlib-test/timestamp.c b/pjlib/src/pjlib-test/timestamp.c
new file mode 100644
index 00000000..3d4d9f8e
--- /dev/null
+++ b/pjlib/src/pjlib-test/timestamp.c
@@ -0,0 +1,140 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/timestamp.c 4 10/29/05 11:51a Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/timestamp.c $
+ *
+ * 4 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 3 14/10/05 11:32 Bennylp
+ * Longer test, to check if timestamp is running backwards.
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/09/05 9:39p Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+#include <pj/os.h>
+#include <pj/log.h>
+
+
+/**
+ * \page page_pjlib_timestamp_test Test: Timestamp
+ *
+ * This file provides the implementation of timestamp_test().
+ *
+ * \section timestamp_test_sec Scope of the Test
+ *
+ * This tests whether timestamp API works.
+ *
+ * API tested:
+ * - pj_get_timestamp_freq()
+ * - pj_get_timestamp()
+ * - pj_elapsed_usec()
+ * - PJ_LOG()
+ *
+ *
+ * This file is <b>pjlib-test/timestamp.c</b>
+ *
+ * \include pjlib-test/timestamp.c
+ */
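+
+/* Conceptually (pseudo-code; pj_timestamp is a 32/64-bit union, so the real
+ * code cannot simply subtract the structs):
+ *   elapsed_usec = (t2 - t1) * 1000000 / freq
+ * which is what pj_elapsed_usec() derives from two pj_get_timestamp()
+ * readings and the frequency reported by pj_get_timestamp_freq().
+ */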
+
+#if INCLUDE_TIMESTAMP_TEST
+
+#define THIS_FILE "timestamp"
+
+int timestamp_test(void)
+{
+ enum { CONSECUTIVE_LOOP = 1000 };
+ volatile unsigned i;
+ pj_timestamp freq, t1, t2;
+ unsigned elapsed;
+ pj_status_t rc;
+
+ PJ_LOG(3,(THIS_FILE, "...Testing timestamp (high res time)"));
+
+ /* Get and display timestamp frequency. */
+ if ((rc=pj_get_timestamp_freq(&freq)) != PJ_SUCCESS) {
+ app_perror("...ERROR: get timestamp freq", rc);
+ return -1000;
+ }
+
+ PJ_LOG(3,(THIS_FILE, "....frequency: hiword=%lu loword=%lu",
+ freq.u32.hi, freq.u32.lo));
+
+ PJ_LOG(3,(THIS_FILE, "...checking if time can run backwards (pls wait).."));
+
+ /*
+ * Check that consecutive readings yield timestamp values that never
+ * decrease (i.e. the timestamp does not run backwards).
+ * First, get the initial timestamp.
+ */
+ rc = pj_get_timestamp(&t1);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...ERROR: get timestamp", rc);
+ return -1001;
+ }
+ for (i=0; i<CONSECUTIVE_LOOP; ++i) {
+ /*
+ volatile unsigned j;
+ for (j=0; j<1000; ++j)
+ ;
+ */
+ pj_thread_sleep(1);
+ rc = pj_get_timestamp(&t2);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...ERROR: get timestamp", rc);
+ return -1002;
+ }
+ /* compare t2 with t1, expecting t2 >= t1. */
+ if (t2.u32.hi < t1.u32.hi ||
+ (t2.u32.hi == t1.u32.hi && t2.u32.lo < t1.u32.lo))
+ {
+ PJ_LOG(3,(THIS_FILE, "...ERROR: timestamp runs backwards!"));
+ return -1003;
+ }
+ }
+
+ /*
+ * Simple test to time some loop.
+ */
+ PJ_LOG(3,(THIS_FILE, "....testing simple 1000000 loop"));
+
+
+ /* Mark start time. */
+ if ((rc=pj_get_timestamp(&t1)) != PJ_SUCCESS) {
+ app_perror("....error: cat't get timestamp", rc);
+ return -1010;
+ }
+
+ /* Loop.. */
+ for (i=0; i<1000000; ++i)
+ ;
+
+ /* Mark end time. */
+ pj_get_timestamp(&t2);
+
+ /* Get elapsed time in usec. */
+ elapsed = pj_elapsed_usec(&t1, &t2);
+ PJ_LOG(3,(THIS_FILE, "....elapsed: %u usec", (unsigned)elapsed));
+
+ /* See if elapsed time is reasonable. */
+ if (elapsed < 1 || elapsed > 100000) {
+ PJ_LOG(3,(THIS_FILE, "....error: elapsed time outside window (%u)",
+ elapsed));
+ return -1030;
+ }
+ return 0;
+}
+
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_timestamp_test;
+#endif /* INCLUDE_TIMESTAMP_TEST */
+
diff --git a/pjlib/src/pjlib-test/udp_echo_srv_sync.c b/pjlib/src/pjlib-test/udp_echo_srv_sync.c
new file mode 100644
index 00000000..b513498b
--- /dev/null
+++ b/pjlib/src/pjlib-test/udp_echo_srv_sync.c
@@ -0,0 +1,168 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/udp_echo_srv_sync.c 2 29/10/05 21:34 Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/udp_echo_srv_sync.c $
+ *
+ * 2 29/10/05 21:34 Bennylp
+ * Tested on Win32
+ *
+ * 1 10/29/05 9:56a Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+#include <pjlib.h>
+
+static pj_sem_t *sem;
+static pj_mutex_t *mutex;
+static pj_size_t total_bw;
+
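+/* Each worker thread blocks in recvfrom(), echoes the datagram back to its
+ * sender, and roughly once per second adds its measured throughput (bytes
+ * per second) to the shared total_bw under the mutex, then posts the
+ * semaphore so the main thread can report the aggregate.
+ */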
+static int worker_thread(void *arg)
+{
+ pj_sock_t sock = (pj_sock_t)arg;
+ char buf[1516];
+ pj_size_t received;
+ pj_time_val last_print;
+ pj_status_t last_recv_err = PJ_SUCCESS, last_write_err = PJ_SUCCESS;
+
+ received = 0;
+ pj_gettimeofday(&last_print);
+
+ for (;;) {
+ pj_ssize_t len;
+ pj_uint32_t delay_msec;
+ pj_time_val now;
+ pj_highprec_t bw;
+ pj_status_t rc;
+ pj_sockaddr_in addr;
+ int addrlen;
+
+ len = sizeof(buf);
+ addrlen = sizeof(addr);
+ rc = pj_sock_recvfrom(sock, buf, &len, 0, &addr, &addrlen);
+ if (rc != 0) {
+ if (rc != last_recv_err) {
+ app_perror("...recv error", rc);
+ last_recv_err = rc;
+ }
+ continue;
+ }
+
+ received += len;
+
+ rc = pj_sock_sendto(sock, buf, &len, 0, &addr, addrlen);
+ if (rc != PJ_SUCCESS) {
+ if (rc != last_write_err) {
+ app_perror("...send error", rc);
+ last_write_err = rc;
+ }
+ continue;
+ }
+
+ pj_gettimeofday(&now);
+ PJ_TIME_VAL_SUB(now, last_print);
+ delay_msec = PJ_TIME_VAL_MSEC(now);
+
+ if (delay_msec < 1000)
+ continue;
+
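+ /* Throughput in bytes per second: bw = received * 1000 / delay_msec.
+ * For example (hypothetical numbers), 256000 bytes over 1024 msec gives
+ * 250000 B/s.
+ */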
+ bw = received;
+ pj_highprec_mul(bw, 1000);
+ pj_highprec_div(bw, delay_msec);
+
+ pj_mutex_lock(mutex);
+ total_bw = total_bw + (pj_size_t)bw;
+ pj_mutex_unlock(mutex);
+
+ pj_gettimeofday(&last_print);
+ received = 0;
+ pj_sem_post(sem);
+ pj_thread_sleep(0);
+ }
+}
+
+
+int echo_srv_sync(void)
+{
+ pj_pool_t *pool;
+ pj_sock_t sock;
+ pj_thread_t *thread[ECHO_SERVER_MAX_THREADS];
+ pj_status_t rc;
+ pj_highprec_t abs_total;
+ unsigned count;
+ int i;
+
+ pool = pj_pool_create(mem, NULL, 4000, 4000, NULL);
+ if (!pool)
+ return -5;
+
+ rc = pj_sem_create(pool, NULL, 0, ECHO_SERVER_MAX_THREADS, &sem);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...unable to create semaphore", rc);
+ return -6;
+ }
+
+ rc = pj_mutex_create_simple(pool, NULL, &mutex);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...unable to create mutex", rc);
+ return -7;
+ }
+
+ rc = app_socket(PJ_AF_INET, PJ_SOCK_DGRAM, 0, ECHO_SERVER_START_PORT, &sock);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...socket error", rc);
+ return -10;
+ }
+
+ for (i=0; i<ECHO_SERVER_MAX_THREADS; ++i) {
+ rc = pj_thread_create(pool, NULL, &worker_thread, (void*)sock,
+ PJ_THREAD_DEFAULT_STACK_SIZE, 0,
+ &thread[i]);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...unable to create thread", rc);
+ return -20;
+ }
+ }
+
+ PJ_LOG(3,("", "...UDP echo server running with %d threads at port %d",
+ ECHO_SERVER_MAX_THREADS, ECHO_SERVER_START_PORT));
+ PJ_LOG(3,("", "...Press Ctrl-C to abort"));
+
+ abs_total = 0;
+ count = 0;
+
+ for (;;) {
+ pj_uint32_t avg32;
+ pj_highprec_t avg;
+
+ for (i=0; i<ECHO_SERVER_MAX_THREADS; ++i)
+ pj_sem_wait(sem);
+
+ /* calculate average so far:
+ avg = abs_total / count;
+ */
+ count++;
+ abs_total += total_bw;
+ avg = abs_total;
+ pj_highprec_div(avg, count);
+ avg32 = (pj_uint32_t)avg;
+
+
+ PJ_LOG(3,("", "Synchronous UDP (%d threads): %u KB/s (avg=%u KB/s) %s",
+ ECHO_SERVER_MAX_THREADS,
+ total_bw / 1000,
+ avg32 / 1000,
+ (count==20 ? "<ses avg>" : "")));
+
+ total_bw = 0;
+
+ if (count==20) {
+ count = 0;
+ abs_total = 0;
+ }
+
+ while (pj_sem_trywait(sem) == PJ_SUCCESS)
+ ;
+ }
+}
+
+
diff --git a/pjlib/src/pjlib-test/util.c b/pjlib/src/pjlib-test/util.c
new file mode 100644
index 00000000..c698cff4
--- /dev/null
+++ b/pjlib/src/pjlib-test/util.c
@@ -0,0 +1,129 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/util.c 3 10/29/05 11:51a Bennylp $
+ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/util.c $
+ *
+ * 3 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/12/05 10:00a Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+#include <pjlib.h>
+
+void app_perror(const char *msg, pj_status_t rc)
+{
+ char errbuf[256];
+
+ PJ_CHECK_STACK();
+
+ pj_strerror(rc, errbuf, sizeof(errbuf));
+ PJ_LOG(1,("test", "%s: [pj_status_t=%d] %s", msg, rc, errbuf));
+}
+
+#define SERVER 0
+#define CLIENT 1
+
+pj_status_t app_socket(int family, int type, int proto, int port,
+ pj_sock_t *ptr_sock)
+{
+ pj_sockaddr_in addr;
+ pj_sock_t sock;
+ pj_status_t rc;
+
+ rc = pj_sock_socket(family, type, proto, &sock);
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ pj_memset(&addr, 0, sizeof(addr));
+ addr.sin_family = (pj_uint16_t)family;
+ addr.sin_port = (short)(port!=-1 ? pj_htons((pj_uint16_t)port) : 0);
+ rc = pj_sock_bind(sock, &addr, sizeof(addr));
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ if (type == PJ_SOCK_STREAM) {
+ rc = pj_sock_listen(sock, 5);
+ if (rc != PJ_SUCCESS)
+ return rc;
+ }
+
+ *ptr_sock = sock;
+ return PJ_SUCCESS;
+}
+
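+/* Emulate a BSD socketpair(): bind the server socket to a local port
+ * (retrying a few ports starting from a static counter), connect the client
+ * to 127.0.0.1, and for TCP accept() the connection and swap the accepted
+ * socket in as the server end.
+ */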
+pj_status_t app_socketpair(int family, int type, int protocol,
+ pj_sock_t *serverfd, pj_sock_t *clientfd)
+{
+ int i;
+ static unsigned short port = 11000;
+ pj_sockaddr_in addr;
+ pj_str_t s;
+ pj_status_t rc = 0;
+ pj_sock_t sock[2];
+
+ /* Create both sockets. */
+ for (i=0; i<2; ++i) {
+ rc = pj_sock_socket(family, type, protocol, &sock[i]);
+ if (rc != PJ_SUCCESS) {
+ if (i==1)
+ pj_sock_close(sock[0]);
+ return rc;
+ }
+ }
+
+ /* Retry bind */
+ pj_memset(&addr, 0, sizeof(addr));
+ addr.sin_family = PJ_AF_INET;
+ for (i=0; i<5; ++i) {
+ addr.sin_port = pj_htons(port++);
+ rc = pj_sock_bind(sock[SERVER], &addr, sizeof(addr));
+ if (rc == PJ_SUCCESS)
+ break;
+ }
+
+ if (rc != PJ_SUCCESS)
+ goto on_error;
+
+ /* For TCP, listen the socket. */
+ if (type == PJ_SOCK_STREAM) {
+ rc = pj_sock_listen(sock[SERVER], PJ_SOMAXCONN);
+ if (rc != PJ_SUCCESS)
+ goto on_error;
+ }
+
+ /* Connect client socket. */
+ addr.sin_addr = pj_inet_addr(pj_cstr(&s, "127.0.0.1"));
+ rc = pj_sock_connect(sock[CLIENT], &addr, sizeof(addr));
+ if (rc != PJ_SUCCESS)
+ goto on_error;
+
+ /* For TCP, must accept(), and get the new socket. */
+ if (type == PJ_SOCK_STREAM) {
+ pj_sock_t newserver;
+
+ rc = pj_sock_accept(sock[SERVER], &newserver, NULL, NULL);
+ if (rc != PJ_SUCCESS)
+ goto on_error;
+
+ /* Replace server socket with new socket. */
+ pj_sock_close(sock[SERVER]);
+ sock[SERVER] = newserver;
+ }
+
+ *serverfd = sock[SERVER];
+ *clientfd = sock[CLIENT];
+
+ return rc;
+
+on_error:
+ for (i=0; i<2; ++i)
+ pj_sock_close(sock[i]);
+ return rc;
+}
diff --git a/pjlib/src/pjlib-test/xml.c b/pjlib/src/pjlib-test/xml.c
new file mode 100644
index 00000000..9a7c0a1e
--- /dev/null
+++ b/pjlib/src/pjlib-test/xml.c
@@ -0,0 +1,127 @@
+#include "test.h"
+
+
+#if INCLUDE_XML_TEST
+
+#include <pj/xml.h>
+#include <pjlib.h>
+
+#define THIS_FILE "xml_test"
+
+static const char *xml_doc[] =
+{
+" <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
+" <p:pidf-full xmlns=\"urn:ietf:params:xml:ns:pidf\"\n"
+" xmlns:p=\"urn:ietf:params:xml:ns:pidf-diff\"\n"
+" xmlns:r=\"urn:ietf:params:xml:ns:pidf:rpid\"\n"
+" xmlns:c=\"urn:ietf:params:xml:ns:pidf:caps\"\n"
+" entity=\"pres:someone@example.com\"\n"
+" version=\"567\">\n"
+"\n"
+" <tuple id=\"sg89ae\">\n"
+" <status>\n"
+" <basic>open</basic>\n"
+" <r:relationship>assistant</r:relationship>\n"
+" </status>\n"
+" <c:servcaps>\n"
+" <c:audio>true</c:audio>\n"
+" <c:video>false</c:video>\n"
+" <c:message>true</c:message>\n"
+" </c:servcaps>\n"
+" <contact priority=\"0.8\">tel:09012345678</contact>\n"
+" </tuple>\n"
+"\n"
+" <tuple id=\"cg231jcr\">\n"
+" <status>\n"
+" <basic>open</basic>\n"
+" </status>\n"
+" <contact priority=\"1.0\">im:pep@example.com</contact>\n"
+" </tuple>\n"
+"\n"
+" <tuple id=\"r1230d\">\n"
+" <status>\n"
+" <basic>closed</basic>\n"
+" <r:activity>meeting</r:activity>\n"
+" </status>\n"
+" <r:homepage>http://example.com/~pep/</r:homepage>\n"
+" <r:icon>http://example.com/~pep/icon.gif</r:icon>\n"
+" <r:card>http://example.com/~pep/card.vcd</r:card>\n"
+" <contact priority=\"0.9\">sip:pep@example.com</contact>\n"
+" </tuple>\n"
+"\n"
+" <note xml:lang=\"en\">Full state presence document</note>\n"
+"\n"
+" <r:person>\n"
+" <r:status>\n"
+" <r:activities>\n"
+" <r:on-the-phone/>\n"
+" <r:busy/>\n"
+" </r:activities>\n"
+" </r:status>\n"
+" </r:person>\n"
+"\n"
+" <r:device id=\"urn:esn:600b40c7\">\n"
+" <r:status>\n"
+" <c:devcaps>\n"
+" <c:mobility>\n"
+" <c:supported>\n"
+" <c:mobile/>\n"
+" </c:supported>\n"
+" </c:mobility>\n"
+" </c:devcaps>\n"
+" </r:status>\n"
+" </r:device>\n"
+"\n"
+" </p:pidf-full>\n"
+}
+;
+
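+/* Parse the document into a pj_xml_node tree with pj_xml_parse(), then print
+ * it back with pj_xml_print() into a buffer sized slightly larger than the
+ * input, to verify that the document round-trips.
+ */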
+static int xml_parse_print_test(const char *doc)
+{
+ pj_str_t msg;
+ pj_pool_t *pool;
+ pj_xml_node *root;
+ char *output;
+ int output_len;
+
+ pool = pj_pool_create(mem, "xml", 4096, 1024, NULL);
+ pj_strdup2(pool, &msg, doc);
+ root = pj_xml_parse(pool, msg.ptr, msg.slen);
+ if (!root) {
+ PJ_LOG(1, (THIS_FILE, " Error: unable to parse XML"));
+ return -10;
+ }
+
+ output = (char*)pj_pool_alloc(pool, msg.slen + 512);
+ pj_memset(output, 0, msg.slen+512);
+ output_len = pj_xml_print(root, output, msg.slen+512, PJ_TRUE);
+ if (output_len < 1) {
+ PJ_LOG(1, (THIS_FILE, " Error: buffer too small to print XML file"));
+ return -20;
+ }
+ output[output_len] = '\0';
+
+
+ pj_pool_release(pool);
+ return 0;
+}
+
+int xml_test()
+{
+ unsigned i;
+ for (i=0; i<sizeof(xml_doc)/sizeof(xml_doc[0]); ++i) {
+ int status;
+ if ((status=xml_parse_print_test(xml_doc[i])) != 0)
+ return status;
+ }
+ return 0;
+}
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_xml_test;
+#endif /* INCLUDE_XML_TEST */
+
+