Diffstat (limited to 'pjlib/src/pjlib-test')
-rw-r--r--  pjlib/src/pjlib-test/atomic.c              94
-rw-r--r--  pjlib/src/pjlib-test/echo_clt.c           267
-rw-r--r--  pjlib/src/pjlib-test/echo_srv.c           331
-rw-r--r--  pjlib/src/pjlib-test/errno.c              162
-rw-r--r--  pjlib/src/pjlib-test/exception.c          156
-rw-r--r--  pjlib/src/pjlib-test/fifobuf.c            100
-rw-r--r--  pjlib/src/pjlib-test/ioq_perf.c           466
-rw-r--r--  pjlib/src/pjlib-test/ioq_tcp.c            474
-rw-r--r--  pjlib/src/pjlib-test/ioq_udp.c            664
-rw-r--r--  pjlib/src/pjlib-test/list.c               209
-rw-r--r--  pjlib/src/pjlib-test/main.c                73
-rw-r--r--  pjlib/src/pjlib-test/main_mod.c            33
-rw-r--r--  pjlib/src/pjlib-test/mutex.c              164
-rw-r--r--  pjlib/src/pjlib-test/os.c                  10
-rw-r--r--  pjlib/src/pjlib-test/pool.c               164
-rw-r--r--  pjlib/src/pjlib-test/pool_perf.c          134
-rw-r--r--  pjlib/src/pjlib-test/rand.c                43
-rw-r--r--  pjlib/src/pjlib-test/rbtree.c             150
-rw-r--r--  pjlib/src/pjlib-test/select.c             208
-rw-r--r--  pjlib/src/pjlib-test/sleep.c              198
-rw-r--r--  pjlib/src/pjlib-test/sock.c               459
-rw-r--r--  pjlib/src/pjlib-test/sock_perf.c          183
-rw-r--r--  pjlib/src/pjlib-test/string.c             168
-rw-r--r--  pjlib/src/pjlib-test/test.c               196
-rw-r--r--  pjlib/src/pjlib-test/test.h                90
-rw-r--r--  pjlib/src/pjlib-test/thread.c             290
-rw-r--r--  pjlib/src/pjlib-test/timer.c              169
-rw-r--r--  pjlib/src/pjlib-test/timestamp.c          140
-rw-r--r--  pjlib/src/pjlib-test/udp_echo_srv_sync.c  168
-rw-r--r--  pjlib/src/pjlib-test/util.c               129
-rw-r--r--  pjlib/src/pjlib-test/xml.c                127
31 files changed, 6219 insertions, 0 deletions
diff --git a/pjlib/src/pjlib-test/atomic.c b/pjlib/src/pjlib-test/atomic.c
new file mode 100644
index 00000000..0a1ebb7d
--- /dev/null
+++ b/pjlib/src/pjlib-test/atomic.c
@@ -0,0 +1,94 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/atomic.c 2 10/14/05 12:26a Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/atomic.c $
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/07/05 9:49p Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+#include <pjlib.h>
+
+/**
+ * \page page_pjlib_atomic_test Test: Atomic Variable
+ *
+ * This file provides implementation of \b atomic_test(). It tests the
+ * functionality of the atomic variable API.
+ *
+ * \section atomic_test_sec Scope of the Test
+ *
+ * API tested:
+ * - pj_atomic_create()
+ * - pj_atomic_get()
+ * - pj_atomic_inc()
+ * - pj_atomic_dec()
+ * - pj_atomic_set()
+ * - pj_atomic_destroy()
+ *
+ *
+ * This file is <b>pjlib-test/atomic.c</b>
+ *
+ * \include pjlib-test/atomic.c
+ */
+
+
+#if INCLUDE_ATOMIC_TEST
+
+int atomic_test(void)
+{
+ pj_pool_t *pool;
+ pj_atomic_t *atomic_var;
+ pj_status_t rc;
+
+ pool = pj_pool_create(mem, NULL, 4096, 0, NULL);
+ if (!pool)
+ return -10;
+
+ /* create() */
+ rc = pj_atomic_create(pool, 111, &atomic_var);
+ if (rc != 0) {
+ return -20;
+ }
+
+ /* get: check the value. */
+ if (pj_atomic_get(atomic_var) != 111)
+ return -30;
+
+ /* increment. */
+ if (pj_atomic_inc(atomic_var) != 112)
+ return -40;
+
+ /* decrement. */
+ if (pj_atomic_dec(atomic_var) != 111)
+ return -50;
+
+ /* set */
+ if (pj_atomic_set(atomic_var, 211) != 111)
+ return -60;
+
+ /* check the value again. */
+ if (pj_atomic_get(atomic_var) != 211)
+ return -70;
+
+ /* destroy */
+ rc = pj_atomic_destroy(atomic_var);
+ if (rc != 0)
+ return -80;
+
+ pj_pool_release(pool);
+
+ return 0;
+}
+
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_atomic_test;
+#endif /* INCLUDE_ATOMIC_TEST */
+
diff --git a/pjlib/src/pjlib-test/echo_clt.c b/pjlib/src/pjlib-test/echo_clt.c
new file mode 100644
index 00000000..565d5607
--- /dev/null
+++ b/pjlib/src/pjlib-test/echo_clt.c
@@ -0,0 +1,267 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/echo_clt.c 3 10/29/05 10:25p Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/echo_clt.c $
+ *
+ * 3 10/29/05 10:25p Bennylp
+ * Tested.
+ *
+ * 2 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 1 10/24/05 11:28a Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+#include <pjlib.h>
+
+#if INCLUDE_ECHO_CLIENT
+
+enum { BUF_SIZE = 512 };
+
+struct client
+{
+ int sock_type;
+ const char *server;
+ int port;
+};
+
+static pj_sem_t *sem;
+static pj_mutex_t *mutex;
+static pj_size_t total_bw;
+static unsigned total_poster;
+static pj_time_val first_report;
+
+#define MSEC_PRINT_DURATION 1000
+
+static int wait_socket(pj_sock_t sock, unsigned msec_timeout)
+{
+ pj_fd_set_t fdset;
+ pj_time_val timeout;
+
+ timeout.sec = 0;
+ timeout.msec = msec_timeout;
+ pj_time_val_normalize(&timeout);
+
+ PJ_FD_ZERO(&fdset);
+ PJ_FD_SET(sock, &fdset);
+
+ return pj_sock_select(1, &fdset, NULL, NULL, &timeout);
+}
+
+static int echo_client_thread(void *arg)
+{
+ pj_sock_t sock;
+ char send_buf[BUF_SIZE];
+ char recv_buf[BUF_SIZE];
+ pj_sockaddr_in addr;
+ pj_str_t s;
+ pj_status_t rc;
+ struct client *client = arg;
+ pj_status_t last_recv_err = PJ_SUCCESS, last_send_err = PJ_SUCCESS;
+
+ pj_time_val last_report, next_report;
+ pj_size_t thread_total;
+
+ rc = app_socket(PJ_AF_INET, client->sock_type, 0, -1, &sock);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...unable to create socket", rc);
+ return -10;
+ }
+
+ rc = pj_sockaddr_in_init( &addr, pj_cstr(&s, client->server),
+ (pj_uint16_t)client->port);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...unable to resolve server", rc);
+ return -15;
+ }
+
+ rc = pj_sock_connect(sock, &addr, sizeof(addr));
+ if (rc != PJ_SUCCESS) {
+ app_perror("...connect() error", rc);
+ pj_sock_close(sock);
+ return -20;
+ }
+
+ pj_create_random_string(send_buf, BUF_SIZE);
+ thread_total = 0;
+
+ /* Give other threads a chance to initialize themselves. */
+ pj_thread_sleep(500);
+
+ pj_gettimeofday(&last_report);
+ next_report = first_report;
+
+ //PJ_LOG(3,("", "...thread %p running", pj_thread_this()));
+
+ for (;;) {
+ int rc;
+ pj_ssize_t bytes;
+ pj_time_val now;
+
+ /* Send a packet. */
+ bytes = BUF_SIZE;
+ rc = pj_sock_send(sock, send_buf, &bytes, 0);
+ if (rc != PJ_SUCCESS || bytes != BUF_SIZE) {
+ if (rc != last_send_err) {
+ app_perror("...send() error", rc);
+ PJ_LOG(3,("", "...ignoring subsequent error.."));
+ last_send_err = rc;
+ pj_thread_sleep(100);
+ }
+ continue;
+ }
+
+ rc = wait_socket(sock, 500);
+ if (rc == 0) {
+ PJ_LOG(3,("", "...timeout"));
+ } else {
+ /* Receive back the original packet. */
+ bytes = 0;
+ do {
+ pj_ssize_t received = BUF_SIZE - bytes;
+ rc = pj_sock_recv(sock, recv_buf+bytes, &received, 0);
+ if (rc != PJ_SUCCESS || received == 0) {
+ if (rc != last_recv_err) {
+ app_perror("...recv() error", rc);
+ PJ_LOG(3,("", "...ignoring subsequent error.."));
+ last_recv_err = rc;
+ pj_thread_sleep(100);
+ }
+ bytes = 0;
+ break;
+ }
+ bytes += received;
+ } while (bytes != BUF_SIZE);
+ }
+
+ /* Accumulate total received. */
+ thread_total = thread_total + bytes;
+
+ /* Report the current bandwidth when the report interval is due. */
+ pj_gettimeofday(&now);
+
+ if (PJ_TIME_VAL_GTE(now, next_report)) {
+ pj_uint32_t bw;
+ pj_bool_t signal_parent = 0;
+ pj_time_val duration;
+ pj_uint32_t msec;
+
+ duration = now;
+ PJ_TIME_VAL_SUB(duration, last_report);
+ msec = PJ_TIME_VAL_MSEC(duration);
+
+ bw = thread_total * 1000 / msec;
+
+ /* Post result to parent */
+ pj_mutex_lock(mutex);
+ total_bw += bw;
+ total_poster++;
+ //PJ_LOG(3,("", "...thread %p posting result", pj_thread_this()));
+ if (total_poster >= ECHO_CLIENT_MAX_THREADS)
+ signal_parent = 1;
+ pj_mutex_unlock(mutex);
+
+ thread_total = 0;
+ last_report = now;
+ next_report.sec++;
+
+ if (signal_parent) {
+ pj_sem_post(sem);
+ }
+
+ pj_thread_sleep(0);
+ }
+
+ if (bytes == 0)
+ continue;
+
+ if (pj_memcmp(send_buf, recv_buf, BUF_SIZE) != 0) {
+ PJ_LOG(3,("", "...error: buffer has changed!"));
+ break;
+ }
+ }
+
+ pj_sock_close(sock);
+ return 0;
+}
+
+int echo_client(int sock_type, const char *server, int port)
+{
+ pj_pool_t *pool;
+ pj_thread_t *thread[ECHO_CLIENT_MAX_THREADS];
+ pj_status_t rc;
+ struct client client;
+ int i;
+
+ client.sock_type = sock_type;
+ client.server = server;
+ client.port = port;
+
+ pool = pj_pool_create( mem, NULL, 4000, 4000, NULL );
+
+ rc = pj_sem_create(pool, NULL, 0, ECHO_CLIENT_MAX_THREADS+1, &sem);
+ if (rc != PJ_SUCCESS) {
+ PJ_LOG(3,("", "...error: unable to create semaphore", rc));
+ return -10;
+ }
+
+ rc = pj_mutex_create_simple(pool, NULL, &mutex);
+ if (rc != PJ_SUCCESS) {
+ PJ_LOG(3,("", "...error: unable to create mutex", rc));
+ return -20;
+ }
+
+ /*
+ rc = pj_atomic_create(pool, 0, &atom);
+ if (rc != PJ_SUCCESS) {
+ PJ_LOG(3,("", "...error: unable to create atomic variable", rc));
+ return -30;
+ }
+ */
+
+ PJ_LOG(3,("", "Echo client started"));
+ PJ_LOG(3,("", " Destination: %s:%d",
+ ECHO_SERVER_ADDRESS, ECHO_SERVER_START_PORT));
+ PJ_LOG(3,("", " Press Ctrl-C to exit"));
+
+ pj_gettimeofday(&first_report);
+ first_report.sec += 2;
+
+ for (i=0; i<ECHO_CLIENT_MAX_THREADS; ++i) {
+ rc = pj_thread_create( pool, NULL, &echo_client_thread, &client,
+ PJ_THREAD_DEFAULT_STACK_SIZE, 0,
+ &thread[i]);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: unable to create thread", rc);
+ return -10;
+ }
+ }
+
+ for (;;) {
+ pj_uint32_t bw;
+
+ pj_sem_wait(sem);
+
+ pj_mutex_lock(mutex);
+ bw = total_bw;
+ total_bw = 0;
+ total_poster = 0;
+ pj_mutex_unlock(mutex);
+
+ PJ_LOG(3,("", "...%d threads, total bandwidth: %d KB/s",
+ ECHO_CLIENT_MAX_THREADS, bw/1000));
+ }
+
+ for (i=0; i<ECHO_CLIENT_MAX_THREADS; ++i) {
+ pj_thread_join( thread[i] );
+ }
+
+ pj_pool_release(pool);
+ return 0;
+}
+
+
+#else
+int dummy_echo_client;
+#endif /* INCLUDE_ECHO_CLIENT */
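
For reference, the bandwidth figure each client thread posts above is plain bytes-per-second over the reporting interval, and the parent simply sums the per-thread figures and divides by 1000 for the "KB/s" log line. A minimal sketch of that arithmetic follows; the helper name and the sample numbers are illustrative only and are not part of the test code.

#include <pj/types.h>

/* Illustrative helper mirroring the math in echo_client_thread():
 * bytes received during the interval, divided by the interval length
 * in milliseconds, times 1000 => bytes per second.
 */
static pj_uint32_t interval_bandwidth(pj_size_t bytes, pj_uint32_t msec)
{
    return (pj_uint32_t)(bytes * 1000 / msec);
}

/* Example: 512000 bytes echoed back within a 1000 ms interval
 * => 512000 B/s for that thread, which the parent reports as 512 KB/s.
 */
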
diff --git a/pjlib/src/pjlib-test/echo_srv.c b/pjlib/src/pjlib-test/echo_srv.c
new file mode 100644
index 00000000..cee64309
--- /dev/null
+++ b/pjlib/src/pjlib-test/echo_srv.c
@@ -0,0 +1,331 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/echo_srv.c 3 10/29/05 10:23p Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/echo_srv.c $
+ *
+ * 3 10/29/05 10:23p Bennylp
+ * Changed ioqueue accept specification.
+ *
+ * 2 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 1 10/24/05 11:28a Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+#include <pjlib.h>
+#include <pj/compat/high_precision.h>
+
+#if INCLUDE_ECHO_SERVER
+
+static pj_bool_t thread_quit_flag;
+
+struct server
+{
+ pj_pool_t *pool;
+ int sock_type;
+ int thread_count;
+ pj_ioqueue_t *ioqueue;
+ pj_sock_t sock;
+ pj_sock_t client_sock;
+ pj_ioqueue_key_t *key;
+ pj_ioqueue_callback cb;
+ char *buf;
+ pj_size_t bufsize;
+ pj_sockaddr_in addr;
+ int addrlen;
+ pj_size_t bytes_recv;
+ pj_timestamp start_time;
+};
+
+static void on_read_complete(pj_ioqueue_key_t *key, pj_ssize_t bytes_read)
+{
+ struct server *server = pj_ioqueue_get_user_data(key);
+ pj_status_t rc;
+
+ if (server->sock_type == PJ_SOCK_DGRAM) {
+ if (bytes_read > 0) {
+ /* Send data back to sender. */
+ rc = pj_ioqueue_sendto( server->ioqueue, server->key,
+ server->buf, bytes_read, 0,
+ &server->addr, server->addrlen);
+ if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
+ app_perror("...sendto() error", rc);
+ }
+ } else {
+ PJ_LOG(3,("", "...read error (bytes_read=%d)", bytes_read));
+ }
+
+ /* Start next receive. */
+ rc = pj_ioqueue_recvfrom( server->ioqueue, server->key,
+ server->buf, server->bufsize, 0,
+ &server->addr, &server->addrlen);
+ if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
+ app_perror("...recvfrom() error", rc);
+ }
+
+ }
+ else if (server->sock_type == PJ_SOCK_STREAM) {
+ if (bytes_read > 0) {
+ /* Send data back to sender. */
+ rc = pj_ioqueue_send( server->ioqueue, server->key,
+ server->buf, bytes_read, 0);
+ if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
+ app_perror("...send() error", rc);
+ bytes_read = 0;
+ }
+ }
+
+ if (bytes_read <= 0) {
+ PJ_LOG(3,("", "...tcp closed"));
+ pj_ioqueue_unregister( server->ioqueue, server->key );
+ pj_sock_close( server->sock );
+ pj_pool_release( server->pool );
+ return;
+ }
+
+ /* Start next receive. */
+ rc = pj_ioqueue_recv( server->ioqueue, server->key,
+ server->buf, server->bufsize, 0);
+ if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
+ app_perror("...recv() error", rc);
+ }
+ }
+
+ /* Add counter. */
+ if (bytes_read > 0) {
+ if (server->bytes_recv == 0) {
+ pj_get_timestamp(&server->start_time);
+ server->bytes_recv += bytes_read;
+ } else {
+ enum { USECS_IN_SECOND = 1000000 };
+ pj_timestamp now;
+ pj_uint32_t usec_elapsed;
+
+ server->bytes_recv += bytes_read;
+
+ pj_get_timestamp(&now);
+ usec_elapsed = pj_elapsed_usec(&server->start_time, &now);
+ if (usec_elapsed > USECS_IN_SECOND) {
+ if (usec_elapsed < 2 * USECS_IN_SECOND) {
+ pj_highprec_t bw;
+ pj_uint32_t bw32;
+ const char *type_name;
+
+ /* bandwidth(bw) = server->bytes_recv * USECS/elapsed */
+ bw = server->bytes_recv;
+ pj_highprec_mul(bw, USECS_IN_SECOND);
+ pj_highprec_div(bw, usec_elapsed);
+
+ bw32 = (pj_uint32_t) bw;
+
+ if (server->sock_type==PJ_SOCK_STREAM)
+ type_name = "tcp";
+ else if (server->sock_type==PJ_SOCK_DGRAM)
+ type_name = "udp";
+ else
+ type_name = "???";
+
+ PJ_LOG(3,("",
+ "...[%s:%d (%d threads)] Current bandwidth=%u KBps",
+ type_name,
+ ECHO_SERVER_START_PORT+server->thread_count,
+ server->thread_count,
+ bw32/1024));
+ }
+ server->start_time = now;
+ server->bytes_recv = 0;
+ }
+ }
+ }
+}
+
+static void on_accept_complete( pj_ioqueue_key_t *key, pj_sock_t sock,
+ int status)
+{
+ struct server *server_server = pj_ioqueue_get_user_data(key);
+ pj_status_t rc;
+
+ PJ_UNUSED_ARG(sock);
+
+ if (status == 0) {
+ pj_pool_t *pool;
+ struct server *new_server;
+
+ pool = pj_pool_create(mem, NULL, 4000, 4000, NULL);
+ new_server = pj_pool_zalloc(pool, sizeof(struct server));
+
+ new_server->pool = pool;
+ new_server->ioqueue = server_server->ioqueue;
+ new_server->sock_type = server_server->sock_type;
+ new_server->thread_count = server_server->thread_count;
+ new_server->sock = server_server->client_sock;
+ new_server->bufsize = 4096;
+ new_server->buf = pj_pool_alloc(pool, new_server->bufsize);
+ new_server->cb = server_server->cb;
+
+ rc = pj_ioqueue_register_sock( new_server->pool, new_server->ioqueue,
+ new_server->sock, new_server,
+ &server_server->cb, &new_server->key);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...registering new tcp sock", rc);
+ pj_sock_close(new_server->sock);
+ pj_pool_release(pool);
+ thread_quit_flag = 1;
+ return;
+ }
+
+ rc = pj_ioqueue_recv( new_server->ioqueue, new_server->key,
+ new_server->buf, new_server->bufsize, 0);
+ if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
+ app_perror("...recv() error", rc);
+ pj_sock_close(new_server->sock);
+ pj_pool_release(pool);
+ thread_quit_flag = 1;
+ return;
+ }
+ }
+
+ rc = pj_ioqueue_accept( server_server->ioqueue, server_server->key,
+ &server_server->client_sock,
+ NULL, NULL, NULL);
+ if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
+ app_perror("...accept() error", rc);
+ thread_quit_flag = 1;
+ }
+}
+
+static int thread_proc(void *arg)
+{
+ pj_ioqueue_t *ioqueue = arg;
+
+ while (!thread_quit_flag) {
+ pj_time_val timeout;
+ int count;
+
+ timeout.sec = 0; timeout.msec = 50;
+ count = pj_ioqueue_poll( ioqueue, &timeout );
+ if (count > 0) {
+ count = 0;
+ }
+ }
+
+ return 0;
+}
+
+static int start_echo_server( int sock_type, int port, int thread_count )
+{
+ pj_pool_t *pool;
+ struct server *server;
+ int i;
+ pj_status_t rc;
+
+
+ pool = pj_pool_create(mem, NULL, 4000, 4000, NULL);
+ if (!pool)
+ return -10;
+
+ server = pj_pool_zalloc(pool, sizeof(struct server));
+
+ server->sock_type = sock_type;
+ server->thread_count = thread_count;
+ server->cb.on_read_complete = &on_read_complete;
+ server->cb.on_accept_complete = &on_accept_complete;
+
+ /* create ioqueue */
+ rc = pj_ioqueue_create( pool, 32, thread_count, &server->ioqueue);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error creating ioqueue", rc);
+ return -20;
+ }
+
+ /* create and register socket to ioqueue. */
+ rc = app_socket(PJ_AF_INET, sock_type, 0, port, &server->sock);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error initializing socket", rc);
+ return -30;
+ }
+
+ rc = pj_ioqueue_register_sock( pool, server->ioqueue,
+ server->sock,
+ server, &server->cb,
+ &server->key);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error registering socket to ioqueue", rc);
+ return -40;
+ }
+
+ /* create receive buffer. */
+ server->bufsize = 4096;
+ server->buf = pj_pool_alloc(pool, server->bufsize);
+
+ if (sock_type == PJ_SOCK_DGRAM) {
+ server->addrlen = sizeof(server->addr);
+ rc = pj_ioqueue_recvfrom( server->ioqueue, server->key,
+ server->buf, server->bufsize,
+ 0,
+ &server->addr, &server->addrlen);
+ if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
+ app_perror("...read error", rc);
+ return -50;
+ }
+ } else {
+ rc = pj_ioqueue_accept( server->ioqueue, server->key,
+ &server->client_sock, NULL, NULL, NULL );
+ if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
+ app_perror("...accept() error", rc);
+ return -60;
+ }
+ }
+
+ /* create threads. */
+
+ for (i=0; i<thread_count; ++i) {
+ pj_thread_t *thread;
+ rc = pj_thread_create(pool, NULL, &thread_proc, server->ioqueue,
+ PJ_THREAD_DEFAULT_STACK_SIZE, 0, &thread);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...unable to create thread", rc);
+ return -70;
+ }
+ }
+
+ /* Done. */
+ return PJ_SUCCESS;
+}
+
+int echo_server(void)
+{
+ enum { MAX_THREADS = 4 };
+ int sock_types[2];
+ int i, j, rc;
+
+ sock_types[0] = PJ_SOCK_DGRAM;
+ sock_types[1] = PJ_SOCK_STREAM;
+
+ for (i=0; i<2; ++i) {
+ for (j=0; j<MAX_THREADS; ++j) {
+ rc = start_echo_server(sock_types[i], ECHO_SERVER_START_PORT+j, j+1);
+ if (rc != 0)
+ return rc;
+ }
+ }
+
+ pj_thread_sleep(100);
+ PJ_LOG(3,("", "Echo server started in port %d - %d",
+ ECHO_SERVER_START_PORT, ECHO_SERVER_START_PORT + MAX_THREADS));
+
+ PJ_LOG(3,("", "Press Ctrl-C to quit"));
+
+ for (;!thread_quit_flag;) {
+ pj_thread_sleep(1000);
+ }
+
+ return 0;
+}
+
+
+#else
+int dummy_echo_server;
+#endif /* INCLUDE_ECHO_SERVER */
+
diff --git a/pjlib/src/pjlib-test/errno.c b/pjlib/src/pjlib-test/errno.c
new file mode 100644
index 00000000..44f60ec7
--- /dev/null
+++ b/pjlib/src/pjlib-test/errno.c
@@ -0,0 +1,162 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/errno.c 4 10/14/05 3:05p Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/errno.c $
+ *
+ * 4 10/14/05 3:05p Bennylp
+ * Fixed warning about strlen() on Linux.
+ *
+ * 3 14/10/05 11:30 Bennylp
+ * Verify the error message.
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/09/05 9:56p Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+#include <pj/errno.h>
+#include <pj/log.h>
+#include <pj/ctype.h>
+#include <pj/compat/socket.h>
+#include <pj/string.h>
+
+#if INCLUDE_ERRNO_TEST
+
+#define THIS_FILE "errno"
+
+#if defined(PJ_WIN32) && PJ_WIN32 != 0
+# include <windows.h>
+#endif
+
+#if defined(PJ_HAS_ERRNO_H) && PJ_HAS_ERRNO_H != 0
+# include <errno.h>
+#endif
+
+static void trim_newlines(char *s)
+{
+ while (*s) {
+ if (*s == '\r' || *s == '\n')
+ *s = ' ';
+ ++s;
+ }
+}
+
+int my_strncasecmp(const char *s1, const char *s2, int max_len)
+{
+ while (*s1 && *s2 && max_len > 0) {
+ if (pj_tolower(*s1) != pj_tolower(*s2))
+ return -1;
+ ++s1;
+ ++s2;
+ --max_len;
+ }
+ return 0;
+}
+
+const char *my_stristr(const char *whole, const char *part)
+{
+ int part_len = strlen(part);
+ while (*whole) {
+ if (my_strncasecmp(whole, part, part_len) == 0)
+ return whole;
+ ++whole;
+ }
+ return NULL;
+}
+
+int errno_test(void)
+{
+ enum { CUT = 6 };
+ pj_status_t rc;
+ char errbuf[256];
+
+ PJ_LOG(3,(THIS_FILE, "...errno test: check the msg carefully"));
+
+ /*
+ * Windows platform error.
+ */
+# ifdef ERROR_INVALID_DATA
+ rc = PJ_STATUS_FROM_OS(ERROR_INVALID_DATA);
+ pj_set_os_error(rc);
+
+ /* Whole */
+ pj_strerror(rc, errbuf, sizeof(errbuf));
+ trim_newlines(errbuf);
+ PJ_LOG(3,(THIS_FILE, "...msg for rc=ERROR_INVALID_DATA: '%s'", errbuf));
+ if (my_stristr(errbuf, "invalid") == NULL) {
+ PJ_LOG(3, (THIS_FILE,
+ "...error: expecting \"invalid\" string in the msg"));
+ return -20;
+ }
+
+ /* Cut version. */
+ pj_strerror(rc, errbuf, CUT);
+ PJ_LOG(3,(THIS_FILE, "...msg for rc=ERROR_INVALID_DATA (cut): '%s'", errbuf));
+# endif
+
+ /*
+ * Unix errors
+ */
+# ifdef EINVAL
+ rc = PJ_STATUS_FROM_OS(EINVAL);
+ pj_set_os_error(rc);
+
+ /* Whole */
+ pj_strerror(rc, errbuf, sizeof(errbuf));
+ trim_newlines(errbuf);
+ PJ_LOG(3,(THIS_FILE, "...msg for rc=EINVAL: '%s'", errbuf));
+ if (my_stristr(errbuf, "invalid") == NULL) {
+ PJ_LOG(3, (THIS_FILE,
+ "...error: expecting \"invalid\" string in the msg"));
+ return -30;
+ }
+
+ /* Cut */
+ pj_strerror(rc, errbuf, CUT);
+ PJ_LOG(3,(THIS_FILE, "...msg for rc=EINVAL (cut): '%s'", errbuf));
+# endif
+
+ /*
+ * Windows WSA errors
+ */
+# ifdef WSAEINVAL
+ rc = PJ_STATUS_FROM_OS(WSAEINVAL);
+ pj_set_os_error(rc);
+
+ /* Whole */
+ pj_strerror(rc, errbuf, sizeof(errbuf));
+ trim_newlines(errbuf);
+ PJ_LOG(3,(THIS_FILE, "...msg for rc=WSAEINVAL: '%s'", errbuf));
+ if (my_stristr(errbuf, "invalid") == NULL) {
+ PJ_LOG(3, (THIS_FILE,
+ "...error: expecting \"invalid\" string in the msg"));
+ return -40;
+ }
+
+ /* Cut */
+ pj_strerror(rc, errbuf, CUT);
+ PJ_LOG(3,(THIS_FILE, "...msg for rc=WSAEINVAL (cut): '%s'", errbuf));
+# endif
+
+ pj_strerror(PJ_EBUG, errbuf, sizeof(errbuf));
+ PJ_LOG(3,(THIS_FILE, "...msg for rc=PJ_EBUG: '%s'", errbuf));
+ if (my_stristr(errbuf, "BUG") == NULL) {
+ PJ_LOG(3, (THIS_FILE,
+ "...error: expecting \"BUG\" string in the msg"));
+ return -20;
+ }
+
+ pj_strerror(PJ_EBUG, errbuf, CUT);
+ PJ_LOG(3,(THIS_FILE, "...msg for rc=PJ_EBUG, cut at %d chars: '%s'",
+ CUT, errbuf));
+
+ return 0;
+}
+
+
+#endif /* INCLUDE_ERRNO_TEST */
+
+
diff --git a/pjlib/src/pjlib-test/exception.c b/pjlib/src/pjlib-test/exception.c
new file mode 100644
index 00000000..2fe62e6e
--- /dev/null
+++ b/pjlib/src/pjlib-test/exception.c
@@ -0,0 +1,156 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/exception.c 2 10/14/05 12:26a Bennylp $
+ */
+#include "test.h"
+
+
+/**
+ * \page page_pjlib_exception_test Test: Exception Handling
+ *
+ * This file provides implementation of \b exception_test(). It tests the
+ * functionality of the exception handling API.
+ *
+ * @note This test uses static IDs not acquired through proper registration.
+ * This is not recommended, since it may create ID collisions.
+ *
+ * \section exception_test_sec Scope of the Test
+ *
+ * Some scenarios tested:
+ * - no exception situation
+ * - basic TRY/CATCH
+ * - multiple exception handlers
+ * - default handlers
+ *
+ *
+ * This file is <b>pjlib-test/exception.c</b>
+ *
+ * \include pjlib-test/exception.c
+ */
+
+
+#if INCLUDE_EXCEPTION_TEST
+
+#include <pjlib.h>
+
+#define ID_1 1
+#define ID_2 2
+
+static int throw_id_1(void)
+{
+ PJ_THROW( ID_1 );
+ return -1;
+}
+
+static int throw_id_2(void)
+{
+ PJ_THROW( ID_2 );
+ return -1;
+}
+
+
+static int test(void)
+{
+ PJ_USE_EXCEPTION;
+ int rc = 0;
+
+ /*
+ * No exception situation.
+ */
+ PJ_TRY {
+ rc = rc;
+ }
+ PJ_CATCH( ID_1 ) {
+ rc = -2;
+ }
+ PJ_DEFAULT {
+ rc = -3;
+ }
+ PJ_END;
+
+ if (rc != 0)
+ return rc;
+
+
+ /*
+ * Basic TRY/CATCH
+ */
+ PJ_TRY {
+ rc = throw_id_1();
+
+ // should not reach here.
+ rc = -10;
+ }
+ PJ_CATCH( ID_1 ) {
+ if (!rc) rc = 0;
+ }
+ PJ_DEFAULT {
+ if (!rc) rc = -20;
+ }
+ PJ_END;
+
+ if (rc != 0)
+ return rc;
+
+ /*
+ * Multiple exception handlers
+ */
+ PJ_TRY {
+ rc = throw_id_2();
+ // should not reach here.
+ rc = -25;
+ }
+ PJ_CATCH( ID_1 ) {
+ if (!rc) rc = -30;
+ }
+ PJ_CATCH( ID_2 ) {
+ if (!rc) rc = 0;
+ }
+ PJ_DEFAULT {
+ if (!rc) rc = -40;
+ }
+ PJ_END;
+
+ if (rc != 0)
+ return rc;
+
+ /*
+ * Test default handler.
+ */
+ PJ_TRY {
+ rc = throw_id_1();
+ // should not reach here
+ rc = -50;
+ }
+ PJ_CATCH( ID_2 ) {
+ if (!rc) rc = -60;
+ }
+ PJ_DEFAULT {
+ if (!rc) rc = 0;
+ }
+ PJ_END;
+
+ if (rc != 0)
+ return rc;
+
+ return 0;
+}
+
+int exception_test(void)
+{
+ int i, rc;
+ enum { LOOP = 10 };
+
+ for (i=0; i<LOOP; ++i) {
+ if ((rc=test()) != 0)
+ return rc;
+ }
+ return 0;
+}
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_exception_test;
+#endif /* INCLUDE_EXCEPTION_TEST */
+
+
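The @note in the doc comment above points out that hard-coding ID_1/ID_2 is not the recommended way to obtain exception IDs. As a hedged sketch only: later pjlib versions expose pj_exception_id_alloc()/pj_exception_id_free() for proper registration, and usage would look roughly like the following (treat the exact function names and signatures as an assumption for this 0.3-era snapshot).

#include <pjlib.h>

/* Assumption: pj_exception_id_alloc()/pj_exception_id_free() as found in
 * later pjlib releases; the 0.3-era API may differ.
 */
static pj_exception_id_t my_exception_id;

static pj_status_t register_my_exception(void)
{
    /* Ask the library for a unique ID instead of hard-coding one. */
    return pj_exception_id_alloc("My Exception", &my_exception_id);
}

static void use_my_exception(void)
{
    PJ_USE_EXCEPTION;

    PJ_TRY {
        PJ_THROW(my_exception_id);
    }
    PJ_CATCH(my_exception_id) {
        /* handle the exception here */
    }
    PJ_END;
}
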
diff --git a/pjlib/src/pjlib-test/fifobuf.c b/pjlib/src/pjlib-test/fifobuf.c
new file mode 100644
index 00000000..9bb471b9
--- /dev/null
+++ b/pjlib/src/pjlib-test/fifobuf.c
@@ -0,0 +1,100 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/fifobuf.c 2 10/14/05 12:26a Bennylp $
+ */
+#include "test.h"
+
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_fifobuf_test;
+
+#if INCLUDE_FIFOBUF_TEST
+
+#include <pjlib.h>
+
+int fifobuf_test()
+{
+ enum { SIZE = 1024, MAX_ENTRIES = 128,
+ MIN_SIZE = 4, MAX_SIZE = 64,
+ LOOP=10000 };
+ pj_pool_t *pool;
+ pj_fifobuf_t fifo;
+ unsigned available = SIZE;
+ void *entries[MAX_ENTRIES];
+ void *buffer;
+ int i;
+
+ pool = pj_pool_create(mem, NULL, SIZE+256, 0, NULL);
+ if (!pool)
+ return -10;
+
+ buffer = pj_pool_alloc(pool, SIZE);
+ if (!buffer)
+ return -20;
+
+ pj_fifobuf_init (&fifo, buffer, SIZE);
+
+ // Test 1
+ for (i=0; i<LOOP*MAX_ENTRIES; ++i) {
+ int size;
+ int c, f;
+ c = i%2;
+ f = (i+1)%2;
+ do {
+ size = MIN_SIZE+(pj_rand() % MAX_SIZE);
+ entries[c] = pj_fifobuf_alloc (&fifo, size);
+ } while (entries[c] == 0);
+ if ( i!=0) {
+ pj_fifobuf_free(&fifo, entries[f]);
+ }
+ }
+ if (entries[(i+1)%2])
+ pj_fifobuf_free(&fifo, entries[(i+1)%2]);
+
+ if (pj_fifobuf_max_size(&fifo) < SIZE-4) {
+ pj_assert(0);
+ return -1;
+ }
+
+ // Test 2
+ entries[0] = pj_fifobuf_alloc (&fifo, MIN_SIZE);
+ if (!entries[0]) return -1;
+ for (i=0; i<LOOP*MAX_ENTRIES; ++i) {
+ int size = MIN_SIZE+(pj_rand() % MAX_SIZE);
+ entries[1] = pj_fifobuf_alloc (&fifo, size);
+ if (entries[1])
+ pj_fifobuf_unalloc(&fifo, entries[1]);
+ }
+ pj_fifobuf_unalloc(&fifo, entries[0]);
+ if (pj_fifobuf_max_size(&fifo) < SIZE-4) {
+ pj_assert(0);
+ return -2;
+ }
+
+ // Test 3
+ for (i=0; i<LOOP; ++i) {
+ int count, j;
+ for (count=0; available>=MIN_SIZE+4 && count < MAX_ENTRIES;) {
+ int size = MIN_SIZE+(pj_rand() % MAX_SIZE);
+ entries[count] = pj_fifobuf_alloc (&fifo, size);
+ if (entries[count]) {
+ available -= (size+4);
+ ++count;
+ }
+ }
+ for (j=0; j<count; ++j) {
+ pj_fifobuf_free (&fifo, entries[j]);
+ }
+ available = SIZE;
+ }
+
+ if (pj_fifobuf_max_size(&fifo) < SIZE-4) {
+ pj_assert(0);
+ return -3;
+ }
+ pj_pool_release(pool);
+ return 0;
+}
+
+#endif /* INCLUDE_FIFOBUF_TEST */
+
+
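A side note on the constants used above: the "available -= (size+4)" bookkeeping in Test 3 and the repeated "SIZE-4" checks suggest that each fifobuf allocation carries roughly a 4-byte header. That per-entry overhead is an assumption read off the test itself, not a documented guarantee, but it makes the constants line up:

enum { SIZE = 1024, MIN_SIZE = 4, HDR = 4 /* assumed per-entry overhead */ };

/* Worst case (smallest possible entries): each allocation consumes
 * MIN_SIZE + HDR bytes of capacity, so at most 1024 / (4 + 4) = 128
 * entries fit -- exactly the MAX_ENTRIES bound used by the test.
 */
enum { MAX_FIT = SIZE / (MIN_SIZE + HDR) };   /* = 128 */
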
diff --git a/pjlib/src/pjlib-test/ioq_perf.c b/pjlib/src/pjlib-test/ioq_perf.c
new file mode 100644
index 00000000..344b0c96
--- /dev/null
+++ b/pjlib/src/pjlib-test/ioq_perf.c
@@ -0,0 +1,466 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/ioq_perf.c 4 10/29/05 11:51a Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/ioq_perf.c $
+ *
+ * 4 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 3 14/10/05 11:31 Bennylp
+ * More generalized test method, works for UDP too.
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/11/05 3:52p Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+#include <pjlib.h>
+#include <pj/compat/high_precision.h>
+
+/**
+ * \page page_pjlib_ioqueue_perf_test Test: I/O Queue Performance
+ *
+ * Test the performance of the I/O queue, using a typical producer-
+ * consumer test. The test examines the effect of using multiple
+ * threads on the performance.
+ *
+ * This file is <b>pjlib-test/ioq_perf.c</b>
+ *
+ * \include pjlib-test/ioq_perf.c
+ */
+
+#if INCLUDE_IOQUEUE_PERF_TEST
+
+#ifdef _MSC_VER
+# pragma warning ( disable: 4204) // non-constant aggregate initializer
+#endif
+
+#define THIS_FILE "ioq_perf"
+//#define TRACE_(expr) PJ_LOG(3,expr)
+#define TRACE_(expr)
+
+
+static pj_bool_t thread_quit_flag;
+static pj_status_t last_error;
+static unsigned last_error_counter;
+
+/* Descriptor for each producer/consumer pair. */
+typedef struct test_item
+{
+ pj_sock_t server_fd,
+ client_fd;
+ pj_ioqueue_t *ioqueue;
+ pj_ioqueue_key_t *server_key,
+ *client_key;
+ pj_size_t buffer_size;
+ char *outgoing_buffer;
+ char *incoming_buffer;
+ pj_size_t bytes_sent,
+ bytes_recv;
+} test_item;
+
+/* Callback when data has been read.
+ * Increment item->bytes_recv and ready to read the next data.
+ */
+static void on_read_complete(pj_ioqueue_key_t *key, pj_ssize_t bytes_read)
+{
+ test_item *item = pj_ioqueue_get_user_data(key);
+ pj_status_t rc;
+
+ //TRACE_((THIS_FILE, " read complete, bytes_read=%d", bytes_read));
+
+ if (thread_quit_flag)
+ return;
+
+ if (bytes_read < 0) {
+ pj_status_t rc = -bytes_read;
+ char errmsg[128];
+
+ if (rc != last_error) {
+ last_error = rc;
+ pj_strerror(rc, errmsg, sizeof(errmsg));
+ PJ_LOG(3,(THIS_FILE, "...error: read error, bytes_read=%d (%s)",
+ bytes_read, errmsg));
+ PJ_LOG(3,(THIS_FILE,
+ ".....additional info: total read=%u, total written=%u",
+ item->bytes_recv, item->bytes_sent));
+ } else {
+ last_error_counter++;
+ }
+ bytes_read = 0;
+
+ } else if (bytes_read == 0) {
+ PJ_LOG(3,(THIS_FILE, "...socket has closed!"));
+ }
+
+ item->bytes_recv += bytes_read;
+
+ /* To ensure that the test quits, even if the main thread
+ * doesn't get a chance to run.
+ */
+ if (item->bytes_recv > item->buffer_size * 10000)
+ thread_quit_flag = 1;
+
+ rc = pj_ioqueue_recv( item->ioqueue, item->server_key,
+ item->incoming_buffer, item->buffer_size, 0 );
+
+ if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
+ if (rc != last_error) {
+ last_error = rc;
+ app_perror("...error: read error", rc);
+ } else {
+ last_error_counter++;
+ }
+ }
+}
+
+/* Callback when data has been written.
+ * Increment item->bytes_sent and write the next data.
+ */
+static void on_write_complete(pj_ioqueue_key_t *key, pj_ssize_t bytes_sent)
+{
+ test_item *item = pj_ioqueue_get_user_data(key);
+
+ //TRACE_((THIS_FILE, " write complete: sent = %d", bytes_sent));
+
+ if (thread_quit_flag)
+ return;
+
+ item->bytes_sent += bytes_sent;
+
+ if (bytes_sent <= 0) {
+ PJ_LOG(3,(THIS_FILE, "...error: sending stopped. bytes_sent=%d",
+ bytes_sent));
+ }
+ else {
+ pj_status_t rc;
+
+ rc = pj_ioqueue_write(item->ioqueue, item->client_key,
+ item->outgoing_buffer, item->buffer_size);
+ if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
+ app_perror("...error: write error", rc);
+ }
+ }
+}
+
+/* The worker thread. */
+static int worker_thread(void *arg)
+{
+ pj_ioqueue_t *ioqueue = arg;
+ const pj_time_val timeout = {0, 100};
+ int rc;
+
+ while (!thread_quit_flag) {
+ rc = pj_ioqueue_poll(ioqueue, &timeout);
+ //TRACE_((THIS_FILE, " thread: poll returned rc=%d", rc));
+ if (rc < 0) {
+ app_perror("...error in pj_ioqueue_poll()", pj_get_netos_error());
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/* Calculate the bandwidth for the specific test configuration.
+ * The test is simple:
+ * - create sockpair_cnt number of producer-consumer socket pairs.
+ * - create thread_cnt number of worker threads.
+ * - each producer sends buffer_size bytes of data as fast and
+ * as soon as it can.
+ * - each consumer reads buffer_size bytes of data as fast
+ * as it can.
+ * - measure the total bytes received by all consumers during a
+ * period of time.
+ */
+static int perform_test(int sock_type, const char *type_name,
+ unsigned thread_cnt, unsigned sockpair_cnt,
+ pj_size_t buffer_size,
+ pj_size_t *p_bandwidth)
+{
+ enum { MSEC_DURATION = 5000 };
+ pj_pool_t *pool;
+ test_item *items;
+ pj_thread_t **thread;
+ pj_ioqueue_t *ioqueue;
+ pj_status_t rc;
+ pj_ioqueue_callback ioqueue_callback;
+ pj_uint32_t total_elapsed_usec, total_received;
+ pj_highprec_t bandwidth;
+ pj_timestamp start, stop;
+ unsigned i;
+
+ TRACE_((THIS_FILE, " starting test.."));
+
+ ioqueue_callback.on_read_complete = &on_read_complete;
+ ioqueue_callback.on_write_complete = &on_write_complete;
+
+ thread_quit_flag = 0;
+
+ pool = pj_pool_create(mem, NULL, 4096, 4096, NULL);
+ if (!pool)
+ return -10;
+
+ items = pj_pool_alloc(pool, sockpair_cnt*sizeof(test_item));
+ thread = pj_pool_alloc(pool, thread_cnt*sizeof(pj_thread_t*));
+
+ TRACE_((THIS_FILE, " creating ioqueue.."));
+ rc = pj_ioqueue_create(pool, sockpair_cnt*2, thread_cnt, &ioqueue);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: unable to create ioqueue", rc);
+ return -15;
+ }
+
+ /* Initialize each producer-consumer pair. */
+ for (i=0; i<sockpair_cnt; ++i) {
+
+ items[i].ioqueue = ioqueue;
+ items[i].buffer_size = buffer_size;
+ items[i].outgoing_buffer = pj_pool_alloc(pool, buffer_size);
+ items[i].incoming_buffer = pj_pool_alloc(pool, buffer_size);
+ items[i].bytes_recv = items[i].bytes_sent = 0;
+
+ /* randomize outgoing buffer. */
+ pj_create_random_string(items[i].outgoing_buffer, buffer_size);
+
+ /* Create socket pair. */
+ TRACE_((THIS_FILE, " calling socketpair.."));
+ rc = app_socketpair(PJ_AF_INET, sock_type, 0,
+ &items[i].server_fd, &items[i].client_fd);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: unable to create socket pair", rc);
+ return -20;
+ }
+
+ /* Register server socket to ioqueue. */
+ TRACE_((THIS_FILE, " register(1).."));
+ rc = pj_ioqueue_register_sock(pool, ioqueue,
+ items[i].server_fd,
+ &items[i], &ioqueue_callback,
+ &items[i].server_key);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: registering server socket to ioqueue", rc);
+ return -60;
+ }
+
+ /* Register client socket to ioqueue. */
+ TRACE_((THIS_FILE, " register(2).."));
+ rc = pj_ioqueue_register_sock(pool, ioqueue,
+ items[i].client_fd,
+ &items[i], &ioqueue_callback,
+ &items[i].client_key);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: registering server socket to ioqueue", rc);
+ return -70;
+ }
+
+ /* Start reading. */
+ TRACE_((THIS_FILE, " pj_ioqueue_recv.."));
+ rc = pj_ioqueue_recv(ioqueue, items[i].server_key,
+ items[i].incoming_buffer, items[i].buffer_size,
+ 0);
+ if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
+ app_perror("...error: pj_ioqueue_recv", rc);
+ return -73;
+ }
+
+ /* Start writing. */
+ TRACE_((THIS_FILE, " pj_ioqueue_write.."));
+ rc = pj_ioqueue_write(ioqueue, items[i].client_key,
+ items[i].outgoing_buffer, items[i].buffer_size);
+ if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
+ app_perror("...error: pj_ioqueue_write", rc);
+ return -76;
+ }
+
+ }
+
+ /* Create the threads. */
+ for (i=0; i<thread_cnt; ++i) {
+ rc = pj_thread_create( pool, NULL,
+ &worker_thread,
+ ioqueue,
+ PJ_THREAD_DEFAULT_STACK_SIZE,
+ PJ_THREAD_SUSPENDED, &thread[i] );
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: unable to create thread", rc);
+ return -80;
+ }
+ }
+
+ /* Mark start time. */
+ rc = pj_get_timestamp(&start);
+ if (rc != PJ_SUCCESS)
+ return -90;
+
+ /* Start the thread. */
+ TRACE_((THIS_FILE, " resuming all threads.."));
+ for (i=0; i<thread_cnt; ++i) {
+ rc = pj_thread_resume(thread[i]);
+ if (rc != 0)
+ return -100;
+ }
+
+ /* Wait for MSEC_DURATION milliseconds.
+ * This should be as simple as pj_thread_sleep(MSEC_DURATION) actually,
+ * but unfortunately that doesn't work when the system doesn't employ
+ * timeslicing for threads.
+ */
+ TRACE_((THIS_FILE, " wait for few seconds.."));
+ do {
+ pj_thread_sleep(1);
+
+ /* Mark end time. */
+ rc = pj_get_timestamp(&stop);
+
+ if (thread_quit_flag) {
+ TRACE_((THIS_FILE, " transfer limit reached.."));
+ break;
+ }
+
+ if (pj_elapsed_usec(&start,&stop)<MSEC_DURATION * 1000) {
+ TRACE_((THIS_FILE, " time limit reached.."));
+ break;
+ }
+
+ } while (1);
+
+ /* Terminate all threads. */
+ TRACE_((THIS_FILE, " terminating all threads.."));
+ thread_quit_flag = 1;
+
+ for (i=0; i<thread_cnt; ++i) {
+ TRACE_((THIS_FILE, " join thread %d..", i));
+ pj_thread_join(thread[i]);
+ pj_thread_destroy(thread[i]);
+ }
+
+ /* Close all sockets. */
+ TRACE_((THIS_FILE, " closing all sockets.."));
+ for (i=0; i<sockpair_cnt; ++i) {
+ pj_ioqueue_unregister(ioqueue, items[i].server_key);
+ pj_ioqueue_unregister(ioqueue, items[i].client_key);
+ pj_sock_close(items[i].server_fd);
+ pj_sock_close(items[i].client_fd);
+ }
+
+ /* Destroy ioqueue. */
+ TRACE_((THIS_FILE, " destroying ioqueue.."));
+ pj_ioqueue_destroy(ioqueue);
+
+ /* Calculate actual time in usec. */
+ total_elapsed_usec = pj_elapsed_usec(&start, &stop);
+
+ /* Calculate total bytes received. */
+ total_received = 0;
+ for (i=0; i<sockpair_cnt; ++i) {
+ total_received = items[i].bytes_recv;
+ }
+
+ /* bandwidth = total_received*1000/total_elapsed_usec */
+ bandwidth = total_received;
+ pj_highprec_mul(bandwidth, 1000);
+ pj_highprec_div(bandwidth, total_elapsed_usec);
+
+ *p_bandwidth = (pj_uint32_t)bandwidth;
+
+ PJ_LOG(3,(THIS_FILE, " %.4s %d %d %3d us %8d KB/s",
+ type_name, thread_cnt, sockpair_cnt,
+ -1 /*total_elapsed_usec/sockpair_cnt*/,
+ *p_bandwidth));
+
+ /* Done. */
+ pj_pool_release(pool);
+
+ TRACE_((THIS_FILE, " done.."));
+ return 0;
+}
+
+/*
+ * main test entry.
+ */
+int ioqueue_perf_test(void)
+{
+ enum { BUF_SIZE = 512 };
+ int i, rc;
+ struct {
+ int type;
+ const char *type_name;
+ int thread_cnt;
+ int sockpair_cnt;
+ } test_param[] =
+ {
+ { PJ_SOCK_DGRAM, "udp", 1, 1},
+ { PJ_SOCK_DGRAM, "udp", 1, 2},
+ { PJ_SOCK_DGRAM, "udp", 1, 4},
+ { PJ_SOCK_DGRAM, "udp", 1, 8},
+ { PJ_SOCK_DGRAM, "udp", 2, 1},
+ { PJ_SOCK_DGRAM, "udp", 2, 2},
+ { PJ_SOCK_DGRAM, "udp", 2, 4},
+ { PJ_SOCK_DGRAM, "udp", 2, 8},
+ { PJ_SOCK_DGRAM, "udp", 4, 1},
+ { PJ_SOCK_DGRAM, "udp", 4, 2},
+ { PJ_SOCK_DGRAM, "udp", 4, 4},
+ { PJ_SOCK_DGRAM, "udp", 4, 8},
+ { PJ_SOCK_STREAM, "tcp", 1, 1},
+ { PJ_SOCK_STREAM, "tcp", 1, 2},
+ { PJ_SOCK_STREAM, "tcp", 1, 4},
+ { PJ_SOCK_STREAM, "tcp", 1, 8},
+ { PJ_SOCK_STREAM, "tcp", 2, 1},
+ { PJ_SOCK_STREAM, "tcp", 2, 2},
+ { PJ_SOCK_STREAM, "tcp", 2, 4},
+ { PJ_SOCK_STREAM, "tcp", 2, 8},
+ { PJ_SOCK_STREAM, "tcp", 4, 1},
+ { PJ_SOCK_STREAM, "tcp", 4, 2},
+ { PJ_SOCK_STREAM, "tcp", 4, 4},
+ { PJ_SOCK_STREAM, "tcp", 4, 8},
+ };
+ pj_size_t best_bandwidth;
+ int best_index = 0;
+
+ PJ_LOG(3,(THIS_FILE, " Benchmarking ioqueue:"));
+ PJ_LOG(3,(THIS_FILE, " ==============================================="));
+ PJ_LOG(3,(THIS_FILE, " Type Threads Skt.Pairs Avg.Time Bandwidth"));
+ PJ_LOG(3,(THIS_FILE, " ==============================================="));
+
+ best_bandwidth = 0;
+ for (i=0; i<sizeof(test_param)/sizeof(test_param[0]); ++i) {
+ pj_size_t bandwidth;
+
+ rc = perform_test(test_param[i].type,
+ test_param[i].type_name,
+ test_param[i].thread_cnt,
+ test_param[i].sockpair_cnt,
+ BUF_SIZE,
+ &bandwidth);
+ if (rc != 0)
+ return rc;
+
+ if (bandwidth > best_bandwidth)
+ best_bandwidth = bandwidth, best_index = i;
+
+ /* Give it a rest before next test. */
+ pj_thread_sleep(500);
+ }
+
+ PJ_LOG(3,(THIS_FILE,
+ " Best: Type=%s Threads=%d, Skt.Pairs=%d, Bandwidth=%u KB/s",
+ test_param[best_index].type_name,
+ test_param[best_index].thread_cnt,
+ test_param[best_index].sockpair_cnt,
+ best_bandwidth));
+ PJ_LOG(3,(THIS_FILE, " (Note: packet size=%d, total errors=%u)",
+ BUF_SIZE, last_error_counter));
+ return 0;
+}
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_uiq_perf_test;
+#endif /* INCLUDE_IOQUEUE_PERF_TEST */
+
+
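The bandwidth reported by perform_test() above comes from the formula total_received*1000/total_elapsed_usec (pj_highprec_* is used there only to avoid 32-bit overflow). Since bytes*1000/usec is bytes-per-millisecond, the result is numerically the same as kilobytes-per-second when 1 KB = 1000 bytes. A plain-integer restatement with sample numbers, for illustration only (pj_uint64_t is assumed to be available on the build target):

#include <pj/types.h>

/* Illustrative restatement of the bandwidth formula in perform_test(). */
static pj_uint32_t bandwidth_kbps(pj_uint64_t total_bytes,
                                  pj_uint32_t elapsed_usec)
{
    /* bytes * 1000 / usec == bytes/ms == KB/s (with 1 KB = 1000 bytes) */
    return (pj_uint32_t)(total_bytes * 1000 / elapsed_usec);
}

/* Example: 2,560,000 bytes received by all consumers over 5,000,000 us
 * => 2560000 * 1000 / 5000000 = 512, i.e. 512 KB/s.
 */
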
diff --git a/pjlib/src/pjlib-test/ioq_tcp.c b/pjlib/src/pjlib-test/ioq_tcp.c
new file mode 100644
index 00000000..434c25ae
--- /dev/null
+++ b/pjlib/src/pjlib-test/ioq_tcp.c
@@ -0,0 +1,474 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/ioq_tcp.c 4 10/29/05 10:23p Bennylp $
+ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/ioq_tcp.c $
+ *
+ * 4 10/29/05 10:23p Bennylp
+ * Fixed no-memory exception.
+ *
+ * 3 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ */
+#include "test.h"
+
+/**
+ * \page page_pjlib_ioqueue_tcp_test Test: I/O Queue (TCP)
+ *
+ * This file provides implementation to test the
+ * functionality of the I/O queue when TCP socket is used.
+ *
+ *
+ * This file is <b>pjlib-test/ioq_tcp.c</b>
+ *
+ * \include pjlib-test/ioq_tcp.c
+ */
+
+
+#if INCLUDE_TCP_IOQUEUE_TEST
+
+#include <pjlib.h>
+
+#if PJ_HAS_TCP
+
+#define THIS_FILE "test_tcp"
+#define PORT 50000
+#define NON_EXISTANT_PORT 50123
+#define LOOP 100
+#define BUF_MIN_SIZE 32
+#define BUF_MAX_SIZE 2048
+#define SOCK_INACTIVE_MIN (4-2)
+#define SOCK_INACTIVE_MAX (PJ_IOQUEUE_MAX_HANDLES - 2)
+#define POOL_SIZE (2*BUF_MAX_SIZE + SOCK_INACTIVE_MAX*128 + 2048)
+
+static pj_ssize_t callback_read_size,
+ callback_write_size,
+ callback_accept_status,
+ callback_connect_status;
+static pj_ioqueue_key_t*callback_read_key,
+ *callback_write_key,
+ *callback_accept_key,
+ *callback_connect_key;
+
+static void on_ioqueue_read(pj_ioqueue_key_t *key, pj_ssize_t bytes_read)
+{
+ callback_read_key = key;
+ callback_read_size = bytes_read;
+}
+
+static void on_ioqueue_write(pj_ioqueue_key_t *key, pj_ssize_t bytes_written)
+{
+ callback_write_key = key;
+ callback_write_size = bytes_written;
+}
+
+static void on_ioqueue_accept(pj_ioqueue_key_t *key, pj_sock_t sock,
+ int status)
+{
+ PJ_UNUSED_ARG(sock);
+
+ callback_accept_key = key;
+ callback_accept_status = status;
+}
+
+static void on_ioqueue_connect(pj_ioqueue_key_t *key, int status)
+{
+ callback_connect_key = key;
+ callback_connect_status = status;
+}
+
+static pj_ioqueue_callback test_cb =
+{
+ &on_ioqueue_read,
+ &on_ioqueue_write,
+ &on_ioqueue_accept,
+ &on_ioqueue_connect,
+};
+
+static int send_recv_test(pj_ioqueue_t *ioque,
+ pj_ioqueue_key_t *skey,
+ pj_ioqueue_key_t *ckey,
+ void *send_buf,
+ void *recv_buf,
+ pj_ssize_t bufsize,
+ pj_timestamp *t_elapsed)
+{
+ int rc;
+ pj_ssize_t bytes;
+ pj_timestamp t1, t2;
+ int pending_op = 0;
+
+ // Start reading on the server side.
+ rc = pj_ioqueue_read(ioque, skey, recv_buf, bufsize);
+ if (rc != 0 && rc != PJ_EPENDING) {
+ return -100;
+ }
+
+ ++pending_op;
+
+ // Randomize send buffer.
+ pj_create_random_string((char*)send_buf, bufsize);
+
+ // Starts send on the client side.
+ bytes = pj_ioqueue_write(ioque, ckey, send_buf, bufsize);
+ if (bytes != bufsize && bytes != PJ_EPENDING) {
+ return -120;
+ }
+ if (bytes == PJ_EPENDING) {
+ ++pending_op;
+ }
+
+ // Begin time.
+ pj_get_timestamp(&t1);
+
+ // Reset indicators
+ callback_read_size = callback_write_size = 0;
+ callback_read_key = callback_write_key = NULL;
+
+ // Poll the queue until we've got completion event in the server side.
+ rc = 0;
+ while (pending_op > 0) {
+ rc = pj_ioqueue_poll(ioque, NULL);
+ if (rc > 0) {
+ if (callback_read_size) {
+ if (callback_read_size != bufsize) {
+ return -160;
+ }
+ if (callback_read_key != skey)
+ return -161;
+ }
+ if (callback_write_size) {
+ if (callback_write_key != ckey)
+ return -162;
+ }
+ pending_op -= rc;
+ }
+ if (rc < 0) {
+ return -170;
+ }
+ }
+
+ // End time.
+ pj_get_timestamp(&t2);
+ t_elapsed->u32.lo += (t2.u32.lo - t1.u32.lo);
+
+ if (rc < 0) {
+ return -150;
+ }
+
+ // Compare recv buffer with send buffer.
+ if (pj_memcmp(send_buf, recv_buf, bufsize) != 0) {
+ return -180;
+ }
+
+ // Success
+ return 0;
+}
+
+
+/*
+ * Compliance test for success scenario.
+ */
+static int compliance_test_0(void)
+{
+ pj_sock_t ssock=-1, csock0=-1, csock1=-1;
+ pj_sockaddr_in addr, client_addr, rmt_addr;
+ int client_addr_len;
+ pj_pool_t *pool = NULL;
+ char *send_buf, *recv_buf;
+ pj_ioqueue_t *ioque = NULL;
+ pj_ioqueue_key_t *skey, *ckey0, *ckey1;
+ int bufsize = BUF_MIN_SIZE;
+ pj_ssize_t status = -1;
+ int pending_op = 0;
+ pj_timestamp t_elapsed;
+ pj_str_t s;
+ pj_status_t rc;
+
+ // Create pool.
+ pool = pj_pool_create(mem, NULL, POOL_SIZE, 4000, NULL);
+
+ // Allocate buffers for send and receive.
+ send_buf = (char*)pj_pool_alloc(pool, bufsize);
+ recv_buf = (char*)pj_pool_alloc(pool, bufsize);
+
+ // Create server socket and client socket for connecting
+ rc = pj_sock_socket(PJ_AF_INET, PJ_SOCK_STREAM, 0, &ssock);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error creating socket", rc);
+ status=-1; goto on_error;
+ }
+
+ rc = pj_sock_socket(PJ_AF_INET, PJ_SOCK_STREAM, 0, &csock1);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error creating socket", rc);
+ status=-1; goto on_error;
+ }
+
+ // Bind server socket.
+ memset(&addr, 0, sizeof(addr));
+ addr.sin_family = PJ_AF_INET;
+ addr.sin_port = pj_htons(PORT);
+ if (pj_sock_bind(ssock, &addr, sizeof(addr))) {
+ app_perror("...bind error", rc);
+ status=-10; goto on_error;
+ }
+
+ // Create I/O Queue.
+ rc = pj_ioqueue_create(pool, PJ_IOQUEUE_MAX_HANDLES, 0, &ioque);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...ERROR in pj_ioqueue_create()", rc);
+ status=-20; goto on_error;
+ }
+
+ // Register server socket and client socket.
+ rc = pj_ioqueue_register_sock(pool, ioque, ssock, NULL, &test_cb, &skey);
+ if (rc == PJ_SUCCESS)
+ rc = pj_ioqueue_register_sock(pool, ioque, csock1, NULL, &test_cb,
+ &ckey1);
+ else
+ ckey1 = NULL;
+ if (rc != PJ_SUCCESS) {
+ app_perror("...ERROR in pj_ioqueue_register_sock()", rc);
+ status=-23; goto on_error;
+ }
+
+ // Server socket listen().
+ if (pj_sock_listen(ssock, 5)) {
+ app_perror("...ERROR in pj_sock_listen()", rc);
+ status=-25; goto on_error;
+ }
+
+ // Server socket accept()
+ client_addr_len = sizeof(pj_sockaddr_in);
+ status = pj_ioqueue_accept(ioque, skey, &csock0, &client_addr, &rmt_addr, &client_addr_len);
+ if (status != PJ_EPENDING) {
+ app_perror("...ERROR in pj_ioqueue_accept()", rc);
+ status=-30; goto on_error;
+ }
+ if (status==PJ_EPENDING) {
+ ++pending_op;
+ }
+
+ // Initialize remote address.
+ memset(&addr, 0, sizeof(addr));
+ addr.sin_family = PJ_AF_INET;
+ addr.sin_port = pj_htons(PORT);
+ addr.sin_addr = pj_inet_addr(pj_cstr(&s, "127.0.0.1"));
+
+ // Client socket connect()
+ status = pj_ioqueue_connect(ioque, ckey1, &addr, sizeof(addr));
+ if (status!=PJ_SUCCESS && status != PJ_EPENDING) {
+ app_perror("...ERROR in pj_ioqueue_connect()", rc);
+ status=-40; goto on_error;
+ }
+ if (status==PJ_EPENDING) {
+ ++pending_op;
+ }
+
+ // Poll until connected
+ callback_read_size = callback_write_size = 0;
+ callback_accept_status = callback_connect_status = -2;
+
+ callback_read_key = callback_write_key =
+ callback_accept_key = callback_connect_key = NULL;
+
+ while (pending_op) {
+ pj_time_val timeout = {1, 0};
+
+ status=pj_ioqueue_poll(ioque, &timeout);
+ if (status > 0) {
+ if (callback_accept_status != -2) {
+ if (callback_accept_status != 0) {
+ status=-41; goto on_error;
+ }
+ if (callback_accept_key != skey) {
+ status=-41; goto on_error;
+ }
+ }
+
+ if (callback_connect_status != -2) {
+ if (callback_connect_status != 0) {
+ status=-50; goto on_error;
+ }
+ if (callback_connect_key != ckey1) {
+ status=-51; goto on_error;
+ }
+ }
+
+ pending_op -= status;
+
+ if (pending_op == 0) {
+ status = 0;
+ }
+ }
+ }
+
+ // Check accepted socket.
+ if (csock0 == PJ_INVALID_SOCKET) {
+ status = -69;
+ app_perror("...accept() error", pj_get_os_error());
+ goto on_error;
+ }
+
+ // Register newly accepted socket.
+ rc = pj_ioqueue_register_sock(pool, ioque, csock0, NULL,
+ &test_cb, &ckey0);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...ERROR in pj_ioqueue_register_sock", rc);
+ status = -70;
+ goto on_error;
+ }
+
+ // Test send and receive.
+ t_elapsed.u32.lo = 0;
+ status = send_recv_test(ioque, ckey0, ckey1, send_buf, recv_buf, bufsize, &t_elapsed);
+ if (status != 0) {
+ goto on_error;
+ }
+
+ // Success
+ status = 0;
+
+on_error:
+ if (ssock != PJ_INVALID_SOCKET)
+ pj_sock_close(ssock);
+ if (csock1 != PJ_INVALID_SOCKET)
+ pj_sock_close(csock1);
+ if (csock0 != PJ_INVALID_SOCKET)
+ pj_sock_close(csock0);
+ if (ioque != NULL)
+ pj_ioqueue_destroy(ioque);
+ pj_pool_release(pool);
+ return status;
+
+}
+
+/*
+ * Compliance test for failed scenario.
+ * In this case, the client connects to a non-existent service.
+ */
+static int compliance_test_1(void)
+{
+ pj_sock_t csock1=-1;
+ pj_sockaddr_in addr;
+ pj_pool_t *pool = NULL;
+ pj_ioqueue_t *ioque = NULL;
+ pj_ioqueue_key_t *ckey1;
+ pj_ssize_t status = -1;
+ int pending_op = 0;
+ pj_str_t s;
+ pj_status_t rc;
+
+ // Create pool.
+ pool = pj_pool_create(mem, NULL, POOL_SIZE, 4000, NULL);
+
+ // Create I/O Queue.
+ rc = pj_ioqueue_create(pool, PJ_IOQUEUE_MAX_HANDLES, 0, &ioque);
+ if (!ioque) {
+ status=-20; goto on_error;
+ }
+
+ // Create client socket
+ rc = pj_sock_socket(PJ_AF_INET, PJ_SOCK_STREAM, 0, &csock1);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...ERROR in pj_sock_socket()", rc);
+ status=-1; goto on_error;
+ }
+
+ // Register client socket.
+ rc = pj_ioqueue_register_sock(pool, ioque, csock1, NULL,
+ &test_cb, &ckey1);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...ERROR in pj_ioqueue_register_sock()", rc);
+ status=-23; goto on_error;
+ }
+
+ // Initialize remote address.
+ memset(&addr, 0, sizeof(addr));
+ addr.sin_family = PJ_AF_INET;
+ addr.sin_port = pj_htons(NON_EXISTANT_PORT);
+ addr.sin_addr = pj_inet_addr(pj_cstr(&s, "127.0.0.1"));
+
+ // Client socket connect()
+ status = pj_ioqueue_connect(ioque, ckey1, &addr, sizeof(addr));
+ if (status==PJ_SUCCESS) {
+ // unexpected success!
+ status = -30;
+ goto on_error;
+ }
+ if (status != PJ_EPENDING) {
+ // immediate connect() failure: the expected outcome for this scenario
+ } else {
+ ++pending_op;
+ }
+
+ callback_connect_status = -2;
+ callback_connect_key = NULL;
+
+ // Poll until we've got result
+ while (pending_op) {
+ pj_time_val timeout = {1, 0};
+
+ status=pj_ioqueue_poll(ioque, &timeout);
+ if (status > 0) {
+ if (callback_connect_key==ckey1) {
+ if (callback_connect_status == 0) {
+ // unexpectedly connected!
+ status = -50;
+ goto on_error;
+ }
+ }
+
+ pending_op -= status;
+ if (pending_op == 0) {
+ status = 0;
+ }
+ }
+ }
+
+ // Success
+ status = 0;
+
+on_error:
+ if (csock1 != PJ_INVALID_SOCKET)
+ pj_sock_close(csock1);
+ if (ioque != NULL)
+ pj_ioqueue_destroy(ioque);
+ pj_pool_release(pool);
+ return status;
+}
+
+int tcp_ioqueue_test()
+{
+ int status;
+
+ PJ_LOG(3, (THIS_FILE, "..compliance test 0 (success scenario)"));
+ if ((status=compliance_test_0()) != 0) {
+ PJ_LOG(1, (THIS_FILE, "....FAILED (status=%d)\n", status));
+ return status;
+ }
+ PJ_LOG(3, (THIS_FILE, "..compliance test 1 (failed scenario)"));
+ if ((status=compliance_test_1()) != 0) {
+ PJ_LOG(1, (THIS_FILE, "....FAILED (status=%d)\n", status));
+ return status;
+ }
+
+ return 0;
+}
+
+#endif /* PJ_HAS_TCP */
+
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_uiq_tcp;
+#endif /* INCLUDE_TCP_IOQUEUE_TEST */
+
+
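Both compliance tests above follow the same pattern: post an asynchronous operation, count it as pending when the call returns PJ_EPENDING, then call pj_ioqueue_poll() until the completions reported by poll have retired every pending operation (inspecting the callback_* variables along the way). A condensed sketch of that loop, using the same 0.3-era calls as the file above; the helper name is illustrative only and a real test would also bound the number of retries.

#include <pjlib.h>

/* Illustrative skeleton of the polling loop used by the compliance tests. */
static int wait_for_completions(pj_ioqueue_t *ioque, int pending_op)
{
    while (pending_op > 0) {
        pj_time_val timeout = { 1, 0 };     /* poll in one-second slices */
        int count = pj_ioqueue_poll(ioque, &timeout);

        if (count < 0)
            return -1;      /* polling error */
        if (count == 0)
            continue;       /* timed out; keep waiting (or give up) */

        /* The registered pj_ioqueue_callback handlers have now run;
         * the real tests check callback_read_key, callback_connect_status,
         * etc. at this point before retiring the completed operations.
         */
        pending_op -= count;
    }
    return 0;
}
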
diff --git a/pjlib/src/pjlib-test/ioq_udp.c b/pjlib/src/pjlib-test/ioq_udp.c
new file mode 100644
index 00000000..8b95782a
--- /dev/null
+++ b/pjlib/src/pjlib-test/ioq_udp.c
@@ -0,0 +1,664 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/ioq_udp.c 4 10/29/05 10:23p Bennylp $
+ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/ioq_udp.c $
+ *
+ * 4 10/29/05 10:23p Bennylp
+ * Fixed no-memory exception.
+ *
+ * 3 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ */
+#include "test.h"
+
+
+/**
+ * \page page_pjlib_ioqueue_udp_test Test: I/O Queue (UDP)
+ *
+ * This file provides implementation to test the
+ * functionality of the I/O queue when UDP socket is used.
+ *
+ *
+ * This file is <b>pjlib-test/ioq_udp.c</b>
+ *
+ * \include pjlib-test/ioq_udp.c
+ */
+
+
+#if INCLUDE_UDP_IOQUEUE_TEST
+
+#include <pjlib.h>
+
+#include <pj/compat/socket.h>
+
+#define THIS_FILE "test_udp"
+#define PORT 51233
+#define LOOP 100
+#define BUF_MIN_SIZE 32
+#define BUF_MAX_SIZE 2048
+#define SOCK_INACTIVE_MIN (1)
+#define SOCK_INACTIVE_MAX (PJ_IOQUEUE_MAX_HANDLES - 2)
+#define POOL_SIZE (2*BUF_MAX_SIZE + SOCK_INACTIVE_MAX*128 + 2048)
+
+#undef TRACE_
+#define TRACE_(msg) PJ_LOG(3,(THIS_FILE,"....." msg))
+
+static pj_ssize_t callback_read_size,
+ callback_write_size,
+ callback_accept_status,
+ callback_connect_status;
+static pj_ioqueue_key_t *callback_read_key,
+ *callback_write_key,
+ *callback_accept_key,
+ *callback_connect_key;
+
+static void on_ioqueue_read(pj_ioqueue_key_t *key, pj_ssize_t bytes_read)
+{
+ callback_read_key = key;
+ callback_read_size = bytes_read;
+}
+
+static void on_ioqueue_write(pj_ioqueue_key_t *key, pj_ssize_t bytes_written)
+{
+ callback_write_key = key;
+ callback_write_size = bytes_written;
+}
+
+static void on_ioqueue_accept(pj_ioqueue_key_t *key, pj_sock_t sock, int status)
+{
+ PJ_UNUSED_ARG(sock);
+ callback_accept_key = key;
+ callback_accept_status = status;
+}
+
+static void on_ioqueue_connect(pj_ioqueue_key_t *key, int status)
+{
+ callback_connect_key = key;
+ callback_connect_status = status;
+}
+
+static pj_ioqueue_callback test_cb =
+{
+ &on_ioqueue_read,
+ &on_ioqueue_write,
+ &on_ioqueue_accept,
+ &on_ioqueue_connect,
+};
+
+#ifdef PJ_WIN32
+# define S_ADDR S_un.S_addr
+#else
+# define S_ADDR s_addr
+#endif
+
+/*
+ * native_format_test()
+ * This is just a simple test to verify that various structures in sock.h
+ * are really compatible with the operating system's definitions.
+ */
+static int native_format_test(void)
+{
+ pj_status_t rc;
+
+ // Test that PJ_INVALID_SOCKET is working.
+ {
+ pj_sock_t sock;
+ rc = pj_sock_socket(PJ_AF_INET, PJ_SOCK_STREAM, -1, &sock);
+ if (rc == PJ_SUCCESS)
+ return -1020;
+ }
+
+ // Previous func will set errno var.
+ pj_set_os_error(PJ_SUCCESS);
+
+ return 0;
+}
+
+/*
+ * compliance_test()
+ * To test that the basic IOQueue functionality works. It will just exchange
+ * data between two sockets.
+ */
+static int compliance_test(void)
+{
+ pj_sock_t ssock=-1, csock=-1;
+ pj_sockaddr_in addr;
+ int addrlen;
+ pj_pool_t *pool = NULL;
+ char *send_buf, *recv_buf;
+ pj_ioqueue_t *ioque = NULL;
+ pj_ioqueue_key_t *skey, *ckey;
+ int bufsize = BUF_MIN_SIZE;
+ pj_ssize_t bytes, status = -1;
+ pj_str_t temp;
+ pj_bool_t send_pending, recv_pending;
+ pj_status_t rc;
+
+ pj_set_os_error(PJ_SUCCESS);
+
+ // Create pool.
+ pool = pj_pool_create(mem, NULL, POOL_SIZE, 4000, NULL);
+
+ // Allocate buffers for send and receive.
+ send_buf = (char*)pj_pool_alloc(pool, bufsize);
+ recv_buf = (char*)pj_pool_alloc(pool, bufsize);
+
+ // Allocate sockets for sending and receiving.
+ TRACE_("creating sockets...");
+ rc = pj_sock_socket(PJ_AF_INET, PJ_SOCK_DGRAM, 0, &ssock);
+ if (rc==PJ_SUCCESS)
+ rc = pj_sock_socket(PJ_AF_INET, PJ_SOCK_DGRAM, 0, &csock);
+ else
+ csock = PJ_INVALID_SOCKET;
+ if (rc != PJ_SUCCESS) {
+ app_perror("...ERROR in pj_sock_socket()", rc);
+ status=-1; goto on_error;
+ }
+
+ // Bind server socket.
+ TRACE_("bind socket...");
+ memset(&addr, 0, sizeof(addr));
+ addr.sin_family = PJ_AF_INET;
+ addr.sin_port = pj_htons(PORT);
+ if (pj_sock_bind(ssock, &addr, sizeof(addr))) {
+ status=-10; goto on_error;
+ }
+
+ // Create I/O Queue.
+ TRACE_("create ioqueue...");
+ rc = pj_ioqueue_create(pool, PJ_IOQUEUE_MAX_HANDLES,
+ PJ_IOQUEUE_DEFAULT_THREADS, &ioque);
+ if (rc != PJ_SUCCESS) {
+ status=-20; goto on_error;
+ }
+
+ // Register server and client socket.
+ // We register these after the inactive sockets, so this hopefully
+ // represents the worst-case waiting time.
+ TRACE_("registering first sockets...");
+ rc = pj_ioqueue_register_sock(pool, ioque, ssock, NULL,
+ &test_cb, &skey);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error(10): ioqueue_register error", rc);
+ status=-25; goto on_error;
+ }
+ TRACE_("registering second sockets...");
+ rc = pj_ioqueue_register_sock( pool, ioque, csock, NULL,
+ &test_cb, &ckey);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error(11): ioqueue_register error", rc);
+ status=-26; goto on_error;
+ }
+
+ // Set destination address to send the packet.
+ TRACE_("set destination address...");
+ temp = pj_str("127.0.0.1");
+ if ((rc=pj_sockaddr_in_init(&addr, &temp, PORT)) != 0) {
+ app_perror("...error: unable to resolve 127.0.0.1", rc);
+ status=-26; goto on_error;
+ }
+
+ // Randomize send_buf.
+ pj_create_random_string(send_buf, bufsize);
+
+ // Register reading from ioqueue.
+ TRACE_("start recvfrom...");
+ addrlen = sizeof(addr);
+ bytes = pj_ioqueue_recvfrom(ioque, skey, recv_buf, bufsize, 0,
+ &addr, &addrlen);
+ if (bytes < 0 && bytes != PJ_EPENDING) {
+ status=-28; goto on_error;
+ } else if (bytes == PJ_EPENDING) {
+ recv_pending = 1;
+ PJ_LOG(3, (THIS_FILE,
+ "......ok: recvfrom returned pending"));
+ } else {
+ PJ_LOG(3, (THIS_FILE,
+ "......error: recvfrom returned immediate ok!"));
+ status=-29; goto on_error;
+ }
+
+ // Write must return the number of bytes.
+ TRACE_("start sendto...");
+ bytes = pj_ioqueue_sendto(ioque, ckey, send_buf, bufsize, 0, &addr,
+ sizeof(addr));
+ if (bytes != bufsize && bytes != PJ_EPENDING) {
+ PJ_LOG(1,(THIS_FILE,
+ "......error: sendto returned %d", bytes));
+ status=-30; goto on_error;
+ } else if (bytes == PJ_EPENDING) {
+ send_pending = 1;
+ PJ_LOG(3, (THIS_FILE,
+ "......ok: sendto returned pending"));
+ } else {
+ send_pending = 0;
+ PJ_LOG(3, (THIS_FILE,
+ "......ok: sendto returned immediate success"));
+ }
+
+ // reset callback variables.
+ callback_read_size = callback_write_size = 0;
+ callback_accept_status = callback_connect_status = -2;
+ callback_read_key = callback_write_key =
+ callback_accept_key = callback_connect_key = NULL;
+
+ // Poll if pending.
+    while (send_pending || recv_pending) {
+ int rc;
+ pj_time_val timeout = { 5, 0 };
+
+ TRACE_("poll...");
+ rc = pj_ioqueue_poll(ioque, &timeout);
+
+ if (rc == 0) {
+ PJ_LOG(1,(THIS_FILE, "...ERROR: timed out..."));
+ status=-45; goto on_error;
+ } else if (rc < 0) {
+ app_perror("...ERROR in ioqueue_poll()", rc);
+ status=-50; goto on_error;
+ }
+
+ if (callback_read_key != NULL) {
+ if (callback_read_size != bufsize) {
+ status=-61; goto on_error;
+ }
+
+ if (callback_read_key != skey) {
+ status=-65; goto on_error;
+ }
+
+ if (memcmp(send_buf, recv_buf, bufsize) != 0) {
+ status=-70; goto on_error;
+ }
+
+
+ recv_pending = 0;
+ }
+
+ if (callback_write_key != NULL) {
+ if (callback_write_size != bufsize) {
+ status=-73; goto on_error;
+ }
+
+ if (callback_write_key != ckey) {
+ status=-75; goto on_error;
+ }
+
+ send_pending = 0;
+ }
+ }
+
+ // Success
+ status = 0;
+
+on_error:
+ if (status != 0) {
+ char errbuf[128];
+ PJ_LOG(1, (THIS_FILE,
+ "...compliance test error: status=%d, os_err=%d (%s)",
+ status, pj_get_netos_error(),
+ pj_strerror(pj_get_netos_error(), errbuf, sizeof(errbuf))));
+ }
+ if (ssock)
+ pj_sock_close(ssock);
+ if (csock)
+ pj_sock_close(csock);
+ if (ioque != NULL)
+ pj_ioqueue_destroy(ioque);
+ pj_pool_release(pool);
+ return status;
+
+}
+
+/*
+ * Testing with many handles.
+ * This simply registers up to PJ_IOQUEUE_MAX_HANDLES sockets to the
+ * ioqueue, then unregisters and closes them all again.
+ */
+static int many_handles_test(void)
+{
+ enum { MAX = PJ_IOQUEUE_MAX_HANDLES };
+ pj_pool_t *pool;
+ pj_ioqueue_t *ioqueue;
+ pj_sock_t *sock;
+ pj_ioqueue_key_t **key;
+ pj_status_t rc;
+ int count, i;
+
+    PJ_LOG(3,(THIS_FILE,"...testing with many handles"));
+
+ pool = pj_pool_create(mem, NULL, 4000, 4000, NULL);
+ if (!pool)
+ return PJ_ENOMEM;
+
+ key = pj_pool_alloc(pool, MAX*sizeof(pj_ioqueue_key_t*));
+ sock = pj_pool_alloc(pool, MAX*sizeof(pj_sock_t));
+
+ /* Create IOQueue */
+ rc = pj_ioqueue_create(pool, MAX,
+ PJ_IOQUEUE_DEFAULT_THREADS,
+ &ioqueue);
+ if (rc != PJ_SUCCESS || ioqueue == NULL) {
+ app_perror("...error in pj_ioqueue_create", rc);
+ return -10;
+ }
+
+ /* Register as many sockets. */
+ for (count=0; count<MAX; ++count) {
+ sock[count] = PJ_INVALID_SOCKET;
+ rc = pj_sock_socket(PJ_AF_INET, PJ_SOCK_DGRAM, 0, &sock[count]);
+ if (rc != PJ_SUCCESS || sock[count] == PJ_INVALID_SOCKET) {
+ PJ_LOG(3,(THIS_FILE, "....unable to create %d-th socket, rc=%d",
+ count, rc));
+ break;
+ }
+ key[count] = NULL;
+ rc = pj_ioqueue_register_sock(pool, ioqueue, sock[count],
+ NULL, &test_cb, &key[count]);
+ if (rc != PJ_SUCCESS || key[count] == NULL) {
+ PJ_LOG(3,(THIS_FILE, "....unable to register %d-th socket, rc=%d",
+ count, rc));
+ return -30;
+ }
+ }
+
+ /* Test complete. */
+
+ /* Now deregister and close all handles. */
+
+ for (i=0; i<count; ++i) {
+ rc = pj_ioqueue_unregister(ioqueue, key[i]);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error in pj_ioqueue_unregister", rc);
+ }
+ rc = pj_sock_close(sock[i]);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error in pj_sock_close", rc);
+ }
+ }
+
+ rc = pj_ioqueue_destroy(ioqueue);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error in pj_ioqueue_destroy", rc);
+ }
+
+ pj_pool_release(pool);
+
+ PJ_LOG(3,(THIS_FILE,"....many_handles_test() ok"));
+
+ return 0;
+}
+
+/*
+ * Multi-operation test.
+ */
+
+/*
+ * Benchmarking IOQueue
+ */
+static int bench_test(int bufsize, int inactive_sock_count)
+{
+ pj_sock_t ssock=-1, csock=-1;
+ pj_sockaddr_in addr;
+ pj_pool_t *pool = NULL;
+ pj_sock_t *inactive_sock=NULL;
+ char *send_buf, *recv_buf;
+ pj_ioqueue_t *ioque = NULL;
+ pj_ioqueue_key_t *skey, *ckey, *key;
+ pj_timestamp t1, t2, t_elapsed;
+ int rc=0, i;
+ pj_str_t temp;
+ char errbuf[128];
+
+ // Create pool.
+ pool = pj_pool_create(mem, NULL, POOL_SIZE, 4000, NULL);
+
+ // Allocate buffers for send and receive.
+ send_buf = (char*)pj_pool_alloc(pool, bufsize);
+ recv_buf = (char*)pj_pool_alloc(pool, bufsize);
+
+ // Allocate sockets for sending and receiving.
+ rc = pj_sock_socket(PJ_AF_INET, PJ_SOCK_DGRAM, 0, &ssock);
+ if (rc == PJ_SUCCESS) {
+ rc = pj_sock_socket(PJ_AF_INET, PJ_SOCK_DGRAM, 0, &csock);
+ } else
+ csock = PJ_INVALID_SOCKET;
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: pj_sock_socket()", rc);
+ goto on_error;
+ }
+
+ // Bind server socket.
+ memset(&addr, 0, sizeof(addr));
+ addr.sin_family = PJ_AF_INET;
+ addr.sin_port = pj_htons(PORT);
+ if (pj_sock_bind(ssock, &addr, sizeof(addr)))
+ goto on_error;
+
+ pj_assert(inactive_sock_count+2 <= PJ_IOQUEUE_MAX_HANDLES);
+
+ // Create I/O Queue.
+ rc = pj_ioqueue_create(pool, PJ_IOQUEUE_MAX_HANDLES,
+ PJ_IOQUEUE_DEFAULT_THREADS, &ioque);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: pj_ioqueue_create()", rc);
+ goto on_error;
+ }
+
+ // Allocate inactive sockets, and bind them to some arbitrary address.
+ // Then register them to the I/O queue, and start a read operation.
+ inactive_sock = (pj_sock_t*)pj_pool_alloc(pool,
+ inactive_sock_count*sizeof(pj_sock_t));
+ memset(&addr, 0, sizeof(addr));
+ addr.sin_family = PJ_AF_INET;
+ for (i=0; i<inactive_sock_count; ++i) {
+ rc = pj_sock_socket(PJ_AF_INET, PJ_SOCK_DGRAM, 0, &inactive_sock[i]);
+ if (rc != PJ_SUCCESS || inactive_sock[i] < 0) {
+ app_perror("...error: pj_sock_socket()", rc);
+ goto on_error;
+ }
+ if ((rc=pj_sock_bind(inactive_sock[i], &addr, sizeof(addr))) != 0) {
+ pj_sock_close(inactive_sock[i]);
+ inactive_sock[i] = PJ_INVALID_SOCKET;
+ app_perror("...error: pj_sock_bind()", rc);
+ goto on_error;
+ }
+ rc = pj_ioqueue_register_sock(pool, ioque, inactive_sock[i],
+ NULL, &test_cb, &key);
+ if (rc != PJ_SUCCESS) {
+ pj_sock_close(inactive_sock[i]);
+ inactive_sock[i] = PJ_INVALID_SOCKET;
+ app_perror("...error(1): pj_ioqueue_register_sock()", rc);
+ PJ_LOG(3,(THIS_FILE, "....i=%d", i));
+ goto on_error;
+ }
+ rc = pj_ioqueue_read(ioque, key, recv_buf, bufsize);
+ if ( rc < 0 && rc != PJ_EPENDING) {
+ pj_sock_close(inactive_sock[i]);
+ inactive_sock[i] = PJ_INVALID_SOCKET;
+ app_perror("...error: pj_ioqueue_read()", rc);
+ goto on_error;
+ }
+ }
+
+ // Register server and client socket.
+    // We register these after the inactive sockets, so that this hopefully
+    // represents the worst-case waiting time.
+ rc = pj_ioqueue_register_sock(pool, ioque, ssock, NULL,
+ &test_cb, &skey);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error(2): pj_ioqueue_register_sock()", rc);
+ goto on_error;
+ }
+
+ rc = pj_ioqueue_register_sock(pool, ioque, csock, NULL,
+ &test_cb, &ckey);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error(3): pj_ioqueue_register_sock()", rc);
+ goto on_error;
+ }
+
+ // Set destination address to send the packet.
+ pj_sockaddr_in_init(&addr, pj_cstr(&temp, "127.0.0.1"), PORT);
+
+ // Test loop.
+ t_elapsed.u64 = 0;
+ for (i=0; i<LOOP; ++i) {
+ pj_ssize_t bytes;
+
+ // Randomize send buffer.
+ pj_create_random_string(send_buf, bufsize);
+
+ // Start reading on the server side.
+ rc = pj_ioqueue_read(ioque, skey, recv_buf, bufsize);
+ if (rc < 0 && rc != PJ_EPENDING) {
+ app_perror("...error: pj_ioqueue_read()", rc);
+ break;
+ }
+
+        // Start the send on the client side.
+ bytes = pj_ioqueue_sendto(ioque, ckey, send_buf, bufsize, 0,
+ &addr, sizeof(addr));
+ if (bytes != bufsize && bytes != PJ_EPENDING) {
+            app_perror("...error: pj_ioqueue_sendto()", bytes);
+ rc = -1;
+ break;
+ }
+
+ // Begin time.
+ pj_get_timestamp(&t1);
+
+        // Poll the queue until we get a completion event on the server side.
+ callback_read_key = NULL;
+ callback_read_size = 0;
+ do {
+ rc = pj_ioqueue_poll(ioque, NULL);
+ } while (rc >= 0 && callback_read_key != skey);
+
+ // End time.
+ pj_get_timestamp(&t2);
+ t_elapsed.u64 += (t2.u64 - t1.u64);
+
+ if (rc < 0)
+ break;
+
+ // Compare recv buffer with send buffer.
+ if (callback_read_size != bufsize ||
+ memcmp(send_buf, recv_buf, bufsize))
+ {
+ rc = -1;
+ break;
+ }
+
+ // Poll until all events are exhausted, before we start the next loop.
+ do {
+ pj_time_val timeout = { 0, 10 };
+ rc = pj_ioqueue_poll(ioque, &timeout);
+ } while (rc>0);
+
+ rc = 0;
+ }
+
+ // Print results
+ if (rc == 0) {
+ pj_timestamp tzero;
+ pj_uint32_t usec_delay;
+
+ tzero.u32.hi = tzero.u32.lo = 0;
+ usec_delay = pj_elapsed_usec( &tzero, &t_elapsed);
+
+ PJ_LOG(3, (THIS_FILE, "...%10d %15d % 9d",
+ bufsize, inactive_sock_count, usec_delay));
+
+ } else {
+ PJ_LOG(2, (THIS_FILE, "...ERROR (buf:%d, fds:%d)",
+ bufsize, inactive_sock_count+2));
+ }
+
+ // Cleaning up.
+ for (i=0; i<inactive_sock_count; ++i)
+ pj_sock_close(inactive_sock[i]);
+ pj_sock_close(ssock);
+ pj_sock_close(csock);
+
+ pj_ioqueue_destroy(ioque);
+ pj_pool_release( pool);
+ return 0;
+
+on_error:
+ PJ_LOG(1,(THIS_FILE, "...ERROR: %s",
+ pj_strerror(pj_get_netos_error(), errbuf, sizeof(errbuf))));
+ if (ssock)
+ pj_sock_close(ssock);
+ if (csock)
+ pj_sock_close(csock);
+ for (i=0; i<inactive_sock_count && inactive_sock &&
+ inactive_sock[i]!=PJ_INVALID_SOCKET; ++i)
+ {
+ pj_sock_close(inactive_sock[i]);
+ }
+ if (ioque != NULL)
+ pj_ioqueue_destroy(ioque);
+ pj_pool_release( pool);
+ return -1;
+}
+
+int udp_ioqueue_test()
+{
+ int status;
+ int bufsize, sock_count;
+
+ PJ_LOG(3, (THIS_FILE, "...format test"));
+ if ((status = native_format_test()) != 0)
+ return status;
+ PJ_LOG(3, (THIS_FILE, "....native format test ok"));
+
+ PJ_LOG(3, (THIS_FILE, "...compliance test"));
+ if ((status=compliance_test()) != 0) {
+ return status;
+ }
+ PJ_LOG(3, (THIS_FILE, "....compliance test ok"));
+
+ if ((status=many_handles_test()) != 0) {
+ return status;
+ }
+
+ PJ_LOG(4, (THIS_FILE, "...benchmarking different buffer size:"));
+ PJ_LOG(4, (THIS_FILE, "... note: buf=bytes sent, fds=# of fds, "
+ "elapsed=in timer ticks"));
+
+ PJ_LOG(3, (THIS_FILE, "...Benchmarking poll times:"));
+ PJ_LOG(3, (THIS_FILE, "...====================================="));
+ PJ_LOG(3, (THIS_FILE, "...Buf.size #inactive-socks Time/poll"));
+ PJ_LOG(3, (THIS_FILE, "... (bytes) (nanosec)"));
+ PJ_LOG(3, (THIS_FILE, "...====================================="));
+
+ for (bufsize=BUF_MIN_SIZE; bufsize <= BUF_MAX_SIZE; bufsize *= 2) {
+ if (bench_test(bufsize, SOCK_INACTIVE_MIN))
+ return -1;
+ }
+ bufsize = 512;
+ for (sock_count=SOCK_INACTIVE_MIN+2;
+ sock_count<=SOCK_INACTIVE_MAX+2;
+ sock_count *= 2)
+ {
+ //PJ_LOG(3,(THIS_FILE, "...testing with %d fds", sock_count));
+ if (bench_test(bufsize, sock_count-2))
+ return -1;
+ }
+ return 0;
+}
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_uiq_udp;
+#endif /* INCLUDE_UDP_IOQUEUE_TEST */
+
+
diff --git a/pjlib/src/pjlib-test/list.c b/pjlib/src/pjlib-test/list.c
new file mode 100644
index 00000000..8390fe70
--- /dev/null
+++ b/pjlib/src/pjlib-test/list.c
@@ -0,0 +1,209 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/list.c 2 10/14/05 12:26a Bennylp $
+ */
+#include "test.h"
+
+/**
+ * \page page_pjlib_list_test Test: Linked List
+ *
+ * This file provides implementation of \b list_test(). It tests the
+ * functionality of the linked-list API.
+ *
+ * \section list_test_sec Scope of the Test
+ *
+ * API tested:
+ * - pj_list_init()
+ * - pj_list_insert_before()
+ * - pj_list_insert_after()
+ * - pj_list_merge_last()
+ * - pj_list_empty()
+ * - pj_list_insert_nodes_before()
+ * - pj_list_erase()
+ * - pj_list_find_node()
+ * - pj_list_search()
+ *
+ *
+ * This file is <b>pjlib-test/list.c</b>
+ *
+ * \include pjlib-test/list.c
+ */
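+
+/* Below is a minimal usage sketch of the list API exercised by this test.
+ * It is illustrative only; the names are hypothetical and the calls mirror
+ * the ones used in list_test() below.
+ *
+ * \code
+ *  typedef struct my_node
+ *  {
+ *      PJ_DECL_LIST_MEMBER(struct my_node)
+ *      int value;
+ *  } my_node;
+ *
+ *  my_node head, node;
+ *
+ *  pj_list_init(&head);                  // empty list (head points to itself)
+ *  node.value = 1;
+ *  pj_list_insert_before(&head, &node);  // append at the tail
+ *  pj_list_erase(&node);                 // detach the node again
+ * \endcode
+ */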
+
+#if INCLUDE_LIST_TEST
+
+#include <pjlib.h>
+
+typedef struct list_node
+{
+ PJ_DECL_LIST_MEMBER(struct list_node)
+ int value;
+} list_node;
+
+static int compare_node(void *value, const pj_list_type *nd)
+{
+ list_node *node = (list_node*)nd;
+ return ((int)value == node->value) ? 0 : -1;
+}
+
+#define PJ_SIGNED_ARRAY_SIZE(a) ((int)PJ_ARRAY_SIZE(a))
+
+int list_test()
+{
+ list_node nodes[4]; // must be even number of nodes
+ list_node list;
+ list_node list2;
+ list_node *p;
+ int i; // don't change to unsigned!
+
+ //
+ // Test insert_before().
+ //
+ list.value = (unsigned)-1;
+ pj_list_init(&list);
+ for (i=0; i<PJ_SIGNED_ARRAY_SIZE(nodes); ++i) {
+ nodes[i].value = i;
+ pj_list_insert_before(&list, &nodes[i]);
+ }
+ // check.
+ for (i=0, p=list.next; i<PJ_SIGNED_ARRAY_SIZE(nodes); ++i, p=p->next) {
+ pj_assert(p->value == i);
+ if (p->value != i) {
+ return -1;
+ }
+ }
+
+ //
+ // Test insert_after()
+ //
+ pj_list_init(&list);
+ for (i=PJ_SIGNED_ARRAY_SIZE(nodes)-1; i>=0; --i) {
+ pj_list_insert_after(&list, &nodes[i]);
+ }
+ // check.
+ for (i=0, p=list.next; i<PJ_SIGNED_ARRAY_SIZE(nodes); ++i, p=p->next) {
+ pj_assert(p->value == i);
+ if (p->value != i) {
+ return -1;
+ }
+ }
+
+ //
+ // Test merge_last()
+ //
+ // Init lists
+ pj_list_init(&list);
+ pj_list_init(&list2);
+ for (i=0; i<PJ_SIGNED_ARRAY_SIZE(nodes)/2; ++i) {
+ pj_list_insert_before(&list, &nodes[i]);
+ }
+ for (i=PJ_SIGNED_ARRAY_SIZE(nodes)/2; i<PJ_SIGNED_ARRAY_SIZE(nodes); ++i) {
+ pj_list_insert_before(&list2, &nodes[i]);
+ }
+ // merge
+ pj_list_merge_last(&list, &list2);
+ // check.
+ for (i=0, p=list.next; i<PJ_SIGNED_ARRAY_SIZE(nodes); ++i, p=p->next) {
+ pj_assert(p->value == i);
+ if (p->value != i) {
+ return -1;
+ }
+ }
+ // check list is empty
+ pj_assert( pj_list_empty(&list2) );
+ if (!pj_list_empty(&list2)) {
+ return -1;
+ }
+
+ //
+ // Check merge_first()
+ //
+ pj_list_init(&list);
+ pj_list_init(&list2);
+ for (i=0; i<PJ_SIGNED_ARRAY_SIZE(nodes)/2; ++i) {
+ pj_list_insert_before(&list, &nodes[i]);
+ }
+ for (i=PJ_SIGNED_ARRAY_SIZE(nodes)/2; i<PJ_SIGNED_ARRAY_SIZE(nodes); ++i) {
+ pj_list_insert_before(&list2, &nodes[i]);
+ }
+ // merge
+ pj_list_merge_first(&list2, &list);
+ // check (list2).
+ for (i=0, p=list2.next; i<PJ_SIGNED_ARRAY_SIZE(nodes); ++i, p=p->next) {
+ pj_assert(p->value == i);
+ if (p->value != i) {
+ return -1;
+ }
+ }
+ // check list is empty
+ pj_assert( pj_list_empty(&list) );
+ if (!pj_list_empty(&list)) {
+ return -1;
+ }
+
+ //
+ // Test insert_nodes_before()
+ //
+ // init list
+ pj_list_init(&list);
+ for (i=0; i<PJ_SIGNED_ARRAY_SIZE(nodes)/2; ++i) {
+ pj_list_insert_before(&list, &nodes[i]);
+ }
+ // chain remaining nodes
+ pj_list_init(&nodes[PJ_SIGNED_ARRAY_SIZE(nodes)/2]);
+ for (i=PJ_SIGNED_ARRAY_SIZE(nodes)/2+1; i<PJ_SIGNED_ARRAY_SIZE(nodes); ++i) {
+ pj_list_insert_before(&nodes[PJ_SIGNED_ARRAY_SIZE(nodes)/2], &nodes[i]);
+ }
+ // insert nodes
+ pj_list_insert_nodes_before(&list, &nodes[PJ_SIGNED_ARRAY_SIZE(nodes)/2]);
+ // check
+ for (i=0, p=list.next; i<PJ_SIGNED_ARRAY_SIZE(nodes); ++i, p=p->next) {
+ pj_assert(p->value == i);
+ if (p->value != i) {
+ return -1;
+ }
+ }
+
+ // erase test.
+ pj_list_init(&list);
+ for (i=0; i<PJ_SIGNED_ARRAY_SIZE(nodes); ++i) {
+ nodes[i].value = i;
+ pj_list_insert_before(&list, &nodes[i]);
+ }
+ for (i=PJ_SIGNED_ARRAY_SIZE(nodes)-1; i>=0; --i) {
+ int j;
+ pj_list_erase(&nodes[i]);
+ for (j=0, p=list.next; j<i; ++j, p=p->next) {
+ pj_assert(p->value == j);
+ if (p->value != j) {
+ return -1;
+ }
+ }
+ }
+
+ // find and search
+ pj_list_init(&list);
+ for (i=0; i<PJ_SIGNED_ARRAY_SIZE(nodes); ++i) {
+ nodes[i].value = i;
+ pj_list_insert_before(&list, &nodes[i]);
+ }
+ for (i=0; i<PJ_SIGNED_ARRAY_SIZE(nodes); ++i) {
+ p = (list_node*) pj_list_find_node(&list, &nodes[i]);
+ pj_assert( p == &nodes[i] );
+ if (p != &nodes[i]) {
+ return -1;
+ }
+ p = (list_node*) pj_list_search(&list, (void*)i, &compare_node);
+ pj_assert( p == &nodes[i] );
+ if (p != &nodes[i]) {
+ return -1;
+ }
+ }
+ return 0;
+}
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_list_test;
+#endif /* INCLUDE_LIST_TEST */
+
+
diff --git a/pjlib/src/pjlib-test/main.c b/pjlib/src/pjlib-test/main.c
new file mode 100644
index 00000000..96055100
--- /dev/null
+++ b/pjlib/src/pjlib-test/main.c
@@ -0,0 +1,73 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/main.c 4 29/10/05 21:32 Bennylp $
+ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/main.c $
+ *
+ * 4 29/10/05 21:32 Bennylp
+ * Boost process priority in Win32
+ *
+ * 3 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ */
+#include "test.h"
+
+#include <pj/string.h>
+#include <pj/sock.h>
+#include <pj/log.h>
+
+extern int param_echo_sock_type;
+extern const char *param_echo_server;
+extern int param_echo_port;
+
+
+#if defined(PJ_WIN32) && PJ_WIN32!=0
+#include <windows.h>
+static void boost(void)
+{
+ SetPriorityClass(GetCurrentProcess(), REALTIME_PRIORITY_CLASS);
+}
+#else
+#define boost()
+#endif
+
+int main(int argc, char *argv[])
+{
+ int rc;
+
+ boost();
+
+ while (argc > 1) {
+ char *arg = argv[--argc];
+
+ if (*arg=='-' && *(arg+1)=='p') {
+            pj_str_t port = pj_str(argv[argc+1]);
+
+ param_echo_port = pj_strtoul(&port);
+
+ } else if (*arg=='-' && *(arg+1)=='s') {
+            param_echo_server = argv[argc+1];
+
+ } else if (*arg=='-' && *(arg+1)=='t') {
+            pj_str_t type = pj_str(argv[argc+1]);
+
+ if (pj_stricmp2(&type, "tcp")==0)
+ param_echo_sock_type = PJ_SOCK_STREAM;
+ else if (pj_stricmp2(&type, "udp")==0)
+ param_echo_sock_type = PJ_SOCK_DGRAM;
+ else {
+ PJ_LOG(3,("", "error: unknown socket type %s", type.ptr));
+ return 1;
+ }
+ }
+ }
+
+ rc = test_main();
+
+ return rc;
+}
+
diff --git a/pjlib/src/pjlib-test/main_mod.c b/pjlib/src/pjlib-test/main_mod.c
new file mode 100644
index 00000000..45410184
--- /dev/null
+++ b/pjlib/src/pjlib-test/main_mod.c
@@ -0,0 +1,33 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/main_mod.c 2 10/29/05 11:51a Bennylp $
+ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/main_mod.c $
+ *
+ * 2 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 1 10/05/05 5:12p Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+int init_module(void)
+{
+ printk(KERN_INFO "PJLIB test module loaded. Starting tests...\n");
+
+ test_main();
+
+    /* Return non-zero so the module is not kept loaded; the tests have
+     * already completed anyway.
+     */
+ return 1;
+}
+
+void cleanup_module(void)
+{
+ printk(KERN_INFO "PJLIB test module unloading...\n");
+}
+
+MODULE_LICENSE("GPL");
+
diff --git a/pjlib/src/pjlib-test/mutex.c b/pjlib/src/pjlib-test/mutex.c
new file mode 100644
index 00000000..b6609b8d
--- /dev/null
+++ b/pjlib/src/pjlib-test/mutex.c
@@ -0,0 +1,164 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/mutex.c 1 10/23/05 12:52p Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/mutex.c $
+ *
+ * 1 10/23/05 12:52p Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+#include <pjlib.h>
+
+#if INCLUDE_MUTEX_TEST
+
+#undef TRACE_
+//#define TRACE_(x) PJ_LOG(3,x)
+#define TRACE_(x)
+
+/* Test with non-recursive mutex. */
+static int simple_mutex_test(pj_pool_t *pool)
+{
+ pj_status_t rc;
+ pj_mutex_t *mutex;
+
+ PJ_LOG(3,("", "...testing simple mutex"));
+
+ /* Create mutex. */
+ TRACE_(("", "....create mutex"));
+ rc = pj_mutex_create( pool, "", PJ_MUTEX_SIMPLE, &mutex);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: pj_mutex_create", rc);
+ return -10;
+ }
+
+ /* Normal lock/unlock cycle. */
+ TRACE_(("", "....lock mutex"));
+ rc = pj_mutex_lock(mutex);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: pj_mutex_lock", rc);
+ return -20;
+ }
+ TRACE_(("", "....unlock mutex"));
+ rc = pj_mutex_unlock(mutex);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: pj_mutex_unlock", rc);
+ return -30;
+ }
+
+ /* Lock again. */
+ TRACE_(("", "....lock mutex"));
+ rc = pj_mutex_lock(mutex);
+ if (rc != PJ_SUCCESS) return -40;
+
+    /* Try-lock should fail for a truly non-recursive mutex; in any case
+     * it must not deadlock.
+     */
+ TRACE_(("", "....trylock mutex"));
+ rc = pj_mutex_trylock(mutex);
+ if (rc == PJ_SUCCESS)
+ PJ_LOG(3,("", "...info: looks like simple mutex is recursive"));
+
+ /* Unlock and done. */
+ TRACE_(("", "....unlock mutex"));
+ rc = pj_mutex_unlock(mutex);
+ if (rc != PJ_SUCCESS) return -50;
+
+ TRACE_(("", "....destroy mutex"));
+ rc = pj_mutex_destroy(mutex);
+ if (rc != PJ_SUCCESS) return -60;
+
+ TRACE_(("", "....done"));
+ return PJ_SUCCESS;
+}
+
+
+/* Test with recursive mutex. */
+static int recursive_mutex_test(pj_pool_t *pool)
+{
+ pj_status_t rc;
+ pj_mutex_t *mutex;
+
+ PJ_LOG(3,("", "...testing recursive mutex"));
+
+ /* Create mutex. */
+ TRACE_(("", "....create mutex"));
+ rc = pj_mutex_create( pool, "", PJ_MUTEX_RECURSE, &mutex);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: pj_mutex_create", rc);
+ return -10;
+ }
+
+ /* Normal lock/unlock cycle. */
+ TRACE_(("", "....lock mutex"));
+ rc = pj_mutex_lock(mutex);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: pj_mutex_lock", rc);
+ return -20;
+ }
+ TRACE_(("", "....unlock mutex"));
+ rc = pj_mutex_unlock(mutex);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: pj_mutex_unlock", rc);
+ return -30;
+ }
+
+ /* Lock again. */
+ TRACE_(("", "....lock mutex"));
+ rc = pj_mutex_lock(mutex);
+ if (rc != PJ_SUCCESS) return -40;
+
+    /* Try-lock should NOT fail on a recursive mutex. */
+ TRACE_(("", "....trylock mutex"));
+ rc = pj_mutex_trylock(mutex);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: recursive mutex is not recursive!", rc);
+ return -40;
+ }
+
+ /* Locking again should not fail. */
+ TRACE_(("", "....lock mutex"));
+ rc = pj_mutex_lock(mutex);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: recursive mutex is not recursive!", rc);
+ return -45;
+ }
+
+ /* Unlock several times and done. */
+ TRACE_(("", "....unlock mutex 3x"));
+ rc = pj_mutex_unlock(mutex);
+ if (rc != PJ_SUCCESS) return -50;
+ rc = pj_mutex_unlock(mutex);
+ if (rc != PJ_SUCCESS) return -51;
+ rc = pj_mutex_unlock(mutex);
+ if (rc != PJ_SUCCESS) return -52;
+
+ TRACE_(("", "....destroy mutex"));
+ rc = pj_mutex_destroy(mutex);
+ if (rc != PJ_SUCCESS) return -60;
+
+ TRACE_(("", "....done"));
+ return PJ_SUCCESS;
+}
+
+int mutex_test(void)
+{
+ pj_pool_t *pool;
+ int rc;
+
+ pool = pj_pool_create(mem, "", 4000, 4000, NULL);
+
+ rc = simple_mutex_test(pool);
+ if (rc != 0)
+ return rc;
+
+ rc = recursive_mutex_test(pool);
+ if (rc != 0)
+ return rc;
+
+ pj_pool_release(pool);
+
+ return 0;
+}
+
+#else
+int dummy_mutex_test;
+#endif
+
diff --git a/pjlib/src/pjlib-test/os.c b/pjlib/src/pjlib-test/os.c
new file mode 100644
index 00000000..893cfc69
--- /dev/null
+++ b/pjlib/src/pjlib-test/os.c
@@ -0,0 +1,10 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/os.c 2 10/14/05 12:26a Bennylp $
+ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/os.c $
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ */
diff --git a/pjlib/src/pjlib-test/pool.c b/pjlib/src/pjlib-test/pool.c
new file mode 100644
index 00000000..8b9d1ff0
--- /dev/null
+++ b/pjlib/src/pjlib-test/pool.c
@@ -0,0 +1,164 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/pool.c 2 10/14/05 12:26a Bennylp $
+ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/pool.c $
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ */
+#include <pj/pool.h>
+#include <pj/rand.h>
+#include <pj/log.h>
+#include "test.h"
+
+/**
+ * \page page_pjlib_pool_test Test: Pool
+ *
+ * This file provides implementation of \b pool_test(). It tests the
+ * functionality of the memory pool.
+ *
+ *
+ * This file is <b>pjlib-test/pool.c</b>
+ *
+ * \include pjlib-test/pool.c
+ */
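+
+/* A minimal pool lifecycle sketch (illustrative only; it assumes the same
+ * `mem` pool factory that the tests below use):
+ *
+ * \code
+ *  pj_pool_t *pool = pj_pool_create(mem, NULL, 4096, 1024, NULL);
+ *  if (pool) {
+ *      char *buf = (char*) pj_pool_alloc(pool, 128);
+ *      // ...use buf; individual allocations are never freed...
+ *      pj_pool_release(pool);   // returns the whole pool at once
+ *  }
+ * \endcode
+ */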
+
+
+#if INCLUDE_POOL_TEST
+
+#define SIZE 4096
+
+/* Normally the pool callback would throw an exception when an allocation
+ * fails. Here we do nothing, so control returns to the original caller,
+ * which then tests the result against NULL. (A real caller would catch the
+ * exception instead of checking for NULL.)
+ */
+static void null_callback(pj_pool_t *pool, pj_size_t size)
+{
+ PJ_UNUSED_ARG(pool);
+ PJ_UNUSED_ARG(size);
+}
+
+#define GET_FREE(p) (pj_pool_get_capacity(p)-pj_pool_get_used_size(p))
+
+/* Test that the capacity and used size reported by the pool is correct.
+ */
+static int capacity_test(void)
+{
+ pj_pool_t *pool = pj_pool_create(mem, NULL, SIZE, 0, &null_callback);
+ pj_size_t freesize;
+
+ PJ_LOG(3,("test", "...capacity_test()"));
+
+ if (!pool)
+ return -200;
+
+ freesize = GET_FREE(pool);
+
+ if (pj_pool_alloc(pool, freesize) == NULL) {
+ PJ_LOG(3,("test", "...error: wrong freesize %u reported",
+ freesize));
+ pj_pool_release(pool);
+ return -210;
+ }
+
+ pj_pool_release(pool);
+ return 0;
+}
+
+/* Test function to drain the pool's space.
+ */
+static int drain_test(pj_size_t size, pj_size_t increment)
+{
+ pj_pool_t *pool = pj_pool_create(mem, NULL, size, increment,
+ &null_callback);
+ pj_size_t freesize;
+ void *p;
+ int status = 0;
+
+ PJ_LOG(3,("test", "...drain_test(%d,%d)", size, increment));
+
+ if (!pool)
+ return -10;
+
+ /* Get free size */
+ freesize = GET_FREE(pool);
+ if (freesize < 1) {
+ status=-15;
+ goto on_error;
+ }
+
+ /* Drain the pool until there's nothing left. */
+ while (freesize > 0) {
+ int size;
+
+ if (freesize > 255)
+ size = ((pj_rand() & 0x000000FF) + 4) & ~0x03L;
+ else
+ size = freesize;
+
+ p = pj_pool_alloc(pool, size);
+ if (!p) {
+ status=-20; goto on_error;
+ }
+
+ freesize -= size;
+ }
+
+ /* Check that capacity is zero. */
+ if (GET_FREE(pool) != 0) {
+ PJ_LOG(3,("test", "....error: returned free=%u (expecting 0)",
+ GET_FREE(pool)));
+ status=-30; goto on_error;
+ }
+
+ /* Try to allocate once more */
+ p = pj_pool_alloc(pool, 257);
+ if (!p) {
+ status=-40; goto on_error;
+ }
+
+ /* Check that capacity is NOT zero. */
+ if (GET_FREE(pool) == 0) {
+ status=-50; goto on_error;
+ }
+
+
+on_error:
+ pj_pool_release(pool);
+ return status;
+}
+
+int pool_test(void)
+{
+ enum { LOOP = 2 };
+ int loop;
+ int rc;
+
+ rc = capacity_test();
+ if (rc) return rc;
+
+ for (loop=0; loop<LOOP; ++loop) {
+        /* Test that the pool should grow automatically. */
+ rc = drain_test(SIZE, SIZE);
+ if (rc != 0) return rc;
+
+        /* Test the situation where the pool is not allowed to grow.
+         * We expect drain_test() to fail with -40, i.e. the allocation that
+         * would require the pool to grow must return NULL.
+         */
+ rc = drain_test(SIZE, 0);
+ if (rc != -40) return rc;
+ }
+
+ return 0;
+}
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_pool_test;
+#endif /* INCLUDE_POOL_TEST */
+
diff --git a/pjlib/src/pjlib-test/pool_perf.c b/pjlib/src/pjlib-test/pool_perf.c
new file mode 100644
index 00000000..76e45606
--- /dev/null
+++ b/pjlib/src/pjlib-test/pool_perf.c
@@ -0,0 +1,134 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/pool_perf.c 2 10/14/05 12:26a Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/pool_perf.c $
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/05/05 5:13p Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+
+#if INCLUDE_POOL_PERF_TEST
+
+#include <pjlib.h>
+#include <pj/compat/malloc.h>
+
+#if !PJ_HAS_HIGH_RES_TIMER
+# error Need high resolution timer for this test.
+#endif
+
+#define THIS_FILE "test"
+
+#define LOOP 10
+#define COUNT 1024
+static unsigned sizes[COUNT];
+#define MIN_SIZE 4
+#define MAX_SIZE 512
+static unsigned total_size;
+
+static int pool_test_pool()
+{
+ int i;
+ pj_pool_t *pool = pj_pool_create(mem, NULL, total_size + 4*COUNT, 0, NULL);
+ if (!pool)
+ return -1;
+
+ for (i=0; i<COUNT; ++i) {
+ char *p;
+ if ( (p=(char*)pj_pool_alloc(pool, sizes[i])) == NULL)
+ return -1;
+ *p = '\0';
+ }
+
+ pj_pool_release(pool);
+ return 0;
+}
+
+static int pool_test_malloc_free()
+{
+ char *p[COUNT];
+ int i;
+
+ for (i=0; i<COUNT; ++i) {
+ p[i] = (char*)malloc(sizes[i]);
+ if (!p[i]) {
+ // Don't care for memory leak in this test
+ return -1;
+ }
+ *p[i] = '\0';
+ }
+
+ for (i=0; i<COUNT; ++i) {
+ free(p[i]);
+ }
+
+ return 0;
+}
+
+int pool_perf_test()
+{
+ unsigned i;
+ pj_uint32_t pool_time=0, malloc_time=0, pool_time2=0;
+ pj_timestamp start, end;
+ pj_uint32_t best, worst;
+
+ // Initialize sizes.
+ for (i=0; i<COUNT; ++i) {
+ sizes[i] = MIN_SIZE + pj_rand() % MAX_SIZE;
+ total_size += sizes[i];
+ }
+
+ PJ_LOG(3, (THIS_FILE, "Benchmarking pool.."));
+
+ // Warmup
+ pool_test_pool();
+ pool_test_malloc_free();
+
+ for (i=0; i<LOOP; ++i) {
+ pj_get_timestamp(&start);
+ if (pool_test_pool()) {
+ return 1;
+ }
+ pj_get_timestamp(&end);
+ pool_time += (end.u32.lo - start.u32.lo);
+
+ pj_get_timestamp(&start);
+ if (pool_test_malloc_free()) {
+ return 2;
+ }
+ pj_get_timestamp(&end);
+ malloc_time += (end.u32.lo - start.u32.lo);
+
+ pj_get_timestamp(&start);
+ if (pool_test_pool()) {
+ return 4;
+ }
+ pj_get_timestamp(&end);
+ pool_time2 += (end.u32.lo - start.u32.lo);
+ }
+
+ PJ_LOG(4, (THIS_FILE, "..LOOP count: %u", LOOP));
+ PJ_LOG(4, (THIS_FILE, "..number of alloc/dealloc per loop: %u", COUNT));
+ PJ_LOG(4, (THIS_FILE, "..pool allocation/deallocation time: %u", pool_time));
+ PJ_LOG(4, (THIS_FILE, "..malloc/free time: %u", malloc_time));
+ PJ_LOG(4, (THIS_FILE, "..pool again, second invocation: %u", pool_time2));
+
+ if (pool_time2==0) pool_time2=1;
+ if (pool_time < pool_time2)
+ best = pool_time, worst = pool_time2;
+ else
+ best = pool_time2, worst = pool_time;
+
+    PJ_LOG(3, (THIS_FILE, "..pool speedup over malloc: best=%dx, worst=%dx",
+ (int)(malloc_time/best),
+ (int)(malloc_time/worst)));
+ return 0;
+}
+
+
+#endif /* INCLUDE_POOL_PERF_TEST */
+
diff --git a/pjlib/src/pjlib-test/rand.c b/pjlib/src/pjlib-test/rand.c
new file mode 100644
index 00000000..25f7a47e
--- /dev/null
+++ b/pjlib/src/pjlib-test/rand.c
@@ -0,0 +1,43 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/rand.c 1 10/05/05 5:13p Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/rand.c $
+ *
+ * 1 10/05/05 5:13p Bennylp
+ * Created.
+ *
+ */
+#include <pj/rand.h>
+#include <pj/log.h>
+#include "test.h"
+
+#if INCLUDE_RAND_TEST
+
+#define COUNT 1024
+static int values[COUNT];
+
+/*
+ * rand_test() simply generates COUNT random numbers and checks that
+ * there are no duplicate values.
+ */
+int rand_test(void)
+{
+ int i;
+
+ for (i=0; i<COUNT; ++i) {
+ int j;
+
+ values[i] = pj_rand();
+ for (j=0; j<i; ++j) {
+ if (values[i] == values[j]) {
+ PJ_LOG(3,("test", "error: duplicate value %d at %d-th index",
+ values[i], i));
+ return -10;
+ }
+ }
+ }
+
+ return 0;
+}
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_rand_test;
+#endif /* INCLUDE_RAND_TEST */
+
diff --git a/pjlib/src/pjlib-test/rbtree.c b/pjlib/src/pjlib-test/rbtree.c
new file mode 100644
index 00000000..4b1fd4a4
--- /dev/null
+++ b/pjlib/src/pjlib-test/rbtree.c
@@ -0,0 +1,150 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/rbtree.c 2 10/14/05 12:26a Bennylp $ */
+#include "test.h"
+
+#if INCLUDE_RBTREE_TEST
+
+#include <pjlib.h>
+
+#define LOOP 32
+#define MIN_COUNT 64
+#define MAX_COUNT (LOOP * MIN_COUNT)
+#define STRSIZE 16
+#define THIS_FILE "rbtree_test"
+
+typedef struct node_key
+{
+ pj_uint32_t hash;
+ char str[STRSIZE];
+} node_key;
+
+static int compare_node(const node_key *k1, const node_key *k2)
+{
+ if (k1->hash == k2->hash) {
+ return strcmp(k1->str, k2->str);
+ } else {
+ return k1->hash < k2->hash ? -1 : 1;
+ }
+}
+
+static void randomize_string(char *str, int len)
+{
+ int i;
+ for (i=0; i<len-1; ++i)
+ str[i] = (char)('a' + pj_rand() % 26);
+ str[len-1] = '\0';
+}
+
+static int test(void)
+{
+ pj_rbtree rb;
+ node_key *key;
+ pj_rbtree_node *node;
+ pj_pool_t *pool;
+ int err=0;
+ int count = MIN_COUNT;
+ int i;
+ unsigned size;
+
+ pj_rbtree_init(&rb, (pj_rbtree_comp*)&compare_node);
+ size = MAX_COUNT*(sizeof(*key)+PJ_RBTREE_NODE_SIZE) +
+ PJ_RBTREE_SIZE + PJ_POOL_SIZE;
+ pool = pj_pool_create( mem, "pool", size, 0, NULL);
+ if (!pool) {
+ PJ_LOG(3,("test", "...error: creating pool of %u bytes", size));
+ return -10;
+ }
+
+ key = (node_key *)pj_pool_alloc(pool, MAX_COUNT*sizeof(*key));
+ if (!key)
+ return -20;
+
+ node = (pj_rbtree_node*)pj_pool_alloc(pool, MAX_COUNT*sizeof(*node));
+ if (!node)
+ return -30;
+
+ for (i=0; i<LOOP; ++i) {
+ int j;
+ pj_rbtree_node *prev, *it;
+ pj_timestamp t1, t2, t_setup, t_insert, t_search, t_erase;
+
+ pj_assert(rb.size == 0);
+
+ t_setup.u32.lo = t_insert.u32.lo = t_search.u32.lo = t_erase.u32.lo = 0;
+
+ for (j=0; j<count; j++) {
+ randomize_string(key[j].str, STRSIZE);
+
+ pj_get_timestamp(&t1);
+ node[j].key = &key[j];
+ node[j].user_data = key[j].str;
+ key[j].hash = pj_hash_calc(0, key[j].str, PJ_HASH_KEY_STRING);
+ pj_get_timestamp(&t2);
+ t_setup.u32.lo += (t2.u32.lo - t1.u32.lo);
+
+ pj_get_timestamp(&t1);
+ pj_rbtree_insert(&rb, &node[j]);
+ pj_get_timestamp(&t2);
+ t_insert.u32.lo += (t2.u32.lo - t1.u32.lo);
+ }
+
+ pj_assert(rb.size == (unsigned)count);
+
+ // Iterate key, make sure they're sorted.
+ prev = NULL;
+ it = pj_rbtree_first(&rb);
+ while (it) {
+ if (prev) {
+ if (compare_node((node_key*)prev->key,(node_key*)it->key)>=0) {
+ ++err;
+ PJ_LOG(3, (THIS_FILE, "Error: %s >= %s",
+ (char*)prev->user_data, (char*)it->user_data));
+ }
+ }
+ prev = it;
+ it = pj_rbtree_next(&rb, it);
+ }
+
+ // Search.
+ for (j=0; j<count; j++) {
+ pj_get_timestamp(&t1);
+ it = pj_rbtree_find(&rb, &key[j]);
+ pj_get_timestamp(&t2);
+ t_search.u32.lo += (t2.u32.lo - t1.u32.lo);
+
+ pj_assert(it != NULL);
+ if (it == NULL)
+ ++err;
+ }
+
+ // Erase node.
+ for (j=0; j<count; j++) {
+ pj_get_timestamp(&t1);
+ it = pj_rbtree_erase(&rb, &node[j]);
+ pj_get_timestamp(&t2);
+ t_erase.u32.lo += (t2.u32.lo - t1.u32.lo);
+ }
+
+ PJ_LOG(4, (THIS_FILE,
+ "...count:%d, setup:%d, insert:%d, search:%d, erase:%d",
+ count,
+ t_setup.u32.lo / count, t_insert.u32.lo / count,
+ t_search.u32.lo / count, t_erase.u32.lo / count));
+
+ count = 2 * count;
+ if (count > MAX_COUNT)
+ break;
+ }
+
+ pj_pool_release(pool);
+ return err;
+}
+
+
+int rbtree_test()
+{
+ return test();
+}
+
+#endif /* INCLUDE_RBTREE_TEST */
+
+
diff --git a/pjlib/src/pjlib-test/select.c b/pjlib/src/pjlib-test/select.c
new file mode 100644
index 00000000..e6562d2c
--- /dev/null
+++ b/pjlib/src/pjlib-test/select.c
@@ -0,0 +1,208 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/select.c 2 10/14/05 12:26a Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/select.c $
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ */
+#include "test.h"
+
+/**
+ * \page page_pjlib_select_test Test: Socket Select()
+ *
+ * This file provides implementation of \b select_test(). It tests the
+ * functionality of the pj_sock_select() API.
+ *
+ *
+ * This file is <b>pjlib-test/select.c</b>
+ *
+ * \include pjlib-test/select.c
+ */
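+
+/* A sketch of the typical pj_sock_select() call sequence, following the
+ * pattern of do_select() below. It is illustrative only; `sock` is a
+ * hypothetical, already created socket, and all three fd_set arguments are
+ * passed just as this test does.
+ *
+ * \code
+ *  pj_fd_set_t rdset, wrset, exset;
+ *  pj_time_val timeout = { 1, 0 };   // one second
+ *  int n;
+ *
+ *  PJ_FD_ZERO(&rdset); PJ_FD_ZERO(&wrset); PJ_FD_ZERO(&exset);
+ *  PJ_FD_SET(sock, &rdset);
+ *  n = pj_sock_select(FD_SETSIZE, &rdset, &wrset, &exset, &timeout);
+ *  if (n > 0 && PJ_FD_ISSET(sock, &rdset)) {
+ *      // the socket is readable
+ *  }
+ * \endcode
+ */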
+
+
+#if INCLUDE_SELECT_TEST
+
+#include <pj/sock.h>
+#include <pj/sock_select.h>
+#include <pj/log.h>
+#include <pj/string.h>
+#include <pj/assert.h>
+#include <pj/os.h>
+#include <pj/errno.h>
+
+enum
+{
+ READ_FDS,
+ WRITE_FDS,
+ EXCEPT_FDS
+};
+
+#define UDP_PORT 51232
+#define THIS_FILE "select_test"
+
+/*
+ * do_select()
+ *
+ * Perform pj_sock_select() and find out which sockets
+ * are signalled.
+ */
+static int do_select( pj_sock_t sock1, pj_sock_t sock2,
+ int setcount[])
+{
+ pj_fd_set_t fds[3];
+ pj_time_val timeout;
+ int i, n;
+
+ for (i=0; i<3; ++i) {
+ PJ_FD_ZERO(&fds[i]);
+ PJ_FD_SET(sock1, &fds[i]);
+ PJ_FD_SET(sock2, &fds[i]);
+ setcount[i] = 0;
+ }
+
+ timeout.sec = 1;
+ timeout.msec = 0;
+
+ n = pj_sock_select(FD_SETSIZE, &fds[0], &fds[1], &fds[2],
+ &timeout);
+ if (n < 0)
+ return n;
+ if (n == 0)
+ return 0;
+
+ for (i=0; i<3; ++i) {
+ if (PJ_FD_ISSET(sock1, &fds[i]))
+ setcount[i]++;
+ if (PJ_FD_ISSET(sock2, &fds[i]))
+ setcount[i]++;
+ }
+
+ return n;
+}
+
+/*
+ * select_test()
+ *
+ * Test main entry.
+ */
+int select_test()
+{
+ pj_sock_t udp1=PJ_INVALID_SOCKET, udp2=PJ_INVALID_SOCKET;
+ pj_sockaddr_in udp_addr;
+ int status;
+ int setcount[3];
+ pj_str_t s;
+ const char data[] = "hello";
+ const int datalen = 5;
+ pj_ssize_t sent, received;
+ char buf[10];
+ pj_status_t rc;
+
+ PJ_LOG(3, (THIS_FILE, "...Testing simple UDP select()"));
+
+ // Create two UDP sockets.
+ rc = pj_sock_socket( PJ_AF_INET, PJ_SOCK_DGRAM, 0, &udp1);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: unable to create socket", rc);
+ status=-10; goto on_return;
+ }
+ rc = pj_sock_socket( PJ_AF_INET, PJ_SOCK_DGRAM, 0, &udp2);
+    if (rc != PJ_SUCCESS) {
+ app_perror("...error: unable to create socket", rc);
+ status=-20; goto on_return;
+ }
+
+ // Bind one of the UDP socket.
+ pj_memset(&udp_addr, 0, sizeof(udp_addr));
+ udp_addr.sin_family = PJ_AF_INET;
+    udp_addr.sin_port = pj_htons(UDP_PORT);
+ udp_addr.sin_addr = pj_inet_addr(pj_cstr(&s, "127.0.0.1"));
+
+ if (pj_sock_bind(udp2, &udp_addr, sizeof(udp_addr))) {
+ status=-30; goto on_return;
+ }
+
+ // Send data.
+ sent = datalen;
+ rc = pj_sock_sendto(udp1, data, &sent, 0, &udp_addr, sizeof(udp_addr));
+ if (rc != PJ_SUCCESS || sent != datalen) {
+ app_perror("...error: sendto() error", rc);
+ status=-40; goto on_return;
+ }
+
+    // Check that socket is marked as readable.
+ // Note that select() may also report that sockets are writable.
+ status = do_select(udp1, udp2, setcount);
+ if (status < 0) {
+ char errbuf[128];
+ pj_strerror(pj_get_netos_error(), errbuf, sizeof(errbuf));
+ PJ_LOG(1,(THIS_FILE, "...error: %s", errbuf));
+ status=-50; goto on_return;
+ }
+ if (status == 0) {
+ status=-60; goto on_return;
+ }
+
+ if (setcount[READ_FDS] != 1) {
+ status=-70; goto on_return;
+ }
+ if (setcount[WRITE_FDS] != 0) {
+ if (setcount[WRITE_FDS] == 2) {
+ PJ_LOG(3,(THIS_FILE, "...info: system reports writable sockets"));
+ } else {
+ status=-80; goto on_return;
+ }
+ } else {
+ PJ_LOG(3,(THIS_FILE,
+ "...info: system doesn't report writable sockets"));
+ }
+ if (setcount[EXCEPT_FDS] != 0) {
+ status=-90; goto on_return;
+ }
+
+ // Read the socket to clear readable sockets.
+ received = sizeof(buf);
+ rc = pj_sock_recv(udp2, buf, &received, 0);
+ if (rc != PJ_SUCCESS || received != 5) {
+ status=-100; goto on_return;
+ }
+
+ status = 0;
+
+ // Test timeout on the read part.
+ // This won't necessarily return zero, as select() may report that
+ // sockets are writable.
+ setcount[0] = setcount[1] = setcount[2] = 0;
+ status = do_select(udp1, udp2, setcount);
+ if (status != 0 && status != setcount[WRITE_FDS]) {
+        PJ_LOG(3,(THIS_FILE, "...error: expecting timeout but got %d socket(s) set",
+ status));
+ PJ_LOG(3,(THIS_FILE, " rdset: %d, wrset: %d, exset: %d",
+ setcount[0], setcount[1], setcount[2]));
+ status = -110; goto on_return;
+ }
+ if (setcount[READ_FDS] != 0) {
+ PJ_LOG(3,(THIS_FILE, "...error: readable socket not expected"));
+ status = -120; goto on_return;
+ }
+
+ status = 0;
+
+on_return:
+ if (udp1 != PJ_INVALID_SOCKET)
+ pj_sock_close(udp1);
+ if (udp2 != PJ_INVALID_SOCKET)
+ pj_sock_close(udp2);
+ return status;
+}
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_select_test;
+#endif /* INCLUDE_SELECT_TEST */
+
+
diff --git a/pjlib/src/pjlib-test/sleep.c b/pjlib/src/pjlib-test/sleep.c
new file mode 100644
index 00000000..95fa3bac
--- /dev/null
+++ b/pjlib/src/pjlib-test/sleep.c
@@ -0,0 +1,198 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/sleep.c 3 10/29/05 11:51a Bennylp $
+ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/sleep.c $
+ *
+ * 3 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/11/05 12:53a Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+
+/**
+ * \page page_pjlib_sleep_test Test: Sleep, Time, and Timestamp
+ *
+ * This file provides implementation of \b sleep_test().
+ *
+ * \section sleep_test_sec Scope of the Test
+ *
+ * This tests:
+ * - whether pj_thread_sleep() works.
+ * - whether pj_gettimeofday() works.
+ * - whether pj_get_timestamp() and friends works.
+ *
+ * API tested:
+ * - pj_thread_sleep()
+ * - pj_gettimeofday()
+ * - PJ_TIME_VAL_SUB()
+ * - PJ_TIME_VAL_LTE()
+ * - pj_get_timestamp()
+ * - pj_get_timestamp_freq() (implicitly)
+ * - pj_elapsed_time()
+ * - pj_elapsed_usec()
+ *
+ *
+ * This file is <b>pjlib-test/sleep.c</b>
+ *
+ * \include pjlib-test/sleep.c
+ */
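+
+/* A sketch of measuring a sleep with the timestamp API, as exercised by
+ * sleep_duration_test() below (illustrative only):
+ *
+ * \code
+ *  pj_timestamp t1, t2;
+ *  pj_time_val elapsed;
+ *
+ *  pj_get_timestamp(&t1);
+ *  pj_thread_sleep(500);                    // milliseconds
+ *  pj_get_timestamp(&t2);
+ *  elapsed = pj_elapsed_time(&t1, &t2);     // pj_time_val (sec + msec)
+ *  // PJ_TIME_VAL_MSEC(elapsed) should be roughly 500 here
+ * \endcode
+ */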
+
+#if INCLUDE_SLEEP_TEST
+
+#include <pjlib.h>
+
+#define THIS_FILE "sleep_test"
+
+static int simple_sleep_test(void)
+{
+ enum { COUNT = 5 };
+ int i;
+ pj_status_t rc;
+
+ PJ_LOG(3,(THIS_FILE, "..will write messages every 1 second:"));
+
+ for (i=0; i<COUNT; ++i) {
+ rc = pj_thread_sleep(1000);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: pj_thread_sleep()", rc);
+ return -10;
+ }
+ PJ_LOG(3,(THIS_FILE, "...wake up.."));
+ }
+
+ return 0;
+}
+
+static int sleep_duration_test(void)
+{
+ enum { MIS = 20, DURATION = 1000, DURATION2 = 500 };
+ pj_status_t rc;
+
+ PJ_LOG(3,(THIS_FILE, "..running sleep duration test"));
+
+ /* Test pj_thread_sleep() and pj_gettimeofday() */
+ {
+ pj_time_val start, stop;
+ pj_uint32_t msec;
+
+ /* Mark start of test. */
+ rc = pj_gettimeofday(&start);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: pj_gettimeofday()", rc);
+ return -10;
+ }
+
+ /* Sleep */
+ rc = pj_thread_sleep(DURATION);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: pj_thread_sleep()", rc);
+ return -20;
+ }
+
+ /* Mark end of test. */
+ rc = pj_gettimeofday(&stop);
+
+ /* Calculate duration (store in stop). */
+ PJ_TIME_VAL_SUB(stop, start);
+
+ /* Convert to msec. */
+ msec = PJ_TIME_VAL_MSEC(stop);
+
+ /* Check if it's within range. */
+ if (msec < DURATION * (100-MIS)/100 ||
+ msec > DURATION * (100+MIS)/100)
+ {
+ PJ_LOG(3,(THIS_FILE,
+ "...error: slept for %d ms instead of %d ms "
+ "(outside %d%% err window)",
+ msec, DURATION, MIS));
+ return -30;
+ }
+ }
+
+
+ /* Test pj_thread_sleep() and pj_get_timestamp() and friends */
+ {
+ pj_time_val t1, t2;
+ pj_timestamp start, stop;
+ pj_time_val elapsed;
+ pj_uint32_t msec;
+
+ /* Mark start of test. */
+ rc = pj_get_timestamp(&start);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: pj_get_timestamp()", rc);
+ return -60;
+ }
+
+ /* ..also with gettimeofday() */
+ pj_gettimeofday(&t1);
+
+ /* Sleep */
+ rc = pj_thread_sleep(DURATION2);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: pj_thread_sleep()", rc);
+ return -70;
+ }
+
+ /* Mark end of test. */
+ pj_get_timestamp(&stop);
+
+ /* ..also with gettimeofday() */
+ pj_gettimeofday(&t2);
+
+ /* Compare t1 and t2. */
+ if (PJ_TIME_VAL_LTE(t2, t1)) {
+ PJ_LOG(3,(THIS_FILE, "...error: t2 is less than t1!!"));
+ return -75;
+ }
+
+ /* Get elapsed time in time_val */
+ elapsed = pj_elapsed_time(&start, &stop);
+
+ msec = PJ_TIME_VAL_MSEC(elapsed);
+
+ /* Check if it's within range. */
+ if (msec < DURATION2 * (100-MIS)/100 ||
+ msec > DURATION2 * (100+MIS)/100)
+ {
+ PJ_LOG(3,(THIS_FILE,
+ "...error: slept for %d ms instead of %d ms "
+ "(outside %d%% err window)",
+ msec, DURATION2, MIS));
+ return -30;
+ }
+ }
+
+ /* All done. */
+ return 0;
+}
+
+int sleep_test()
+{
+ int rc;
+
+ rc = simple_sleep_test();
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ rc = sleep_duration_test();
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ return 0;
+}
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_sleep_test;
+#endif /* INCLUDE_SLEEP_TEST */
diff --git a/pjlib/src/pjlib-test/sock.c b/pjlib/src/pjlib-test/sock.c
new file mode 100644
index 00000000..9135a8bb
--- /dev/null
+++ b/pjlib/src/pjlib-test/sock.c
@@ -0,0 +1,459 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/sock.c 4 10/29/05 11:51a Bennylp $ */
+/* $Log: /pjproject-0.3/pjlib/src/pjlib-test/sock.c $
+ *
+ * 4 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 3 14/10/05 11:31 Bennylp
+ * Fixed bug when TCP data is received in chunks.
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 9/21/05 1:38p Bennylp
+ * Renamed from *.cpp
+ *
+ * 2 9/17/05 10:37a Bennylp
+ * Major reorganization towards version 0.3.
+ *
+ * 1 9/15/05 8:41p Bennylp
+ */
+#include <pjlib.h>
+#include "test.h"
+
+
+/**
+ * \page page_pjlib_sock_test Test: Socket
+ *
+ * This file provides implementation of \b sock_test(). It tests the
+ * various aspects of the socket API.
+ *
+ * \section sock_test_scope_sec Scope of the Test
+ *
+ * The scope of the test:
+ * - verify the validity of the address structs.
+ * - verify that address manipulation API works.
+ * - simple socket creation and destruction.
+ * - simple socket send/recv and sendto/recvfrom.
+ * - UDP connect()
+ * - send/recv big data.
+ * - all for both UDP and TCP.
+ *
+ * The APIs tested in this test:
+ * - pj_inet_aton()
+ * - pj_inet_ntoa()
+ * - pj_gethostname()
+ * - pj_sock_socket()
+ * - pj_sock_close()
+ * - pj_sock_send()
+ * - pj_sock_sendto()
+ * - pj_sock_recv()
+ * - pj_sock_recvfrom()
+ * - pj_sock_bind()
+ * - pj_sock_connect()
+ * - pj_sock_listen()
+ * - pj_sock_accept()
+ *
+ *
+ * This file is <b>pjlib-test/sock.c</b>
+ *
+ * \include pjlib-test/sock.c
+ */
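+
+/* A minimal UDP send sketch using the calls listed above. This is
+ * illustrative only; error handling is omitted and the address/port values
+ * are arbitrary (see udp_test() below for the complete sequence).
+ *
+ * \code
+ *  pj_sock_t sock;
+ *  pj_sockaddr_in dstaddr;
+ *  pj_str_t s;
+ *  pj_ssize_t len = 5;
+ *
+ *  pj_sock_socket(PJ_AF_INET, PJ_SOCK_DGRAM, 0, &sock);
+ *  pj_sockaddr_in_init(&dstaddr, pj_cstr(&s, "127.0.0.1"), 5000);
+ *  pj_sock_sendto(sock, "hello", &len, 0, &dstaddr, sizeof(dstaddr));
+ *  pj_sock_close(sock);
+ * \endcode
+ */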
+
+#if INCLUDE_SOCK_TEST
+
+#define UDP_PORT 51234
+#define TCP_PORT (UDP_PORT+10)
+#define BIG_DATA_LEN 9000
+
+static char bigdata[BIG_DATA_LEN];
+static char bigbuffer[BIG_DATA_LEN];
+
+static int format_test(void)
+{
+ pj_str_t s = pj_str("127.0.0.1");
+ char *p;
+ pj_in_addr addr;
+ const pj_str_t *hostname;
+
+ PJ_LOG(3,("test", "...format_test()"));
+
+ /* pj_inet_aton() */
+ if (pj_inet_aton(&s, &addr) != 1)
+ return -10;
+
+ /* Check the result. */
+ p = (char*)&addr;
+ if (p[0]!=127 || p[1]!=0 || p[2]!=0 || p[3]!=1)
+ return -15;
+
+ /* pj_inet_ntoa() */
+ p = pj_inet_ntoa(addr);
+ if (!p)
+ return -20;
+
+ if (pj_strcmp2(&s, p) != 0)
+ return -30;
+
+ /* pj_gethostname() */
+ hostname = pj_gethostname();
+ if (!hostname || !hostname->ptr || !hostname->slen)
+ return -40;
+
+ /* pj_gethostaddr() */
+
+ return 0;
+}
+
+static int simple_sock_test(void)
+{
+ int types[2];
+ pj_sock_t sock;
+ int i;
+ pj_status_t rc = PJ_SUCCESS;
+
+ types[0] = PJ_SOCK_STREAM;
+ types[1] = PJ_SOCK_DGRAM;
+
+ PJ_LOG(3,("test", "...simple_sock_test()"));
+
+ for (i=0; i<sizeof(types)/sizeof(types[0]); ++i) {
+
+ rc = pj_sock_socket(PJ_AF_INET, types[i], 0, &sock);
+ if (rc != PJ_SUCCESS) {
+            app_perror("...error: unable to create socket", rc);
+ break;
+ } else {
+ rc = pj_sock_close(sock);
+ if (rc != 0) {
+ app_perror("...error: close socket", rc);
+ break;
+ }
+ }
+ }
+ return rc;
+}
+
+
+static int send_recv_test(int sock_type,
+ pj_sock_t ss, pj_sock_t cs,
+ pj_sockaddr_in *dstaddr, pj_sockaddr_in *srcaddr,
+ int addrlen)
+{
+ enum { DATA_LEN = 16 };
+ char senddata[DATA_LEN+4], recvdata[DATA_LEN+4];
+ pj_ssize_t sent, received, total_received;
+ pj_status_t rc;
+
+ TRACE_(("test", "....create_random_string()"));
+ pj_create_random_string(senddata, DATA_LEN);
+ senddata[DATA_LEN-1] = '\0';
+
+ /*
+ * Test send/recv small data.
+ */
+ TRACE_(("test", "....sendto()"));
+ if (dstaddr) {
+ sent = DATA_LEN;
+ rc = pj_sock_sendto(cs, senddata, &sent, 0, dstaddr, addrlen);
+ if (rc != PJ_SUCCESS || sent != DATA_LEN) {
+ app_perror("...sendto error", rc);
+ rc = -140; goto on_error;
+ }
+ } else {
+ sent = DATA_LEN;
+ rc = pj_sock_send(cs, senddata, &sent, 0);
+ if (rc != PJ_SUCCESS || sent != DATA_LEN) {
+ app_perror("...send error", rc);
+ rc = -145; goto on_error;
+ }
+ }
+
+ TRACE_(("test", "....recv()"));
+ if (srcaddr) {
+ pj_sockaddr_in addr;
+ int srclen = sizeof(addr);
+
+ pj_memset(&addr, 0, sizeof(addr));
+
+ received = DATA_LEN;
+ rc = pj_sock_recvfrom(ss, recvdata, &received, 0, &addr, &srclen);
+ if (rc != PJ_SUCCESS || received != DATA_LEN) {
+ app_perror("...recvfrom error", rc);
+ rc = -150; goto on_error;
+ }
+ if (srclen != addrlen)
+ return -151;
+ if (pj_memcmp(&addr, srcaddr, srclen) != 0) {
+ char srcaddr_str[32], addr_str[32];
+ strcpy(srcaddr_str, pj_inet_ntoa(srcaddr->sin_addr));
+ strcpy(addr_str, pj_inet_ntoa(addr.sin_addr));
+ PJ_LOG(3,("test", "...error: src address mismatch (original=%s, "
+ "recvfrom addr=%s)",
+ srcaddr_str, addr_str));
+ return -152;
+ }
+
+ } else {
+ /* Repeat recv() until all data is received.
+ * This applies only for non-UDP of course, since for UDP
+ * we would expect all data to be received in one packet.
+ */
+ total_received = 0;
+ do {
+ received = DATA_LEN-total_received;
+ rc = pj_sock_recv(ss, recvdata+total_received, &received, 0);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...recv error", rc);
+ rc = -155; goto on_error;
+ }
+ if (received <= 0) {
+ PJ_LOG(3,("", "...error: socket has closed! (received=%d)",
+ received));
+ rc = -156; goto on_error;
+ }
+ if (received != DATA_LEN-total_received) {
+ if (sock_type != PJ_SOCK_STREAM) {
+ PJ_LOG(3,("", "...error: expecting %u bytes, got %u bytes",
+ DATA_LEN-total_received, received));
+ rc = -157; goto on_error;
+ }
+ }
+ total_received += received;
+ } while (total_received < DATA_LEN);
+ }
+
+ TRACE_(("test", "....memcmp()"));
+ if (pj_memcmp(senddata, recvdata, DATA_LEN) != 0) {
+ PJ_LOG(3,("","...error: received data mismatch "
+                  "(got:'%s' expecting:'%s')",
+ recvdata, senddata));
+ rc = -160; goto on_error;
+ }
+
+ /*
+ * Test send/recv big data.
+ */
+ TRACE_(("test", "....sendto()"));
+ if (dstaddr) {
+ sent = BIG_DATA_LEN;
+ rc = pj_sock_sendto(cs, bigdata, &sent, 0, dstaddr, addrlen);
+ if (rc != PJ_SUCCESS || sent != BIG_DATA_LEN) {
+ app_perror("...sendto error", rc);
+ rc = -161; goto on_error;
+ }
+ } else {
+ sent = BIG_DATA_LEN;
+ rc = pj_sock_send(cs, bigdata, &sent, 0);
+ if (rc != PJ_SUCCESS || sent != BIG_DATA_LEN) {
+ app_perror("...send error", rc);
+ rc = -165; goto on_error;
+ }
+ }
+
+ TRACE_(("test", "....recv()"));
+
+ /* Repeat recv() until all data is received.
+ * This applies only for non-UDP of course, since for UDP
+ * we would expect all data to be received in one packet.
+ */
+ total_received = 0;
+ do {
+ received = BIG_DATA_LEN-total_received;
+ rc = pj_sock_recv(ss, bigbuffer+total_received, &received, 0);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...recv error", rc);
+ rc = -170; goto on_error;
+ }
+ if (received <= 0) {
+ PJ_LOG(3,("", "...error: socket has closed! (received=%d)",
+ received));
+ rc = -173; goto on_error;
+ }
+ if (received != BIG_DATA_LEN-total_received) {
+ if (sock_type != PJ_SOCK_STREAM) {
+ PJ_LOG(3,("", "...error: expecting %u bytes, got %u bytes",
+ BIG_DATA_LEN-total_received, received));
+ rc = -176; goto on_error;
+ }
+ }
+ total_received += received;
+ } while (total_received < BIG_DATA_LEN);
+
+ TRACE_(("test", "....memcmp()"));
+ if (pj_memcmp(bigdata, bigbuffer, BIG_DATA_LEN) != 0) {
+ PJ_LOG(3,("", "...error: received data has been altered!"));
+ rc = -180; goto on_error;
+ }
+
+ rc = 0;
+
+on_error:
+ return rc;
+}
+
+static int udp_test(void)
+{
+ pj_sock_t cs = PJ_INVALID_SOCKET, ss = PJ_INVALID_SOCKET;
+ pj_sockaddr_in dstaddr, srcaddr;
+ pj_str_t s;
+ pj_status_t rc = 0, retval;
+
+ PJ_LOG(3,("test", "...udp_test()"));
+
+ rc = pj_sock_socket(PJ_AF_INET, PJ_SOCK_DGRAM, 0, &ss);
+ if (rc != 0) {
+ app_perror("...error: unable to create socket", rc);
+ return -100;
+ }
+
+ rc = pj_sock_socket(PJ_AF_INET, PJ_SOCK_DGRAM, 0, &cs);
+ if (rc != 0)
+ return -110;
+
+ /* Bind server socket. */
+ pj_memset(&dstaddr, 0, sizeof(dstaddr));
+ dstaddr.sin_family = PJ_AF_INET;
+ dstaddr.sin_port = pj_htons(UDP_PORT);
+ dstaddr.sin_addr = pj_inet_addr(pj_cstr(&s, "127.0.0.1"));
+
+ if ((rc=pj_sock_bind(ss, &dstaddr, sizeof(dstaddr))) != 0) {
+ app_perror("...bind error", rc);
+ rc = -120; goto on_error;
+ }
+
+ /* Bind client socket. */
+ pj_memset(&srcaddr, 0, sizeof(srcaddr));
+ srcaddr.sin_family = PJ_AF_INET;
+ srcaddr.sin_port = pj_htons(UDP_PORT-1);
+ srcaddr.sin_addr = pj_inet_addr(pj_cstr(&s, "127.0.0.1"));
+
+ if ((rc=pj_sock_bind(cs, &srcaddr, sizeof(srcaddr))) != 0) {
+ app_perror("...bind error", rc);
+ rc = -121; goto on_error;
+ }
+
+ /* Test send/recv, with sendto */
+ rc = send_recv_test(PJ_SOCK_DGRAM, ss, cs, &dstaddr, NULL,
+ sizeof(dstaddr));
+ if (rc != 0)
+ goto on_error;
+
+ /* Test send/recv, with sendto and recvfrom */
+ rc = send_recv_test(PJ_SOCK_DGRAM, ss, cs, &dstaddr,
+ &srcaddr, sizeof(dstaddr));
+ if (rc != 0)
+ goto on_error;
+
+ /* connect() the sockets. */
+ rc = pj_sock_connect(cs, &dstaddr, sizeof(dstaddr));
+ if (rc != 0) {
+ app_perror("...connect() error", rc);
+ rc = -122; goto on_error;
+ }
+
+ /* Test send/recv with send() */
+ rc = send_recv_test(PJ_SOCK_DGRAM, ss, cs, NULL, NULL, 0);
+ if (rc != 0)
+ goto on_error;
+
+ /* Test send/recv with send() and recvfrom */
+ rc = send_recv_test(PJ_SOCK_DGRAM, ss, cs, NULL, &srcaddr,
+ sizeof(srcaddr));
+ if (rc != 0)
+ goto on_error;
+
+on_error:
+ retval = rc;
+ if (cs != PJ_INVALID_SOCKET) {
+ rc = pj_sock_close(cs);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error in closing socket", rc);
+ return -1000;
+ }
+ }
+ if (ss != PJ_INVALID_SOCKET) {
+ rc = pj_sock_close(ss);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error in closing socket", rc);
+ return -1010;
+ }
+ }
+
+ return retval;
+}
+
+static int tcp_test(void)
+{
+ pj_sock_t cs, ss;
+ pj_status_t rc = 0, retval;
+
+ PJ_LOG(3,("test", "...tcp_test()"));
+
+ rc = app_socketpair(PJ_AF_INET, PJ_SOCK_STREAM, 0, &ss, &cs);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: app_socketpair():", rc);
+ return -2000;
+ }
+
+ /* Test send/recv with send() and recv() */
+ retval = send_recv_test(PJ_SOCK_STREAM, ss, cs, NULL, NULL, 0);
+
+ rc = pj_sock_close(cs);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error in closing socket", rc);
+ return -2000;
+ }
+
+ rc = pj_sock_close(ss);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error in closing socket", rc);
+ return -2010;
+ }
+
+ return retval;
+}
+
+static int ioctl_test(void)
+{
+ return 0;
+}
+
+int sock_test()
+{
+ int rc;
+
+ pj_create_random_string(bigdata, BIG_DATA_LEN);
+
+ rc = format_test();
+ if (rc != 0)
+ return rc;
+
+ rc = simple_sock_test();
+ if (rc != 0)
+ return rc;
+
+ rc = ioctl_test();
+ if (rc != 0)
+ return rc;
+
+ rc = udp_test();
+ if (rc != 0)
+ return rc;
+
+ rc = tcp_test();
+ if (rc != 0)
+ return rc;
+
+ return 0;
+}
+
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_sock_test;
+#endif /* INCLUDE_SOCK_TEST */
+
diff --git a/pjlib/src/pjlib-test/sock_perf.c b/pjlib/src/pjlib-test/sock_perf.c
new file mode 100644
index 00000000..9e800432
--- /dev/null
+++ b/pjlib/src/pjlib-test/sock_perf.c
@@ -0,0 +1,183 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/sock_perf.c 4 10/29/05 11:51a Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/sock_perf.c $
+ *
+ * 4 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 3 14/10/05 11:31 Bennylp
+ * Fixed bug when TCP data is received in chunks.
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/11/05 11:18p Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+#include <pjlib.h>
+#include <pj/compat/high_precision.h>
+
+
+/**
+ * \page page_pjlib_sock_perf_test Test: Socket Performance
+ *
+ * Test the performance of the socket communication. This will perform
+ * simple producer-consumer type of test, where we calculate how long
+ * does it take to send certain number of packets from producer to
+ * consumer.
+ *
+ * This file is <b>pjlib-test/sock_perf.c</b>
+ *
+ * \include pjlib-test/sock_perf.c
+ */
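+
+/* The benchmark below reports bandwidth as total_received * 1000 / elapsed,
+ * with elapsed in microseconds; this works out to kilobytes (1000 bytes)
+ * per second. A worked example with assumed numbers: moving 33,554,432
+ * bytes (32 MB) in 4 seconds (4,000,000 usec) gives
+ * 33,554,432 * 1000 / 4,000,000 = ~8388 KB/s.
+ */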
+
+#if INCLUDE_SOCK_PERF_TEST
+
+/*
+ * sock_producer_consumer()
+ *
+ * Simple producer-consumer benchmarking. Send loop number of
+ * buf_size size packets as fast as possible.
+ */
+static int sock_producer_consumer(int sock_type,
+ unsigned buf_size,
+ unsigned loop,
+ unsigned *p_bandwidth)
+{
+ pj_sock_t consumer, producer;
+ pj_pool_t *pool;
+ char *outgoing_buffer, *incoming_buffer;
+ pj_timestamp start, stop;
+ unsigned i;
+ pj_highprec_t elapsed, bandwidth;
+ pj_size_t total_received;
+ pj_status_t rc;
+
+ /* Create pool. */
+ pool = pj_pool_create(mem, NULL, 4096, 4096, NULL);
+ if (!pool)
+ return -10;
+
+ /* Create producer-consumer pair. */
+ rc = app_socketpair(PJ_AF_INET, sock_type, 0, &consumer, &producer);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: create socket pair", rc);
+ return -20;
+ }
+
+ /* Create buffers. */
+ outgoing_buffer = pj_pool_alloc(pool, buf_size);
+ incoming_buffer = pj_pool_alloc(pool, buf_size);
+
+ /* Start loop. */
+ pj_get_timestamp(&start);
+ total_received = 0;
+ for (i=0; i<loop; ++i) {
+ pj_ssize_t sent, part_received, received;
+ pj_time_val delay;
+
+ sent = buf_size;
+ rc = pj_sock_send(producer, outgoing_buffer, &sent, 0);
+ if (rc != PJ_SUCCESS || sent != (pj_ssize_t)buf_size) {
+ app_perror("...error: send()", rc);
+ return -61;
+ }
+
+        /* Repeat recv() until all data is received.
+         * This applies only for non-UDP of course, since for UDP
+         * we would expect all data to be received in one packet.
+ */
+ received = 0;
+ do {
+ part_received = buf_size-received;
+ rc = pj_sock_recv(consumer, incoming_buffer+received,
+ &part_received, 0);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...recv error", rc);
+ return -70;
+ }
+ if (part_received <= 0) {
+ PJ_LOG(3,("", "...error: socket has closed (part_received=%d)!",
+ part_received));
+ return -73;
+ }
+ if ((pj_size_t)part_received != buf_size-received) {
+ if (sock_type != PJ_SOCK_STREAM) {
+ PJ_LOG(3,("", "...error: expecting %u bytes, got %u bytes",
+ buf_size-received, part_received));
+ return -76;
+ }
+ }
+ received += part_received;
+ } while ((pj_size_t)received < buf_size);
+
+ total_received += received;
+
+	/* Stop the test if it has been running for more than 10 secs. */
+ pj_get_timestamp(&stop);
+ delay = pj_elapsed_time(&start, &stop);
+ if (delay.sec > 10)
+ break;
+ }
+
+ /* Stop timer. */
+ pj_get_timestamp(&stop);
+
+ elapsed = pj_elapsed_usec(&start, &stop);
+
+    /* bandwidth = total_received * 1000 / elapsed
+     * (bytes * 1000 / usec, i.e. KB/s with 1 KB = 1000 bytes).
+     */
+ bandwidth = total_received;
+ pj_highprec_mul(bandwidth, 1000);
+ pj_highprec_div(bandwidth, elapsed);
+
+ *p_bandwidth = (pj_uint32_t)bandwidth;
+
+ /* Close sockets. */
+ pj_sock_close(consumer);
+ pj_sock_close(producer);
+
+ /* Done */
+ pj_pool_release(pool);
+
+ return 0;
+}
+
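+/* Illustration only (a sketch, not part of the test suite): the helper
+ * above could be reused for other workloads as well, e.g. large UDP
+ * datagrams that still fit a typical 1500-byte Ethernet MTU. The packet
+ * size and loop count below are arbitrary values chosen for the example.
+ */
+#if 0
+static int sock_perf_large_udp_example(unsigned *p_bandwidth)
+{
+    /* 8192 datagrams of 1472 bytes each over the AF_INET socketpair. */
+    return sock_producer_consumer(PJ_SOCK_DGRAM, 1472, 8192, p_bandwidth);
+}
+#endif
+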
+/*
+ * sock_perf_test()
+ *
+ * Main test entry.
+ */
+int sock_perf_test(void)
+{
+ enum { LOOP = 64 * 1024 };
+ int rc;
+ unsigned bandwidth;
+
+ PJ_LOG(3,("", "...benchmarking socket "
+ "(2 sockets, packet=512, single threaded):"));
+
+ /* Benchmarking UDP */
+ rc = sock_producer_consumer(PJ_SOCK_DGRAM, 512, LOOP, &bandwidth);
+ if (rc != 0) return rc;
+ PJ_LOG(3,("", "....bandwidth UDP = %d KB/s", bandwidth));
+
+ /* Benchmarking TCP */
+ rc = sock_producer_consumer(PJ_SOCK_STREAM, 512, LOOP, &bandwidth);
+ if (rc != 0) return rc;
+ PJ_LOG(3,("", "....bandwidth TCP = %d KB/s", bandwidth));
+
+ return rc;
+}
+
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_sock_perf_test;
+#endif /* INCLUDE_SOCK_PERF_TEST */
+
+
diff --git a/pjlib/src/pjlib-test/string.c b/pjlib/src/pjlib-test/string.c
new file mode 100644
index 00000000..1a3de325
--- /dev/null
+++ b/pjlib/src/pjlib-test/string.c
@@ -0,0 +1,168 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/string.c 2 10/14/05 12:26a Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/string.c $
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/05/05 5:13p Bennylp
+ * Created.
+ *
+ */
+#include <pj/string.h>
+#include <pj/pool.h>
+#include <pj/log.h>
+#include "test.h"
+
+/**
+ * \page page_pjlib_string_test Test: String
+ *
+ * This file provides implementation of \b string_test(). It tests the
+ * functionality of the string API.
+ *
+ * \section string_test_sec Scope of the Test
+ *
+ * API tested:
+ * - pj_str()
+ * - pj_strcmp()
+ * - pj_strcmp2()
+ * - pj_stricmp()
+ * - pj_strlen()
+ * - pj_strncmp()
+ * - pj_strnicmp()
+ * - pj_strchr()
+ * - pj_strdup()
+ * - pj_strdup2()
+ * - pj_strcpy()
+ * - pj_strcat()
+ * - pj_strtrim()
+ * - pj_utoa()
+ * - pj_strtoul()
+ * - pj_create_random_string()
+ *
+ *
+ * This file is <b>pjlib-test/string.c</b>
+ *
+ * \include pjlib-test/string.c
+ */
+
+#if INCLUDE_STRING_TEST
+
+#ifdef _MSC_VER
+# pragma warning(disable: 4204)
+#endif
+
+#define HELLO_WORLD "Hello World"
+#define JUST_HELLO "Hello"
+#define UL_VALUE 3456789012UL
+
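+/* Note: pj_str_t holds a pointer and an explicit length (slen); the
+ * buffer is not necessarily NULL terminated, so the comparisons below
+ * work on (ptr, slen) rather than on a terminating NUL.
+ */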
+int string_test(void)
+{
+ const pj_str_t hello_world = { HELLO_WORLD, strlen(HELLO_WORLD) };
+ const pj_str_t just_hello = { JUST_HELLO, strlen(JUST_HELLO) };
+ pj_str_t s1, s2, s3, s4, s5;
+ enum { RCOUNT = 10, RLEN = 16 };
+ pj_str_t random[RCOUNT];
+ pj_pool_t *pool;
+ int i;
+
+ pool = pj_pool_create(mem, NULL, 4096, 0, NULL);
+ if (!pool) return -5;
+
+ /*
+ * pj_str(), pj_strcmp(), pj_stricmp(), pj_strlen(),
+ * pj_strncmp(), pj_strchr()
+ */
+ s1 = pj_str(HELLO_WORLD);
+ if (pj_strcmp(&s1, &hello_world) != 0)
+ return -10;
+ if (pj_stricmp(&s1, &hello_world) != 0)
+ return -20;
+ if (pj_strcmp(&s1, &just_hello) <= 0)
+ return -30;
+ if (pj_stricmp(&s1, &just_hello) <= 0)
+ return -40;
+ if (pj_strlen(&s1) != strlen(HELLO_WORLD))
+ return -50;
+ if (pj_strncmp(&s1, &hello_world, 5) != 0)
+ return -60;
+ if (pj_strnicmp(&s1, &hello_world, 5) != 0)
+ return -70;
+ if (pj_strchr(&s1, HELLO_WORLD[1]) != s1.ptr+1)
+ return -80;
+
+ /*
+ * pj_strdup()
+ */
+ if (!pj_strdup(pool, &s2, &s1))
+ return -100;
+ if (pj_strcmp(&s1, &s2) != 0)
+ return -110;
+
+ /*
+ * pj_strcpy(), pj_strcat()
+ */
+ s3.ptr = pj_pool_alloc(pool, 256);
+ if (!s3.ptr)
+ return -200;
+ pj_strcpy(&s3, &s2);
+ pj_strcat(&s3, &just_hello);
+
+ if (pj_strcmp2(&s3, HELLO_WORLD JUST_HELLO) != 0)
+ return -210;
+
+ /*
+ * pj_strdup2(), pj_strtrim().
+ */
+ pj_strdup2(pool, &s4, " " HELLO_WORLD "\t ");
+ pj_strtrim(&s4);
+ if (pj_strcmp2(&s4, HELLO_WORLD) != 0)
+ return -250;
+
+ /*
+ * pj_utoa()
+ */
+ s5.ptr = pj_pool_alloc(pool, 16);
+ if (!s5.ptr)
+ return -270;
+ s5.slen = pj_utoa(UL_VALUE, s5.ptr);
+
+ /*
+ * pj_strtoul()
+ */
+ if (pj_strtoul(&s5) != UL_VALUE)
+ return -280;
+
+ /*
+ * pj_create_random_string()
+ * Check that no duplicate strings are returned.
+ */
+ for (i=0; i<RCOUNT; ++i) {
+ int j;
+
+ random[i].ptr = pj_pool_alloc(pool, RLEN);
+ if (!random[i].ptr)
+ return -320;
+
+ random[i].slen = RLEN;
+ pj_create_random_string(random[i].ptr, RLEN);
+
+ for (j=0; j<i; ++j) {
+ if (pj_strcmp(&random[i], &random[j])==0)
+ return -330;
+ }
+ }
+
+ /* Done. */
+ pj_pool_release(pool);
+ return 0;
+}
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_string_test;
+#endif /* INCLUDE_STRING_TEST */
+
diff --git a/pjlib/src/pjlib-test/test.c b/pjlib/src/pjlib-test/test.c
new file mode 100644
index 00000000..44b89c40
--- /dev/null
+++ b/pjlib/src/pjlib-test/test.c
@@ -0,0 +1,196 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/test.c 4 29/10/05 21:33 Bennylp $
+ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/test.c $
+ *
+ * 4 29/10/05 21:33 Bennylp
+ * Changed echo_server() to echo_srv_sync()
+ *
+ * 3 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/05/05 5:13p Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+#include <pjlib.h>
+#ifdef _MSC_VER
+# pragma warning(disable:4127)
+#endif
+
+#define DO_TEST(test) do { \
+ PJ_LOG(3, ("test", "Running %s...", #test)); \
+ rc = test; \
+ PJ_LOG(3, ("test", \
+ "%s(%d)", \
+ (rc ? "..ERROR" : "..success"), rc)); \
+ if (rc!=0) goto on_return; \
+ } while (0)
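+/* For example, DO_TEST( list_test() ) logs "Running list_test()...",
+ * runs the test, logs "..success" or "..ERROR" together with the return
+ * code, and jumps to on_return if the test failed.
+ */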
+
+
+pj_pool_factory *mem;
+
+int param_echo_sock_type;
+const char *param_echo_server = ECHO_SERVER_ADDRESS;
+int param_echo_port = ECHO_SERVER_START_PORT;
+
+int test_inner(void)
+{
+ pj_caching_pool caching_pool;
+ const char *filename;
+ int line;
+ int rc = 0;
+
+ mem = &caching_pool.factory;
+
+ rc = pj_init();
+ if (rc != 0) {
+ app_perror("pj_init() error!!", rc);
+ return rc;
+ }
+
+ pj_log_set_level(3);
+ pj_log_set_decor(PJ_LOG_HAS_NEWLINE);
+ pj_dump_config();
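+
+    /* All pools created by the individual tests are allocated from the
+     * caching pool below, through the global 'mem' factory set above.
+     */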
+ pj_caching_pool_init( &caching_pool, &pj_pool_factory_default_policy, 0 );
+
+#if INCLUDE_ERRNO_TEST
+ DO_TEST( errno_test() );
+#endif
+
+#if INCLUDE_TIMESTAMP_TEST
+ DO_TEST( timestamp_test() );
+#endif
+
+#if INCLUDE_EXCEPTION_TEST
+ DO_TEST( exception_test() );
+#endif
+
+#if INCLUDE_RAND_TEST
+ DO_TEST( rand_test() );
+#endif
+
+#if INCLUDE_LIST_TEST
+ DO_TEST( list_test() );
+#endif
+
+#if INCLUDE_POOL_TEST
+ DO_TEST( pool_test() );
+#endif
+
+#if INCLUDE_POOL_PERF_TEST
+ DO_TEST( pool_perf_test() );
+#endif
+
+#if INCLUDE_STRING_TEST
+ DO_TEST( string_test() );
+#endif
+
+#if INCLUDE_FIFOBUF_TEST
+ DO_TEST( fifobuf_test() );
+#endif
+
+#if INCLUDE_RBTREE_TEST
+ DO_TEST( rbtree_test() );
+#endif
+
+#if INCLUDE_ATOMIC_TEST
+ DO_TEST( atomic_test() );
+#endif
+
+#if INCLUDE_MUTEX_TEST
+ DO_TEST( mutex_test() );
+#endif
+
+#if INCLUDE_TIMER_TEST
+ DO_TEST( timer_test() );
+#endif
+
+#if INCLUDE_SLEEP_TEST
+ DO_TEST( sleep_test() );
+#endif
+
+#if INCLUDE_THREAD_TEST
+ DO_TEST( thread_test() );
+#endif
+
+#if INCLUDE_SOCK_TEST
+ DO_TEST( sock_test() );
+#endif
+
+#if INCLUDE_SOCK_PERF_TEST
+ DO_TEST( sock_perf_test() );
+#endif
+
+#if INCLUDE_SELECT_TEST
+ DO_TEST( select_test() );
+#endif
+
+#if INCLUDE_UDP_IOQUEUE_TEST
+ DO_TEST( udp_ioqueue_test() );
+#endif
+
+#if PJ_HAS_TCP && INCLUDE_TCP_IOQUEUE_TEST
+ DO_TEST( tcp_ioqueue_test() );
+#endif
+
+#if INCLUDE_IOQUEUE_PERF_TEST
+ DO_TEST( ioqueue_perf_test() );
+#endif
+
+#if INCLUDE_XML_TEST
+ DO_TEST( xml_test() );
+#endif
+
+#if INCLUDE_ECHO_SERVER
+ //echo_server();
+ echo_srv_sync();
+#elif INCLUDE_ECHO_CLIENT
+ if (param_echo_sock_type == 0)
+ param_echo_sock_type = PJ_SOCK_DGRAM;
+
+ echo_client( param_echo_sock_type,
+ param_echo_server,
+ param_echo_port);
+#endif
+
+ goto on_return;
+
+on_return:
+
+ pj_caching_pool_destroy( &caching_pool );
+
+ PJ_LOG(3,("test", ""));
+
+ pj_thread_get_stack_info(pj_thread_this(), &filename, &line);
+ PJ_LOG(3,("test", "Stack max usage: %u, deepest: %s:%u",
+ pj_thread_get_stack_max_usage(pj_thread_this()),
+ filename, line));
+ if (rc == 0)
+ PJ_LOG(3,("test", "Looks like everything is okay!.."));
+ else
+ PJ_LOG(3,("test", "Test completed with error(s)"));
+ return 0;
+}
+
+int test_main(void)
+{
+ PJ_USE_EXCEPTION;
+
+ PJ_TRY {
+ return test_inner();
+ }
+ PJ_DEFAULT {
+ int id = PJ_GET_EXCEPTION();
+ PJ_LOG(3,("test", "FATAL: unhandled exception id %d (%s)",
+ id, pj_exception_id_name(id)));
+ }
+ PJ_END;
+
+ return -1;
+}
diff --git a/pjlib/src/pjlib-test/test.h b/pjlib/src/pjlib-test/test.h
new file mode 100644
index 00000000..475cdff6
--- /dev/null
+++ b/pjlib/src/pjlib-test/test.h
@@ -0,0 +1,90 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/test.h 4 10/29/05 10:28p Bennylp $ */
+#ifndef __PJLIB_TEST_H__
+#define __PJLIB_TEST_H__
+
+#include <pj/types.h>
+
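+/* Test groups: set any GROUP_xxx flag to 0 to disable all tests in that
+ * group (each INCLUDE_xxx_TEST macro below is derived from one of these
+ * flags).
+ */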
+#define GROUP_LIBC 1
+#define GROUP_OS 1
+#define GROUP_DATA_STRUCTURE 1
+#define GROUP_NETWORK 1
+#define GROUP_EXTRA 1
+
+#define INCLUDE_ERRNO_TEST GROUP_LIBC
+#define INCLUDE_TIMESTAMP_TEST GROUP_OS
+#define INCLUDE_EXCEPTION_TEST GROUP_LIBC
+#define INCLUDE_RAND_TEST GROUP_LIBC
+#define INCLUDE_LIST_TEST GROUP_DATA_STRUCTURE
+#define INCLUDE_POOL_TEST GROUP_LIBC
+#define INCLUDE_POOL_PERF_TEST (PJ_HAS_MALLOC && GROUP_LIBC)
+#define INCLUDE_STRING_TEST GROUP_DATA_STRUCTURE
+#define INCLUDE_FIFOBUF_TEST GROUP_DATA_STRUCTURE
+#define INCLUDE_RBTREE_TEST GROUP_DATA_STRUCTURE
+#define INCLUDE_TIMER_TEST GROUP_DATA_STRUCTURE
+#define INCLUDE_ATOMIC_TEST GROUP_OS
+#define INCLUDE_MUTEX_TEST GROUP_OS
+#define INCLUDE_SLEEP_TEST GROUP_OS
+#define INCLUDE_THREAD_TEST GROUP_OS
+#define INCLUDE_SOCK_TEST GROUP_NETWORK
+#define INCLUDE_SOCK_PERF_TEST GROUP_NETWORK
+#define INCLUDE_SELECT_TEST GROUP_NETWORK
+#define INCLUDE_UDP_IOQUEUE_TEST GROUP_NETWORK
+#define INCLUDE_TCP_IOQUEUE_TEST GROUP_NETWORK
+#define INCLUDE_IOQUEUE_PERF_TEST GROUP_NETWORK
+#define INCLUDE_XML_TEST GROUP_EXTRA
+
+
+#define INCLUDE_ECHO_SERVER 0
+#define INCLUDE_ECHO_CLIENT 0
+
+#define ECHO_SERVER_MAX_THREADS 4
+#define ECHO_SERVER_START_PORT 65000
+#define ECHO_SERVER_ADDRESS "compaq.home"
+#define ECHO_SERVER_DURATION_MSEC (60*60*1000)
+
+#define ECHO_CLIENT_MAX_THREADS 10
+
+PJ_BEGIN_DECL
+
+extern int errno_test(void);
+extern int timestamp_test(void);
+extern int exception_test(void);
+extern int rand_test(void);
+extern int list_test(void);
+extern int pool_test(void);
+extern int pool_perf_test(void);
+extern int string_test(void);
+extern int fifobuf_test(void);
+extern int timer_test(void);
+extern int rbtree_test(void);
+extern int atomic_test(void);
+extern int mutex_test(void);
+extern int sleep_test(void);
+extern int thread_test(void);
+extern int sock_test(void);
+extern int sock_perf_test(void);
+extern int select_test(void);
+extern int udp_ioqueue_test(void);
+extern int tcp_ioqueue_test(void);
+extern int ioqueue_perf_test(void);
+extern int xml_test(void);
+
+extern int echo_server(void);
+extern int echo_client(int sock_type, const char *server, int port);
+
+extern pj_pool_factory *mem;
+
+extern int test_main(void);
+extern void app_perror(const char *msg, pj_status_t err);
+extern pj_status_t app_socket(int family, int type, int proto, int port,
+ pj_sock_t *ptr_sock);
+extern pj_status_t app_socketpair(int family, int type, int protocol,
+ pj_sock_t *server, pj_sock_t *client);
+
+//#define TRACE_(expr) PJ_LOG(3,expr)
+#define TRACE_(expr)
+
+PJ_END_DECL
+
+#endif /* __PJLIB_TEST_H__ */
+
diff --git a/pjlib/src/pjlib-test/thread.c b/pjlib/src/pjlib-test/thread.c
new file mode 100644
index 00000000..f41ec16e
--- /dev/null
+++ b/pjlib/src/pjlib-test/thread.c
@@ -0,0 +1,290 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/thread.c 4 10/29/05 11:51a Bennylp $
+ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/thread.c $
+ *
+ * 4 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 3 14/10/05 11:32 Bennylp
+ * More lenient with timeslice difference.
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/11/05 12:39a Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+
+/**
+ * \page page_pjlib_thread_test Test: Thread Test
+ *
+ * This file contains \a thread_test() definition.
+ *
+ * \section thread_test_scope_sec Scope of Test
+ * This tests:
+ * - whether PJ_THREAD_SUSPENDED flag works.
+ * - whether multithreading works.
+ *  - whether thread timeslicing works, and whether all threads get a
+ *    roughly equal share of CPU time.
+ *
+ * APIs tested:
+ * - pj_thread_create()
+ * - pj_thread_register()
+ * - pj_thread_this()
+ * - pj_thread_get_name()
+ * - pj_thread_destroy()
+ * - pj_thread_resume()
+ * - pj_thread_sleep()
+ * - pj_thread_join()
+ * - pj_thread_destroy()
+ *
+ *
+ * This file is <b>pjlib-test/thread.c</b>
+ *
+ * \include pjlib-test/thread.c
+ */
+#if INCLUDE_THREAD_TEST
+
+#include <pjlib.h>
+
+#define THIS_FILE "thread_test"
+
+static int quit_flag=0;
+
+/*
+ * The thread's entry point.
+ *
+ * Each thread simply executes a loop that increments a counter
+ * variable.
+ */
+static void* thread_proc(pj_uint32_t *pcounter)
+{
+ /* Test that pj_thread_register() works. */
+ pj_thread_desc desc;
+ pj_thread_t *this_thread;
+ pj_status_t rc;
+
+ rc = pj_thread_register("thread", desc, &this_thread);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error in pj_thread_register", rc);
+ return NULL;
+ }
+
+ /* Test that pj_thread_this() works */
+ this_thread = pj_thread_this();
+ if (this_thread == NULL) {
+ PJ_LOG(3,(THIS_FILE, "...error: pj_thread_this() returns NULL!"));
+ return NULL;
+ }
+
+ /* Test that pj_thread_get_name() works */
+ if (pj_thread_get_name(this_thread) == NULL) {
+ PJ_LOG(3,(THIS_FILE, "...error: pj_thread_get_name() returns NULL!"));
+ return NULL;
+ }
+
+ /* Main loop */
+ for (;!quit_flag;) {
+ (*pcounter)++;
+ //Must sleep if platform doesn't do time-slicing.
+ pj_thread_sleep(0);
+ }
+
+ return NULL;
+}
+
+/*
+ * simple_thread()
+ */
+static int simple_thread(const char *title, unsigned flags)
+{
+ pj_pool_t *pool;
+ pj_thread_t *thread;
+ pj_status_t rc;
+ pj_uint32_t counter = 0;
+
+ PJ_LOG(3,(THIS_FILE, "..%s", title));
+
+ pool = pj_pool_create(mem, NULL, 4000, 4000, NULL);
+ if (!pool)
+ return -1000;
+
+ quit_flag = 0;
+
+ rc = pj_thread_create(pool, "thread", (pj_thread_proc*)&thread_proc,
+ &counter,
+ PJ_THREAD_DEFAULT_STACK_SIZE,
+ flags,
+ &thread);
+
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: unable to create thread", rc);
+ return -1010;
+ }
+
+ if (flags & PJ_THREAD_SUSPENDED) {
+ rc = pj_thread_resume(thread);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: resume thread error", rc);
+ return -1020;
+ }
+ }
+
+ PJ_LOG(3,(THIS_FILE, "..waiting for thread to quit.."));
+
+ quit_flag = 1;
+ pj_thread_join(thread);
+
+ pj_pool_release(pool);
+
+ PJ_LOG(3,(THIS_FILE, "...%s success", title));
+ return PJ_SUCCESS;
+}
+
+
+/*
+ * timeslice_test()
+ */
+static int timeslice_test(void)
+{
+ enum { NUM_THREADS = 4 };
+ pj_pool_t *pool;
+ pj_uint32_t counter[NUM_THREADS], lowest, highest, diff;
+ pj_thread_t *thread[NUM_THREADS];
+ int i;
+ pj_status_t rc;
+
+ quit_flag = 0;
+
+ pool = pj_pool_create(mem, NULL, 4096, 0, NULL);
+ if (!pool)
+ return -10;
+
+ PJ_LOG(3,(THIS_FILE, "..timeslice testing with %d threads", NUM_THREADS));
+
+ /* Create all threads in suspended mode. */
+ for (i=0; i<NUM_THREADS; ++i) {
+ counter[i] = 0;
+ rc = pj_thread_create(pool, "thread", (pj_thread_proc*)&thread_proc,
+ &counter[i],
+ PJ_THREAD_DEFAULT_STACK_SIZE,
+ PJ_THREAD_SUSPENDED,
+ &thread[i]);
+ if (rc!=PJ_SUCCESS) {
+ app_perror("...ERROR in pj_thread_create()", rc);
+ return -20;
+ }
+ }
+
+ /* Sleep for 1 second.
+ * The purpose of this is to test whether all threads are suspended.
+ */
+ pj_thread_sleep(1000);
+
+ /* Check that all counters are still zero. */
+ for (i=0; i<NUM_THREADS; ++i) {
+ if (counter[i] != 0) {
+ PJ_LOG(3,(THIS_FILE, "....ERROR! Thread %d-th is not suspended!",
+ i));
+ return -30;
+ }
+ }
+
+ /* Now resume all threads. */
+ for (i=0; i<NUM_THREADS; ++i) {
+ rc = pj_thread_resume(thread[i]);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...ERROR in pj_thread_resume()", rc);
+ return -40;
+ }
+ }
+
+ /* Main thread sleeps for some time to allow threads to run.
+ * The longer we sleep, the more accurate the calculation will be,
+     * but it makes the user wait longer for the test to finish.
+ */
+ pj_thread_sleep(5000);
+
+ /* Signal all threads to quit. */
+ quit_flag = 1;
+
+ /* Wait until all threads quit, then destroy. */
+ for (i=0; i<NUM_THREADS; ++i) {
+ rc = pj_thread_join(thread[i]);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...ERROR in pj_thread_join()", rc);
+ return -50;
+ }
+ rc = pj_thread_destroy(thread[i]);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...ERROR in pj_thread_destroy()", rc);
+ return -60;
+ }
+ }
+
+ /* Now examine the value of the counters.
+ * Check that all threads had equal proportion of processing.
+ */
+ lowest = 0xFFFFFFFF;
+ highest = 0;
+ for (i=0; i<NUM_THREADS; ++i) {
+ if (counter[i] < lowest)
+ lowest = counter[i];
+ if (counter[i] > highest)
+ highest = counter[i];
+ }
+
+ /* Check that all threads are running. */
+ if (lowest < 2) {
+ PJ_LOG(3,(THIS_FILE, "...ERROR: not all threads were running!"));
+ return -70;
+ }
+
+    /* The difference between the lowest and highest counters should be
+     * less than 50%.
+     */
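+    /* For example, lowest=40000 and highest=60000 gives
+     * diff = 20000*100 / 50000 = 40%, which passes the check below.
+     */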
+ diff = (highest-lowest)*100 / ((highest+lowest)/2);
+ if ( diff >= 50) {
+	PJ_LOG(3,(THIS_FILE, "...ERROR: threads didn't get an equal timeslice!"));
+ PJ_LOG(3,(THIS_FILE, ".....lowest counter=%u, highest counter=%u, diff=%u%%",
+ lowest, highest, diff));
+ return -80;
+ } else {
+ PJ_LOG(3,(THIS_FILE,
+ "...info: timeslice diff between lowest & highest=%u%%",
+ diff));
+ }
+
+ return 0;
+}
+
+int thread_test(void)
+{
+ int rc;
+
+ rc = simple_thread("simple thread test", 0);
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ rc = simple_thread("suspended thread test", PJ_THREAD_SUSPENDED);
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ rc = timeslice_test();
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ return rc;
+}
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_thread_test;
+#endif /* INCLUDE_THREAD_TEST */
+
+
diff --git a/pjlib/src/pjlib-test/timer.c b/pjlib/src/pjlib-test/timer.c
new file mode 100644
index 00000000..1aaa208d
--- /dev/null
+++ b/pjlib/src/pjlib-test/timer.c
@@ -0,0 +1,169 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/timer.c 3 10/29/05 10:23p Bennylp $ */
+#include "test.h"
+
+/**
+ * \page page_pjlib_timer_test Test: Timer
+ *
+ * This file provides implementation of \b timer_test(). It tests the
+ * functionality of the timer heap.
+ *
+ *
+ * This file is <b>pjlib-test/timer.c</b>
+ *
+ * \include pjlib-test/timer.c
+ */
+
+
+#if INCLUDE_TIMER_TEST
+
+#include <pjlib.h>
+
+#define LOOP 16
+#define MIN_COUNT 250
+#define MAX_COUNT (LOOP * MIN_COUNT)
+#define MIN_DELAY 2
+#define D (MAX_COUNT / 32000)
+#define DELAY (D < MIN_DELAY ? MIN_DELAY : D)
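+/* With the values above: MAX_COUNT = 16*250 = 4000, D = 4000/32000 = 0,
+ * so DELAY falls back to MIN_DELAY (2 seconds).
+ */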
+#define THIS_FILE "timer_test"
+
+
+static void timer_callback(pj_timer_heap_t *ht, pj_timer_entry *e)
+{
+ PJ_UNUSED_ARG(ht);
+ PJ_UNUSED_ARG(e);
+}
+
+static int test_timer_heap(void)
+{
+ int i, j;
+ pj_timer_entry *entry;
+ pj_pool_t *pool;
+ pj_timer_heap_t *timer;
+ pj_time_val delay;
+ pj_status_t rc; int err=0;
+ unsigned size, count;
+
+ size = pj_timer_heap_mem_size(MAX_COUNT)+MAX_COUNT*sizeof(pj_timer_entry);
+ pool = pj_pool_create( mem, NULL, size, 4000, NULL);
+ if (!pool) {
+ PJ_LOG(3,("test", "...error: unable to create pool of %u bytes",
+ size));
+ return -10;
+ }
+
+ entry = (pj_timer_entry*)pj_pool_calloc(pool, MAX_COUNT, sizeof(*entry));
+ if (!entry)
+ return -20;
+
+ for (i=0; i<MAX_COUNT; ++i) {
+ entry[i].cb = &timer_callback;
+ }
+ rc = pj_timer_heap_create(pool, MAX_COUNT, 0, &timer);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...error: unable to create timer heap", rc);
+ return -30;
+ }
+
+ count = MIN_COUNT;
+ for (i=0; i<LOOP; ++i) {
+ int early = 0;
+ int done=0;
+ int cancelled=0;
+ int rc;
+ pj_timestamp t1, t2, t_sched, t_cancel, t_poll;
+ pj_time_val now, expire;
+
+ pj_gettimeofday(&now);
+ pj_srand(now.sec);
+ t_sched.u32.lo = t_cancel.u32.lo = t_poll.u32.lo = 0;
+
+ // Register timers
+ for (j=0; j<(int)count; ++j) {
+ delay.sec = pj_rand() % DELAY;
+ delay.msec = pj_rand() % 1000;
+
+ // Schedule timer
+ pj_get_timestamp(&t1);
+ rc = pj_timer_heap_schedule(timer, &entry[j], &delay);
+ if (rc != 0)
+ return -40;
+ pj_get_timestamp(&t2);
+
+ t_sched.u32.lo += (t2.u32.lo - t1.u32.lo);
+
+ // Poll timers.
+ pj_get_timestamp(&t1);
+ rc = pj_timer_heap_poll(timer, NULL);
+ pj_get_timestamp(&t2);
+ if (rc > 0) {
+ t_poll.u32.lo += (t2.u32.lo - t1.u32.lo);
+ early += rc;
+ }
+ }
+
+ // Set the time where all timers should finish
+ pj_gettimeofday(&expire);
+ delay.sec = DELAY;
+ delay.msec = 0;
+ PJ_TIME_VAL_ADD(expire, delay);
+
+	// Wait until all timers finish, cancel some of them.
+ do {
+ int index = pj_rand() % count;
+ pj_get_timestamp(&t1);
+ rc = pj_timer_heap_cancel(timer, &entry[index]);
+ pj_get_timestamp(&t2);
+ if (rc > 0) {
+ cancelled += rc;
+ t_cancel.u32.lo += (t2.u32.lo - t1.u32.lo);
+ }
+
+ pj_gettimeofday(&now);
+
+ pj_get_timestamp(&t1);
+ rc = pj_timer_heap_poll(timer, NULL);
+ pj_get_timestamp(&t2);
+ if (rc > 0) {
+ done += rc;
+ t_poll.u32.lo += (t2.u32.lo - t1.u32.lo);
+ }
+
+ } while (PJ_TIME_VAL_LTE(now, expire)&&pj_timer_heap_count(timer) > 0);
+
+ if (pj_timer_heap_count(timer)) {
+ PJ_LOG(3, (THIS_FILE, "ERROR: %d timers left",
+ pj_timer_heap_count(timer)));
+ ++err;
+ }
+ t_sched.u32.lo /= count;
+ t_cancel.u32.lo /= count;
+ t_poll.u32.lo /= count;
+ PJ_LOG(4, (THIS_FILE,
+ "...ok (count:%d, early:%d, cancelled:%d, "
+ "sched:%d, cancel:%d poll:%d)",
+ count, early, cancelled, t_sched.u32.lo, t_cancel.u32.lo,
+ t_poll.u32.lo));
+
+ count = count * 2;
+ if (count > MAX_COUNT)
+ break;
+ }
+
+ pj_pool_release(pool);
+ return err;
+}
+
+
+int timer_test()
+{
+ return test_timer_heap();
+}
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_timer_test;
+#endif /* INCLUDE_TIMER_TEST */
+
+
diff --git a/pjlib/src/pjlib-test/timestamp.c b/pjlib/src/pjlib-test/timestamp.c
new file mode 100644
index 00000000..3d4d9f8e
--- /dev/null
+++ b/pjlib/src/pjlib-test/timestamp.c
@@ -0,0 +1,140 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/timestamp.c 4 10/29/05 11:51a Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/timestamp.c $
+ *
+ * 4 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 3 14/10/05 11:32 Bennylp
+ * Longer test, to check if timestamp is running backwards.
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/09/05 9:39p Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+#include <pj/os.h>
+#include <pj/log.h>
+
+
+/**
+ * \page page_pjlib_timestamp_test Test: Timestamp
+ *
+ * This file provides implementation of timestamp_test()
+ *
+ * \section timestamp_test_sec Scope of the Test
+ *
+ * This tests whether timestamp API works.
+ *
+ * API tested:
+ * - pj_get_timestamp_freq()
+ * - pj_get_timestamp()
+ * - pj_elapsed_usec()
+ * - PJ_LOG()
+ *
+ *
+ * This file is <b>pjlib-test/timestamp.c</b>
+ *
+ * \include pjlib-test/timestamp.c
+ */
+
+#if INCLUDE_TIMESTAMP_TEST
+
+#define THIS_FILE "timestamp"
+
+int timestamp_test(void)
+{
+ enum { CONSECUTIVE_LOOP = 1000 };
+ volatile unsigned i;
+ pj_timestamp freq, t1, t2;
+ unsigned elapsed;
+ pj_status_t rc;
+
+ PJ_LOG(3,(THIS_FILE, "...Testing timestamp (high res time)"));
+
+ /* Get and display timestamp frequency. */
+ if ((rc=pj_get_timestamp_freq(&freq)) != PJ_SUCCESS) {
+ app_perror("...ERROR: get timestamp freq", rc);
+ return -1000;
+ }
+
+ PJ_LOG(3,(THIS_FILE, "....frequency: hiword=%lu loword=%lu",
+ freq.u32.hi, freq.u32.lo));
+
+ PJ_LOG(3,(THIS_FILE, "...checking if time can run backwards (pls wait).."));
+
+ /*
+     * Check that consecutive readings yield timestamp values that
+     * never decrease (i.e. the timestamp never runs backwards).
+ * First we get the first timestamp.
+ */
+ rc = pj_get_timestamp(&t1);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...ERROR: get timestamp", rc);
+ return -1001;
+ }
+ for (i=0; i<CONSECUTIVE_LOOP; ++i) {
+ /*
+ volatile unsigned j;
+ for (j=0; j<1000; ++j)
+ ;
+ */
+ pj_thread_sleep(1);
+ rc = pj_get_timestamp(&t2);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...ERROR: get timestamp", rc);
+ return -1002;
+ }
+ /* compare t2 with t1, expecting t2 >= t1. */
+ if (t2.u32.hi < t1.u32.hi ||
+ (t2.u32.hi == t1.u32.hi && t2.u32.lo < t1.u32.lo))
+ {
+ PJ_LOG(3,(THIS_FILE, "...ERROR: timestamp runs backwards!"));
+ return -1003;
+ }
+ }
+
+ /*
+ * Simple test to time some loop.
+ */
+ PJ_LOG(3,(THIS_FILE, "....testing simple 1000000 loop"));
+
+
+ /* Mark start time. */
+ if ((rc=pj_get_timestamp(&t1)) != PJ_SUCCESS) {
+	app_perror("....error: can't get timestamp", rc);
+ return -1010;
+ }
+
+ /* Loop.. */
+ for (i=0; i<1000000; ++i)
+ ;
+
+ /* Mark end time. */
+ pj_get_timestamp(&t2);
+
+ /* Get elapsed time in usec. */
+ elapsed = pj_elapsed_usec(&t1, &t2);
+ PJ_LOG(3,(THIS_FILE, "....elapsed: %u usec", (unsigned)elapsed));
+
+ /* See if elapsed time is reasonable. */
+ if (elapsed < 1 || elapsed > 100000) {
+ PJ_LOG(3,(THIS_FILE, "....error: elapsed time outside window (%u)",
+ elapsed));
+ return -1030;
+ }
+ return 0;
+}
+
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_timestamp_test;
+#endif /* INCLUDE_TIMESTAMP_TEST */
+
diff --git a/pjlib/src/pjlib-test/udp_echo_srv_sync.c b/pjlib/src/pjlib-test/udp_echo_srv_sync.c
new file mode 100644
index 00000000..b513498b
--- /dev/null
+++ b/pjlib/src/pjlib-test/udp_echo_srv_sync.c
@@ -0,0 +1,168 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/udp_echo_srv_sync.c 2 29/10/05 21:34 Bennylp $ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/udp_echo_srv_sync.c $
+ *
+ * 2 29/10/05 21:34 Bennylp
+ * Tested on Win32
+ *
+ * 1 10/29/05 9:56a Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+#include <pjlib.h>
+
+static pj_sem_t *sem;
+static pj_mutex_t *mutex;
+static pj_size_t total_bw;
+
+static int worker_thread(void *arg)
+{
+ pj_sock_t sock = (pj_sock_t)arg;
+ char buf[1516];
+ pj_size_t received;
+ pj_time_val last_print;
+ pj_status_t last_recv_err = PJ_SUCCESS, last_write_err = PJ_SUCCESS;
+
+ received = 0;
+ pj_gettimeofday(&last_print);
+
+ for (;;) {
+ pj_ssize_t len;
+ pj_uint32_t delay_msec;
+ pj_time_val now;
+ pj_highprec_t bw;
+ pj_status_t rc;
+ pj_sockaddr_in addr;
+ int addrlen;
+
+ len = sizeof(buf);
+ addrlen = sizeof(addr);
+ rc = pj_sock_recvfrom(sock, buf, &len, 0, &addr, &addrlen);
+ if (rc != 0) {
+ if (rc != last_recv_err) {
+ app_perror("...recv error", rc);
+ last_recv_err = rc;
+ }
+ continue;
+ }
+
+ received += len;
+
+ rc = pj_sock_sendto(sock, buf, &len, 0, &addr, addrlen);
+ if (rc != PJ_SUCCESS) {
+ if (rc != last_write_err) {
+ app_perror("...send error", rc);
+ last_write_err = rc;
+ }
+ continue;
+ }
+
+ pj_gettimeofday(&now);
+ PJ_TIME_VAL_SUB(now, last_print);
+ delay_msec = PJ_TIME_VAL_MSEC(now);
+
+ if (delay_msec < 1000)
+ continue;
+
+ bw = received;
+ pj_highprec_mul(bw, 1000);
+ pj_highprec_div(bw, delay_msec);
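+	/* bw is now in bytes/second (bytes * 1000 / msec). */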
+
+ pj_mutex_lock(mutex);
+ total_bw = total_bw + (pj_size_t)bw;
+ pj_mutex_unlock(mutex);
+
+ pj_gettimeofday(&last_print);
+ received = 0;
+ pj_sem_post(sem);
+ pj_thread_sleep(0);
+ }
+}
+
+
+int echo_srv_sync(void)
+{
+ pj_pool_t *pool;
+ pj_sock_t sock;
+ pj_thread_t *thread[ECHO_SERVER_MAX_THREADS];
+ pj_status_t rc;
+ pj_highprec_t abs_total;
+ unsigned count;
+ int i;
+
+ pool = pj_pool_create(mem, NULL, 4000, 4000, NULL);
+ if (!pool)
+ return -5;
+
+ rc = pj_sem_create(pool, NULL, 0, ECHO_SERVER_MAX_THREADS, &sem);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...unable to create semaphore", rc);
+ return -6;
+ }
+
+ rc = pj_mutex_create_simple(pool, NULL, &mutex);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...unable to create mutex", rc);
+ return -7;
+ }
+
+ rc = app_socket(PJ_AF_INET, PJ_SOCK_DGRAM, 0, ECHO_SERVER_START_PORT, &sock);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...socket error", rc);
+ return -10;
+ }
+
+ for (i=0; i<ECHO_SERVER_MAX_THREADS; ++i) {
+ rc = pj_thread_create(pool, NULL, &worker_thread, (void*)sock,
+ PJ_THREAD_DEFAULT_STACK_SIZE, 0,
+ &thread[i]);
+ if (rc != PJ_SUCCESS) {
+ app_perror("...unable to create thread", rc);
+ return -20;
+ }
+ }
+
+ PJ_LOG(3,("", "...UDP echo server running with %d threads at port %d",
+ ECHO_SERVER_MAX_THREADS, ECHO_SERVER_START_PORT));
+ PJ_LOG(3,("", "...Press Ctrl-C to abort"));
+
+ abs_total = 0;
+ count = 0;
+
+ for (;;) {
+ pj_uint32_t avg32;
+ pj_highprec_t avg;
+
+ for (i=0; i<ECHO_SERVER_MAX_THREADS; ++i)
+ pj_sem_wait(sem);
+
+ /* calculate average so far:
+ avg = abs_total / count;
+ */
+ count++;
+ abs_total += total_bw;
+ avg = abs_total;
+ pj_highprec_div(avg, count);
+ avg32 = (pj_uint32_t)avg;
+
+
+ PJ_LOG(3,("", "Synchronous UDP (%d threads): %u KB/s (avg=%u KB/s) %s",
+ ECHO_SERVER_MAX_THREADS,
+ total_bw / 1000,
+ avg32 / 1000,
+ (count==20 ? "<ses avg>" : "")));
+
+ total_bw = 0;
+
+ if (count==20) {
+ count = 0;
+ abs_total = 0;
+ }
+
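+	/* Drain any extra semaphore posts so that the next round again
+	 * waits for a fresh report from every worker thread.
+	 */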
+ while (pj_sem_trywait(sem) == PJ_SUCCESS)
+ ;
+ }
+}
+
+
diff --git a/pjlib/src/pjlib-test/util.c b/pjlib/src/pjlib-test/util.c
new file mode 100644
index 00000000..c698cff4
--- /dev/null
+++ b/pjlib/src/pjlib-test/util.c
@@ -0,0 +1,129 @@
+/* $Header: /pjproject-0.3/pjlib/src/pjlib-test/util.c 3 10/29/05 11:51a Bennylp $
+ */
+/*
+ * $Log: /pjproject-0.3/pjlib/src/pjlib-test/util.c $
+ *
+ * 3 10/29/05 11:51a Bennylp
+ * Version 0.3-pre2.
+ *
+ * 2 10/14/05 12:26a Bennylp
+ * Finished error code framework, some fixes in ioqueue, etc. Pretty
+ * major.
+ *
+ * 1 10/12/05 10:00a Bennylp
+ * Created.
+ *
+ */
+#include "test.h"
+#include <pjlib.h>
+
+void app_perror(const char *msg, pj_status_t rc)
+{
+ char errbuf[256];
+
+ PJ_CHECK_STACK();
+
+ pj_strerror(rc, errbuf, sizeof(errbuf));
+ PJ_LOG(1,("test", "%s: [pj_status_t=%d] %s", msg, rc, errbuf));
+}
+
+#define SERVER 0
+#define CLIENT 1
+
+pj_status_t app_socket(int family, int type, int proto, int port,
+ pj_sock_t *ptr_sock)
+{
+ pj_sockaddr_in addr;
+ pj_sock_t sock;
+ pj_status_t rc;
+
+ rc = pj_sock_socket(family, type, proto, &sock);
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ pj_memset(&addr, 0, sizeof(addr));
+ addr.sin_family = (pj_uint16_t)family;
+ addr.sin_port = (short)(port!=-1 ? pj_htons((pj_uint16_t)port) : 0);
+ rc = pj_sock_bind(sock, &addr, sizeof(addr));
+ if (rc != PJ_SUCCESS)
+ return rc;
+
+ if (type == PJ_SOCK_STREAM) {
+ rc = pj_sock_listen(sock, 5);
+ if (rc != PJ_SUCCESS)
+ return rc;
+ }
+
+ *ptr_sock = sock;
+ return PJ_SUCCESS;
+}
+
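+/*
+ * Emulate socketpair(): create two sockets, bind and (for TCP) listen on
+ * the server side, connect the client to 127.0.0.1, and accept to obtain
+ * a connected pair. This gives the tests a producer/consumer pair even
+ * on platforms without a native socketpair().
+ */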
+pj_status_t app_socketpair(int family, int type, int protocol,
+ pj_sock_t *serverfd, pj_sock_t *clientfd)
+{
+ int i;
+ static unsigned short port = 11000;
+ pj_sockaddr_in addr;
+ pj_str_t s;
+ pj_status_t rc = 0;
+ pj_sock_t sock[2];
+
+ /* Create both sockets. */
+ for (i=0; i<2; ++i) {
+ rc = pj_sock_socket(family, type, protocol, &sock[i]);
+ if (rc != PJ_SUCCESS) {
+ if (i==1)
+ pj_sock_close(sock[0]);
+ return rc;
+ }
+ }
+
+ /* Retry bind */
+ pj_memset(&addr, 0, sizeof(addr));
+ addr.sin_family = PJ_AF_INET;
+ for (i=0; i<5; ++i) {
+ addr.sin_port = pj_htons(port++);
+ rc = pj_sock_bind(sock[SERVER], &addr, sizeof(addr));
+ if (rc == PJ_SUCCESS)
+ break;
+ }
+
+ if (rc != PJ_SUCCESS)
+ goto on_error;
+
+ /* For TCP, listen the socket. */
+ if (type == PJ_SOCK_STREAM) {
+ rc = pj_sock_listen(sock[SERVER], PJ_SOMAXCONN);
+ if (rc != PJ_SUCCESS)
+ goto on_error;
+ }
+
+ /* Connect client socket. */
+ addr.sin_addr = pj_inet_addr(pj_cstr(&s, "127.0.0.1"));
+ rc = pj_sock_connect(sock[CLIENT], &addr, sizeof(addr));
+ if (rc != PJ_SUCCESS)
+ goto on_error;
+
+ /* For TCP, must accept(), and get the new socket. */
+ if (type == PJ_SOCK_STREAM) {
+ pj_sock_t newserver;
+
+ rc = pj_sock_accept(sock[SERVER], &newserver, NULL, NULL);
+ if (rc != PJ_SUCCESS)
+ goto on_error;
+
+ /* Replace server socket with new socket. */
+ pj_sock_close(sock[SERVER]);
+ sock[SERVER] = newserver;
+ }
+
+ *serverfd = sock[SERVER];
+ *clientfd = sock[CLIENT];
+
+ return rc;
+
+on_error:
+ for (i=0; i<2; ++i)
+ pj_sock_close(sock[i]);
+ return rc;
+}
diff --git a/pjlib/src/pjlib-test/xml.c b/pjlib/src/pjlib-test/xml.c
new file mode 100644
index 00000000..9a7c0a1e
--- /dev/null
+++ b/pjlib/src/pjlib-test/xml.c
@@ -0,0 +1,127 @@
+#include "test.h"
+
+
+#if INCLUDE_XML_TEST
+
+#include <pj/xml.h>
+#include <pjlib.h>
+
+#define THIS_FILE "xml_test"
+
+static const char *xml_doc[] =
+{
+" <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
+" <p:pidf-full xmlns=\"urn:ietf:params:xml:ns:pidf\"\n"
+" xmlns:p=\"urn:ietf:params:xml:ns:pidf-diff\"\n"
+" xmlns:r=\"urn:ietf:params:xml:ns:pidf:rpid\"\n"
+" xmlns:c=\"urn:ietf:params:xml:ns:pidf:caps\"\n"
+" entity=\"pres:someone@example.com\"\n"
+" version=\"567\">\n"
+"\n"
+" <tuple id=\"sg89ae\">\n"
+" <status>\n"
+" <basic>open</basic>\n"
+" <r:relationship>assistant</r:relationship>\n"
+" </status>\n"
+" <c:servcaps>\n"
+" <c:audio>true</c:audio>\n"
+" <c:video>false</c:video>\n"
+" <c:message>true</c:message>\n"
+" </c:servcaps>\n"
+" <contact priority=\"0.8\">tel:09012345678</contact>\n"
+" </tuple>\n"
+"\n"
+" <tuple id=\"cg231jcr\">\n"
+" <status>\n"
+" <basic>open</basic>\n"
+" </status>\n"
+" <contact priority=\"1.0\">im:pep@example.com</contact>\n"
+" </tuple>\n"
+"\n"
+" <tuple id=\"r1230d\">\n"
+" <status>\n"
+" <basic>closed</basic>\n"
+" <r:activity>meeting</r:activity>\n"
+" </status>\n"
+" <r:homepage>http://example.com/~pep/</r:homepage>\n"
+" <r:icon>http://example.com/~pep/icon.gif</r:icon>\n"
+" <r:card>http://example.com/~pep/card.vcd</r:card>\n"
+" <contact priority=\"0.9\">sip:pep@example.com</contact>\n"
+" </tuple>\n"
+"\n"
+" <note xml:lang=\"en\">Full state presence document</note>\n"
+"\n"
+" <r:person>\n"
+" <r:status>\n"
+" <r:activities>\n"
+" <r:on-the-phone/>\n"
+" <r:busy/>\n"
+" </r:activities>\n"
+" </r:status>\n"
+" </r:person>\n"
+"\n"
+" <r:device id=\"urn:esn:600b40c7\">\n"
+" <r:status>\n"
+" <c:devcaps>\n"
+" <c:mobility>\n"
+" <c:supported>\n"
+" <c:mobile/>\n"
+" </c:supported>\n"
+" </c:mobility>\n"
+" </c:devcaps>\n"
+" </r:status>\n"
+" </r:device>\n"
+"\n"
+" </p:pidf-full>\n"
+}
+;
+
+static int xml_parse_print_test(const char *doc)
+{
+ pj_str_t msg;
+ pj_pool_t *pool;
+ pj_xml_node *root;
+ char *output;
+ int output_len;
+
+ pool = pj_pool_create(mem, "xml", 4096, 1024, NULL);
+ pj_strdup2(pool, &msg, doc);
+ root = pj_xml_parse(pool, msg.ptr, msg.slen);
+ if (!root) {
+ PJ_LOG(1, (THIS_FILE, " Error: unable to parse XML"));
+ return -10;
+ }
+
+ output = (char*)pj_pool_alloc(pool, msg.slen + 512);
+ pj_memset(output, 0, msg.slen+512);
+ output_len = pj_xml_print(root, output, msg.slen+512, PJ_TRUE);
+ if (output_len < 1) {
+ PJ_LOG(1, (THIS_FILE, " Error: buffer too small to print XML file"));
+ return -20;
+ }
+ output[output_len] = '\0';
+
+
+ pj_pool_release(pool);
+ return 0;
+}
+
+int xml_test()
+{
+ unsigned i;
+ for (i=0; i<sizeof(xml_doc)/sizeof(xml_doc[0]); ++i) {
+ int status;
+ if ((status=xml_parse_print_test(xml_doc[i])) != 0)
+ return status;
+ }
+ return 0;
+}
+
+#else
+/* To prevent warning about "translation unit is empty"
+ * when this test is disabled.
+ */
+int dummy_xml_test;
+#endif /* INCLUDE_XML_TEST */
+
+