This is an automated email from the git hooks/post-receive script.
firstyear pushed a commit to branch master
in repository nunc-stans.
commit 2df1fcbeb3b76e0fd84105e218768ba896bf24bb
Author: William Brown <firstyear(a)redhat.com>
Date: Thu Feb 16 13:53:34 2017 +1000
Ticket 80 - Decouple nunc-stans from NSPR
Bug Description: NSPR uses pthreads underneath, so why don't we?
We also want to use the cleaner inttypes.h. Pthreads is just as
portable as, and cleaner than, NSPR.
Fix Description: Replace NSPR in locations except for IO events.
This includes locks and threads. This has a benefit: We have
doubled the performance of nunc-stans. We previously saw ~1700
connections per second, this now exceeds ~3800 per second with
lock free, and ~1900 with a locked queue.
https://pagure.io/nunc-stans/issue/80
Author: wibrown
Review by: mreynolds (thanks!)
---
ns_event_fw.h | 23 ++--
ns_private.h | 2 +
ns_thrpool.c | 290 +++++++++++++++++++++++++--------------------
nunc-stans.h | 6 +-
tests/cmocka/stress_test.c | 91 +++++++-------
5 files changed, 226 insertions(+), 186 deletions(-)
diff --git a/ns_event_fw.h b/ns_event_fw.h
index 20f7f55..26181f2 100644
--- a/ns_event_fw.h
+++ b/ns_event_fw.h
@@ -34,6 +34,11 @@
#include "ns_private.h"
+/* For locks and cond var */
+#include <pthread.h>
+
+#include <pthread.h>
+
struct ns_event_fw_ctx_t;
struct ns_event_fw_fd_t;
struct ns_event_fw_time_t;
@@ -64,17 +69,19 @@ typedef struct ns_event_fw_sig_t ns_event_fw_sig_t;
* NEEDS_DELETE -> DELETED
*/
-#define NS_JOB_NEEDS_DELETE 2
-#define NS_JOB_DELETED 3
-#define NS_JOB_NEEDS_ARM 4
-#define NS_JOB_ARMED 5
-#define NS_JOB_RUNNING 6
-#define NS_JOB_WAITING 7
+typedef enum _ns_job_state {
+ NS_JOB_NEEDS_DELETE = 2,
+ NS_JOB_DELETED = 3,
+ NS_JOB_NEEDS_ARM = 4,
+ NS_JOB_ARMED = 5,
+ NS_JOB_RUNNING = 6,
+ NS_JOB_WAITING = 7,
+} ns_job_state_t;
/* this is our "kitchen sink" pblock/glue object that is the main
interface between the app/thread pool/event framework */
typedef struct ns_job_t {
- PRMonitor *monitor;
+ pthread_mutex_t *monitor;
struct ns_thrpool_t *tp;
ns_job_func_t func;
struct ns_job_data_t *data;
@@ -86,7 +93,7 @@ typedef struct ns_job_t {
ns_event_fw_time_t *ns_event_fw_time; /* event framework timer event object */
ns_event_fw_sig_t *ns_event_fw_sig; /* event framework signal event object */
ns_job_type_t output_job_type; /* info about event that triggered the callback */
- PRInt32 state; /* What state the job is currently in. */
+ ns_job_state_t state; /* What state the job is currently in. */
ns_event_fw_ctx_t *ns_event_fw_ctx;
void *(*alloc_event_context)(size_t size, struct ns_job_t *job);
void (*free_event_context)(void *ev_ctx, struct ns_job_t *job);
diff --git a/ns_private.h b/ns_private.h
index a7ed23b..bd31843 100644
--- a/ns_private.h
+++ b/ns_private.h
@@ -42,6 +42,8 @@
#include "prmon.h"
#include "nunc-stans.h"
+#include <inttypes.h>
+
/**
* Forward declaration of the thread struct - internal
*
diff --git a/ns_thrpool.c b/ns_thrpool.c
index 10e1233..cbcb03d 100644
--- a/ns_thrpool.c
+++ b/ns_thrpool.c
@@ -53,13 +53,14 @@
#endif
#include "nspr.h"
-#include "pratom.h"
#include "private/pprio.h"
#include "ns_event_fw.h"
/* SDS contains the lock free queue wrapper */
#include <sds.h>
+#include <assert.h>
+
/*
* Threadpool
@@ -67,22 +68,22 @@
struct ns_thrpool_t {
sds_lqueue *work_q;
sds_lqueue *event_q;
- PRInt32 shutdown;
- PRInt32 shutdown_event_loop;
- PRLock *work_q_lock;
- PRCondVar *work_q_cv;
+ int32_t shutdown;
+ int32_t shutdown_event_loop;
+ pthread_cond_t work_q_cv;
+ pthread_mutex_t work_q_lock;
sds_queue *thread_stack;
- PRThread *event_thread; /* the event loop thread */
+ pthread_t event_thread;
PRFileDesc *event_q_wakeup_pipe_read;
PRFileDesc *event_q_wakeup_pipe_write;
ns_job_t *event_q_wakeup_job;
ns_event_fw_t *ns_event_fw;
ns_event_fw_ctx_t *ns_event_fw_ctx;
- PRUint32 stacksize;
+ size_t stacksize;
};
struct ns_thread_t {
- PRThread *thr; /* the thread */
+ pthread_t thr; /* the thread */
struct ns_thrpool_t *tp; /* pointer back to thread pool */
};
@@ -161,18 +162,22 @@ os_free(void *ptr)
free(ptr);
}
-PRInt32
+int32_t
ns_thrpool_is_shutdown(struct ns_thrpool_t *tp)
{
/* We need to barrier this somehow? */
- return tp->shutdown;
+ int32_t result = 0;
+ __atomic_load(&(tp->shutdown), &result, __ATOMIC_SEQ_CST);
+ return result;
}
-PRInt32
+int32_t
ns_thrpool_is_event_shutdown(struct ns_thrpool_t *tp)
{
/* We need to barrier this somehow? */
- return tp->shutdown_event_loop;
+ int32_t result = 0;
+ __atomic_load(&(tp->shutdown_event_loop), &result, __ATOMIC_SEQ_CST);
+ return result;
}
@@ -190,7 +195,7 @@ job_queue_cleanup(void *arg) {
static void
internal_ns_job_done(ns_job_t *job)
{
- PR_EnterMonitor(job->monitor);
+ pthread_mutex_lock(job->monitor);
#ifdef DEBUG_FSM
ns_log(LOG_DEBUG, "internal_ns_job_done %x state %d moving to
NS_JOB_DELETED\n", job, job->state);
#endif
@@ -215,8 +220,9 @@ internal_ns_job_done(ns_job_t *job)
job->done_cb(job);
}
- PR_ExitMonitor(job->monitor);
- PR_DestroyMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
+ pthread_mutex_destroy(job->monitor);
+ ns_free(job->monitor);
ns_free(job);
}
@@ -225,7 +231,7 @@ internal_ns_job_done(ns_job_t *job)
static void
internal_ns_job_rearm(ns_job_t *job)
{
- PR_EnterMonitor(job->monitor);
+ pthread_mutex_lock(job->monitor);
PR_ASSERT(job->state == NS_JOB_NEEDS_ARM);
/* Don't think I need to check persistence here, it could be the first arm ...
*/
#ifdef DEBUG_FSM
@@ -235,13 +241,12 @@ internal_ns_job_rearm(ns_job_t *job)
if (NS_JOB_IS_IO(job->job_type) || NS_JOB_IS_TIMER(job->job_type) ||
NS_JOB_IS_SIGNAL(job->job_type)) {
event_q_notify(job);
- PR_ExitMonitor(job->monitor);
} else {
/* if this is a non event task, just queue it on the work q */
/* Prevents an un-necessary queue / dequeue to the event_q */
work_q_notify(job);
- PR_ExitMonitor(job->monitor);
}
+ pthread_mutex_unlock(job->monitor);
}
static void
@@ -255,7 +260,7 @@ work_job_execute(ns_job_t *job)
* DELETED! Crashes abound, you have been warned ...
*/
PR_ASSERT(job);
- PR_EnterMonitor(job->monitor);
+ pthread_mutex_lock(job->monitor);
#ifdef DEBUG_FSM
ns_log(LOG_DEBUG, "work_job_execute %x state %d moving to
NS_JOB_RUNNING\n", job, job->state);
#endif
@@ -279,7 +284,7 @@ work_job_execute(ns_job_t *job)
*/
if (NS_JOB_IS_IO(job->job_type) || NS_JOB_IS_SIGNAL(job->job_type)) {
job->state = NS_JOB_ARMED;
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
return;
} else {
job->state = NS_JOB_NEEDS_ARM;
@@ -291,7 +296,7 @@ work_job_execute(ns_job_t *job)
#ifdef DEBUG_FSM
ns_log(LOG_DEBUG, "work_job_execute %x state %d job func complete, sending
to job_done...\n", job, job->state);
#endif
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
internal_ns_job_done(job);
/* MUST NOT ACCESS JOB AGAIN.*/
} else if (job->state == NS_JOB_NEEDS_ARM) {
@@ -299,7 +304,7 @@ work_job_execute(ns_job_t *job)
ns_log(LOG_DEBUG, "work_job_execute %x state %d job func complete, sending
to rearm...\n", job, job->state);
#endif
/* Rearm the job! */
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
internal_ns_job_rearm(job);
} else {
#ifdef DEBUG_FSM
@@ -309,7 +314,7 @@ work_job_execute(ns_job_t *job)
PR_ASSERT(!NS_JOB_IS_PERSIST(job->job_type));
/* We are now idle, set waiting. */
job->state = NS_JOB_WAITING;
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
}
/* MUST NOT ACCESS JOB AGAIN */
}
@@ -317,16 +322,16 @@ work_job_execute(ns_job_t *job)
static void
work_q_wait(ns_thrpool_t *tp)
{
- PR_Lock(tp->work_q_lock);
- PR_WaitCondVar(tp->work_q_cv, PR_INTERVAL_NO_TIMEOUT);
- PR_Unlock(tp->work_q_lock);
+ pthread_mutex_lock(&(tp->work_q_lock));
+ pthread_cond_wait(&(tp->work_q_cv), &(tp->work_q_lock));
+ pthread_mutex_unlock(&(tp->work_q_lock));
}
static void
work_q_notify(ns_job_t *job)
{
PR_ASSERT(job);
- PR_EnterMonitor(job->monitor);
+ pthread_mutex_lock(job->monitor);
#ifdef DEBUG_FSM
ns_log(LOG_DEBUG, "work_q_notify %x state %d\n", job, job->state);
#endif
@@ -334,23 +339,23 @@ work_q_notify(ns_job_t *job)
if (job->state != NS_JOB_ARMED) {
/* Maybe we should return some error here? */
ns_log(LOG_ERR, "work_q_notify %x state %d is not ARMED, cannot
queue!\n", job, job->state);
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
return;
}
/* MUST NOT ACCESS job after enqueue. So we stash tp.*/
ns_thrpool_t *ltp = job->tp;
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
sds_lqueue_enqueue(ltp->work_q, (void *)job);
- PR_Lock(ltp->work_q_lock);
- PR_NotifyCondVar(ltp->work_q_cv);
- PR_Unlock(ltp->work_q_lock);
+ pthread_mutex_lock(&(ltp->work_q_lock));
+ pthread_cond_signal(&(ltp->work_q_cv));
+ pthread_mutex_unlock(&(ltp->work_q_lock));
PR_Sleep(PR_INTERVAL_NO_WAIT); /* yield to allow worker thread to pick up job */
}
/*
* worker thread function
*/
-static void
+static void *
worker_thread_func(void *arg)
{
ns_thread_t *thr = (ns_thread_t *)arg;
@@ -377,6 +382,7 @@ worker_thread_func(void *arg)
}
/* With sds, it cleans the thread on join automatically. */
+ return NULL;
}
/*
@@ -386,13 +392,13 @@ static void
update_event(ns_job_t *job)
{
PR_ASSERT(job);
- PR_EnterMonitor(job->monitor);
+ pthread_mutex_lock(job->monitor);
#ifdef DEBUG_FSM
ns_log(LOG_DEBUG, "update_event %x state %d\n", job, job->state);
#endif
PR_ASSERT(job->state == NS_JOB_NEEDS_DELETE || job->state == NS_JOB_ARMED);
if (job->state == NS_JOB_NEEDS_DELETE) {
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
internal_ns_job_done(job);
return;
} else if (NS_JOB_IS_IO(job->job_type) || job->ns_event_fw_fd) {
@@ -401,7 +407,7 @@ update_event(ns_job_t *job)
} else {
job->tp->ns_event_fw->ns_event_fw_mod_io(job->tp->ns_event_fw_ctx, job);
}
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
/* We need these returns to prevent a race on the next else if condition when we
release job->monitor */
return;
} else if (NS_JOB_IS_TIMER(job->job_type) || job->ns_event_fw_time) {
@@ -410,7 +416,7 @@ update_event(ns_job_t *job)
} else {
job->tp->ns_event_fw->ns_event_fw_mod_timer(job->tp->ns_event_fw_ctx,
job);
}
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
return;
} else if (NS_JOB_IS_SIGNAL(job->job_type) || job->ns_event_fw_sig) {
if (!job->ns_event_fw_sig) {
@@ -418,15 +424,15 @@ update_event(ns_job_t *job)
} else {
job->tp->ns_event_fw->ns_event_fw_mod_signal(job->tp->ns_event_fw_ctx,
job);
}
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
return;
} else {
/* It's a "run now" job. */
if (NS_JOB_IS_THREAD(job->job_type)) {
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
work_q_notify(job);
} else {
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
event_q_notify(job);
}
}
@@ -446,7 +452,14 @@ event_q_wait(ns_thrpool_t *tp __attribute__((unused)))
static void
event_q_wake(ns_thrpool_t *tp)
{
- PRInt32 len;
+ int32_t len;
+
+ /* Rather than trying to make a new event, tell the event loop to exit with no
+ * events.
+ */
+#ifdef DEBUG_FSM
+ ns_log(LOG_DEBUG, "event_q_wake attempting to wake event queue.\n");
+#endif
/* NSPR I/O doesn't allow non-blocking signal pipes, so use write instead of
PR_Write */
len = write(PR_FileDesc2NativeHandle(tp->event_q_wakeup_pipe_write),
@@ -461,6 +474,9 @@ event_q_wake(ns_thrpool_t *tp)
}
}
PR_Sleep(PR_INTERVAL_NO_WAIT); /* yield to allow event thread to pick up event */
+#ifdef DEBUG_FSM
+ ns_log(LOG_DEBUG, "event_q_wake result. 0\n");
+#endif
}
static void
@@ -470,7 +486,7 @@ event_q_notify(ns_job_t *job)
/* if we are being called from a thread other than the
event loop thread, we have to notify that thread to
perform the event work */
- if (PR_GetCurrentThread() == tp->event_thread) {
+ if (pthread_equal(tp->event_thread, pthread_self()) != 0) {
/* If we are being run from the same thread as the event
loop thread, we can just update the event here */
update_event(job);
@@ -520,26 +536,30 @@ get_new_event_requests(ns_thrpool_t *tp)
}
}
-static void
+static void *
event_loop_thread_func(void *arg)
{
struct ns_thrpool_t *tp = (struct ns_thrpool_t *)arg;
+ int rc;
sds_lqueue_tprep(tp->event_q);
while (!ns_thrpool_is_event_shutdown(tp)) {
- int rc;
/* get new event requests */
get_new_event_requests(tp);
/* process events */
/* return 1 - no events ; 0 - normal exit ; -1 - error */
rc = tp->ns_event_fw->ns_event_fw_loop(tp->ns_event_fw_ctx);
+#ifdef DEBUG_FSM
+ ns_log(LOG_DEBUG, "event_loop_thread_func woke event queue. rc=%d\n",
rc);
+#endif
if (rc == -1) { /* error */
} else if (rc == 0) { /* exiting */
} else { /* no events to process */
event_q_wait(tp);
}
}
+ return NULL;
}
/*
@@ -564,14 +584,14 @@ event_cb(ns_job_t *job)
*/
/* There is no guarantee this won't be called once we start to enter the
shutdown, especially with timers .... */
- PR_EnterMonitor(job->monitor);
+ pthread_mutex_lock(job->monitor);
PR_ASSERT(job->state == NS_JOB_ARMED || job->state == NS_JOB_NEEDS_DELETE);
if (job->state == NS_JOB_ARMED && NS_JOB_IS_THREAD(job->job_type)) {
#ifdef DEBUG_FSM
ns_log(LOG_DEBUG, "event_cb %x state %d threaded, send to work_q\n",
job, job->state);
#endif
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
work_q_notify(job);
} else if (job->state == NS_JOB_NEEDS_DELETE) {
#ifdef DEBUG_FSM
@@ -582,14 +602,14 @@ event_cb(ns_job_t *job)
* It's here because it's been QUEUED for deletion and *may* be coming
* from the thrpool destroy thread!
*/
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
} else {
#ifdef DEBUG_FSM
ns_log(LOG_DEBUG, "event_cb %x state %d non-threaded, execute right
meow\n", job, job->state);
#endif
/* Not threaded, execute now! */
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
work_job_execute(job);
/* MUST NOT ACCESS JOB FROM THIS POINT */
}
@@ -598,7 +618,7 @@ event_cb(ns_job_t *job)
static void
wakeup_cb(ns_job_t *job)
{
- PRInt32 len;
+ int32_t len;
char buf[1];
#ifdef DEBUG_FSM
@@ -644,7 +664,14 @@ static ns_job_t *
new_ns_job(ns_thrpool_t *tp, PRFileDesc *fd, ns_job_type_t job_type, ns_job_func_t func,
struct ns_job_data_t *data)
{
ns_job_t *job = ns_calloc(1, sizeof(ns_job_t));
- job->monitor = PR_NewMonitor();
+ job->monitor = ns_calloc(1, sizeof(pthread_mutex_t));
+
+ pthread_mutexattr_t *monitor_attr = ns_calloc(1, sizeof(pthread_mutexattr_t));
+ pthread_mutexattr_init(monitor_attr);
+ pthread_mutexattr_settype(monitor_attr, PTHREAD_MUTEX_RECURSIVE);
+ assert(pthread_mutex_init(job->monitor, monitor_attr) == 0);
+ ns_free(monitor_attr);
+
job->tp = tp;
/* We have to have this due to our obsession of hiding struct contents ... */
/* It's only used in tevent anyway .... */
@@ -701,22 +728,26 @@ ns_job_done(ns_job_t *job)
return PR_FAILURE;
}
- PR_EnterMonitor(job->monitor);
+ /* Get the shutdown state ONCE at the start, atomically */
+ int32_t shutdown_state = ns_thrpool_is_shutdown(job->tp);
+
+ pthread_mutex_lock(job->monitor);
+
if (job->state == NS_JOB_NEEDS_DELETE || job->state == NS_JOB_DELETED){
/* Just return if the job has been marked for deletion */
#ifdef DEBUG_FSM
- ns_log(LOG_DEBUG, "ns_job_done %x tp shutdown -> %x state %d return
early\n", job, job->tp->shutdown, job->state);
+ ns_log(LOG_DEBUG, "ns_job_done %x tp shutdown -> %x state %d return
early\n", job, shutdown_state, job->state);
#endif
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
return PR_SUCCESS;
}
/* Do not allow an armed job to be removed UNLESS the server is shutting down */
- if (job->state == NS_JOB_ARMED && !ns_thrpool_is_shutdown(job->tp)) {
+ if (job->state == NS_JOB_ARMED && !shutdown_state) {
#ifdef DEBUG_FSM
ns_log(LOG_DEBUG, "ns_job_done %x tp shutdown -> false state %d failed to
mark as done\n", job, job->state);
#endif
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
return PR_FAILURE;
}
@@ -727,13 +758,13 @@ ns_job_done(ns_job_t *job)
ns_log(LOG_DEBUG, "ns_job_done %x tp shutdown -> false state %d setting
to async NS_JOB_NEEDS_DELETE\n", job, job->state);
#endif
job->state = NS_JOB_NEEDS_DELETE;
- PR_ExitMonitor(job->monitor);
- } else if (!ns_thrpool_is_shutdown(job->tp)) {
+ pthread_mutex_unlock(job->monitor);
+ } else if (!shutdown_state) {
#ifdef DEBUG_FSM
ns_log(LOG_DEBUG, "ns_job_done %x tp shutdown -> false state %d setting
NS_JOB_NEEDS_DELETE and queuing\n", job, job->state);
#endif
job->state = NS_JOB_NEEDS_DELETE;
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
event_q_notify(job);
} else {
#ifdef DEBUG_FSM
@@ -741,7 +772,7 @@ ns_job_done(ns_job_t *job)
#endif
job->state = NS_JOB_NEEDS_DELETE;
/* We are shutting down, just remove it! */
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
internal_ns_job_done(job);
}
return PR_SUCCESS;
@@ -988,13 +1019,13 @@ void *
ns_job_get_data(ns_job_t *job)
{
PR_ASSERT(job);
- PR_EnterMonitor(job->monitor);
+ pthread_mutex_lock(job->monitor);
PR_ASSERT(job->state != NS_JOB_DELETED);
if (job->state != NS_JOB_DELETED) {
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
return job->data;
} else {
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
return NULL;
}
}
@@ -1003,14 +1034,14 @@ PRStatus
ns_job_set_data(ns_job_t *job, void *data)
{
PR_ASSERT(job);
- PR_EnterMonitor(job->monitor);
+ pthread_mutex_lock(job->monitor);
PR_ASSERT(job->state == NS_JOB_WAITING || job->state == NS_JOB_RUNNING);
if (job->state == NS_JOB_WAITING || job->state == NS_JOB_RUNNING ) {
job->data = data;
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
return PR_SUCCESS;
} else {
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
return PR_FAILURE;
}
}
@@ -1019,13 +1050,13 @@ ns_thrpool_t *
ns_job_get_tp(ns_job_t *job)
{
PR_ASSERT(job);
- PR_EnterMonitor(job->monitor);
+ pthread_mutex_lock(job->monitor);
PR_ASSERT(job->state != NS_JOB_DELETED);
if (job->state != NS_JOB_DELETED) {
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
return job->tp;
} else {
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
return NULL;
}
}
@@ -1034,13 +1065,13 @@ ns_job_type_t
ns_job_get_output_type(ns_job_t *job)
{
PR_ASSERT(job);
- PR_EnterMonitor(job->monitor);
+ pthread_mutex_lock(job->monitor);
PR_ASSERT(job->state == NS_JOB_RUNNING);
if (job->state == NS_JOB_RUNNING) {
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
return job->output_job_type;
} else {
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
return 0;
}
}
@@ -1049,13 +1080,13 @@ ns_job_type_t
ns_job_get_type(ns_job_t *job)
{
PR_ASSERT(job);
- PR_EnterMonitor(job->monitor);
+ pthread_mutex_lock(job->monitor);
PR_ASSERT(job->state != NS_JOB_DELETED);
if (job->state != NS_JOB_DELETED) {
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
return job->job_type;
} else {
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
return 0;
}
}
@@ -1064,13 +1095,13 @@ PRFileDesc *
ns_job_get_fd(ns_job_t *job)
{
PR_ASSERT(job);
- PR_EnterMonitor(job->monitor);
+ pthread_mutex_lock(job->monitor);
PR_ASSERT(job->state != NS_JOB_DELETED);
if (job->state != NS_JOB_DELETED) {
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
return job->fd;
} else {
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
return NULL;
}
}
@@ -1079,14 +1110,14 @@ PRStatus
ns_job_set_done_cb(struct ns_job_t *job, ns_job_func_t func)
{
PR_ASSERT(job);
- PR_EnterMonitor(job->monitor);
+ pthread_mutex_lock(job->monitor);
PR_ASSERT(job->state == NS_JOB_WAITING || job->state == NS_JOB_RUNNING );
if (job->state == NS_JOB_WAITING || job->state == NS_JOB_RUNNING ) {
job->done_cb = func;
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
return PR_SUCCESS;
} else {
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
return PR_FAILURE;
}
}
@@ -1100,7 +1131,7 @@ PRStatus
ns_job_rearm(ns_job_t *job)
{
PR_ASSERT(job);
- PR_EnterMonitor(job->monitor);
+ pthread_mutex_lock(job->monitor);
PR_ASSERT(job->state == NS_JOB_WAITING || job->state == NS_JOB_RUNNING);
if (job->state == NS_JOB_WAITING) {
#ifdef DEBUG_FSM
@@ -1108,7 +1139,7 @@ ns_job_rearm(ns_job_t *job)
#endif
job->state = NS_JOB_NEEDS_ARM;
internal_ns_job_rearm(job);
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
return PR_SUCCESS;
} else if ( !NS_JOB_IS_PERSIST(job->job_type) && job->state ==
NS_JOB_RUNNING) {
/* For this to be called, and NS_JOB_RUNNING, we *must* be the callback thread!
*/
@@ -1117,10 +1148,10 @@ ns_job_rearm(ns_job_t *job)
ns_log(LOG_DEBUG, "ns_rearm_job %x state %d setting
NS_JOB_NEEDS_ARM\n", job, job->state);
#endif
job->state = NS_JOB_NEEDS_ARM;
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
return PR_SUCCESS;
} else {
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
return PR_FAILURE;
}
/* Unreachable code .... */
@@ -1157,20 +1188,17 @@ static void
setup_event_q_wakeup(ns_thrpool_t *tp)
{
ns_job_t *job;
- PR_CreatePipe(&tp->event_q_wakeup_pipe_read,
- &tp->event_q_wakeup_pipe_write);
+ PR_CreatePipe(&tp->event_q_wakeup_pipe_read,
&tp->event_q_wakeup_pipe_write);
/* setting options is not supported on NSPR pipes - use fcntl
PRSocketOptionData prsod = {PR_SockOpt_Nonblocking, {PR_TRUE}};
PR_SetSocketOption(tp->event_q_wakeup_pipe_read, &prsod);
PR_SetSocketOption(tp->event_q_wakeup_pipe_write, &prsod);
*/
- if (fcntl(PR_FileDesc2NativeHandle(tp->event_q_wakeup_pipe_read),
- F_SETFD, O_NONBLOCK) == -1) {
+ if (fcntl(PR_FileDesc2NativeHandle(tp->event_q_wakeup_pipe_read), F_SETFD,
O_NONBLOCK) == -1) {
ns_log(LOG_ERR, "setup_event_q_wakeup(): could not make read pipe
non-blocking: %d\n",
PR_GetOSError());
}
- if (fcntl(PR_FileDesc2NativeHandle(tp->event_q_wakeup_pipe_write),
- F_SETFD, O_NONBLOCK) == -1) {
+ if (fcntl(PR_FileDesc2NativeHandle(tp->event_q_wakeup_pipe_write), F_SETFD,
O_NONBLOCK) == -1) {
ns_log(LOG_ERR, "setup_event_q_wakeup(): could not make write pipe
non-blocking: %d\n",
PR_GetOSError());
}
@@ -1179,7 +1207,7 @@ setup_event_q_wakeup(ns_thrpool_t *tp)
NS_JOB_READ|NS_JOB_PERSIST|NS_JOB_PRESERVE_FD,
wakeup_cb, NULL);
- PR_EnterMonitor(job->monitor);
+ pthread_mutex_lock(job->monitor);
/* The event_queue wakeup is ready, arm it. */
#ifdef DEBUG_FSM
@@ -1192,7 +1220,7 @@ setup_event_q_wakeup(ns_thrpool_t *tp)
/* Stash the wakeup job in tp so we can release it later. */
tp->event_q_wakeup_job = job;
- PR_ExitMonitor(job->monitor);
+ pthread_mutex_unlock(job->monitor);
}
/* Initialize the thrpool config */
@@ -1288,10 +1316,10 @@ ns_thrpool_process_config(struct ns_thrpool_config *tp_config)
ns_thrpool_t *
ns_thrpool_new(struct ns_thrpool_config *tp_config)
{
+ pthread_attr_t attr;
ns_thrpool_t *tp = NULL;
ns_thread_t *thr;
- PRThread *event_thr;
- int ii;
+ size_t ii;
if(ns_thrpool_process_config(tp_config) == -1){
ns_log(LOG_ERR, "ns_thrpool_new(): config has not been properly
initialized\n");
@@ -1314,10 +1342,10 @@ ns_thrpool_new(struct ns_thrpool_config *tp_config)
goto failed;
}
- if (!(tp->work_q_lock = PR_NewLock())) {
+ if (pthread_mutex_init(&(tp->work_q_lock), NULL) != 0) {
goto failed;
}
- if (!(tp->work_q_cv = PR_NewCondVar(tp->work_q_lock))) {
+ if (pthread_cond_init(&(tp->work_q_cv), NULL) != 0) {
goto failed;
}
@@ -1335,24 +1363,29 @@ ns_thrpool_new(struct ns_thrpool_config *tp_config)
setup_event_q_wakeup(tp);
+ /* Create the thread attributes. */
+ if (pthread_attr_init(&attr) != 0) {
+ goto failed;
+ }
+ /* Setup the stack size. */
+ if (tp_config->stacksize > 0) {
+ if (pthread_attr_setstacksize(&attr, tp_config->stacksize) != 0) {
+ goto failed;
+ }
+ }
+
for (ii = 0; ii < tp_config->max_threads; ++ii) {
thr = ns_calloc(1, sizeof(ns_thread_t));
PR_ASSERT(thr);
thr->tp = tp;
- thr->thr = PR_CreateThread(PR_USER_THREAD, worker_thread_func,
- thr, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
- PR_JOINABLE_THREAD, tp_config->stacksize);
- PR_ASSERT(thr->thr);
+ assert(pthread_create(&(thr->thr), &attr, &worker_thread_func,
thr) == 0);
sds_queue_enqueue(tp->thread_stack, thr);
}
- event_thr = PR_CreateThread(PR_USER_THREAD, event_loop_thread_func,
- tp, PR_PRIORITY_HIGH, PR_GLOBAL_THREAD,
- PR_JOINABLE_THREAD, tp_config->stacksize);
- PR_ASSERT(event_thr);
+ assert(pthread_create(&(tp->event_thread), &attr,
&event_loop_thread_func, tp) == 0);
/* We keep the event thread separate from the stack of worker threads. */
- tp->event_thread = event_thr;
+ // tp->event_thread = event_thr;
return tp;
failed:
@@ -1363,12 +1396,13 @@ failed:
void
ns_thrpool_destroy(struct ns_thrpool_t *tp)
{
+ void *retval = NULL;
#ifdef DEBUG_FSM
ns_log(LOG_DEBUG, "ns_thrpool_destroy\n");
#endif
if (tp) {
/* Set the flag to shutdown the event loop. */
- PR_AtomicSet(&tp->shutdown_event_loop, 1);
+ __atomic_add_fetch(&(tp->shutdown_event_loop), 1, __ATOMIC_SEQ_CST);
/* Finish the event queue wakeup job. This has the
* side effect of waking up the event loop thread, which
@@ -1378,21 +1412,24 @@ ns_thrpool_destroy(struct ns_thrpool_t *tp)
* and use it to wake up the event loop.
*/
- PR_EnterMonitor(tp->event_q_wakeup_job->monitor);
+ pthread_mutex_lock(tp->event_q_wakeup_job->monitor);
- tp->event_q_wakeup_job->job_type |= NS_JOB_THREAD;
+ // tp->event_q_wakeup_job->job_type |= NS_JOB_THREAD;
/* This triggers the job to "run", which will cause a shutdown cascade
*/
#ifdef DEBUG_FSM
ns_log(LOG_DEBUG, "ns_thrpool_destroy %x state %d moving to
NS_JOB_NEEDS_DELETE\n", tp->event_q_wakeup_job,
tp->event_q_wakeup_job->state);
#endif
tp->event_q_wakeup_job->state = NS_JOB_NEEDS_DELETE;
- PR_ExitMonitor(tp->event_q_wakeup_job->monitor);
+ pthread_mutex_unlock(tp->event_q_wakeup_job->monitor);
/* Has to be event_q_notify, not internal_job_done */
event_q_notify(tp->event_q_wakeup_job);
/* Wait for the event thread to finish before we free the
* internals of tp. */
- PR_JoinThread(tp->event_thread);
+ int32_t rc = pthread_join(tp->event_thread, &retval);
+ if (rc != 0) {
+ ns_log(LOG_DEBUG, "Failed to join event thread %d\n", rc);
+ }
if (tp->work_q) {
sds_lqueue_destroy(tp->work_q);
@@ -1402,17 +1439,10 @@ ns_thrpool_destroy(struct ns_thrpool_t *tp)
sds_queue_destroy(tp->thread_stack);
}
- /* Free the work queue lock. */
- if (tp->work_q_lock) {
- PR_DestroyLock(tp->work_q_lock);
- tp->work_q_lock = NULL;
- }
-
/* Free the work queue condition variable. */
- if (tp->work_q_cv) {
- PR_DestroyCondVar(tp->work_q_cv);
- tp->work_q_cv = NULL;
- }
+ pthread_cond_destroy(&(tp->work_q_cv));
+ /* Free the work queue lock. */
+ pthread_mutex_destroy(&(tp->work_q_lock));
if (tp->event_q) {
sds_lqueue_destroy(tp->event_q);
@@ -1455,14 +1485,18 @@ ns_thrpool_shutdown(struct ns_thrpool_t *tp)
#ifdef DEBUG_FSM
ns_log(LOG_DEBUG, "ns_thrpool_shutdown initiated ...\n");
#endif
+ if (ns_thrpool_is_shutdown(tp) != 0) {
+ /* Already done! */
+ return;
+ }
/* Set the shutdown flag. This will cause the worker
* threads to exit after they finish all remaining work. */
- PR_AtomicSet(&tp->shutdown, 1);
+ __atomic_add_fetch(&(tp->shutdown), 1, __ATOMIC_SEQ_CST);
/* Wake up the idle worker threads so they can exit. */
- PR_Lock(tp->work_q_lock);
- PR_NotifyAllCondVar(tp->work_q_cv);
- PR_Unlock(tp->work_q_lock);
+ pthread_mutex_lock(&(tp->work_q_lock));
+ pthread_cond_broadcast(&(tp->work_q_cv));
+ pthread_mutex_unlock(&(tp->work_q_lock));
}
PRStatus
@@ -1476,14 +1510,14 @@ ns_thrpool_wait(ns_thrpool_t *tp)
while (sds_queue_dequeue(tp->thread_stack, (void **)&thr) == SDS_SUCCESS)
{
- PRStatus rc = PR_JoinThread(thr->thr);
+ void *retval = NULL;
+ int32_t rc = pthread_join(thr->thr, &retval);
#ifdef DEBUG_FSM
ns_log(LOG_DEBUG, "ns_thrpool_wait joined thread, result %d\n", rc);
#endif
- if (rc != PR_SUCCESS) {
+ if (rc != 0) {
/* NGK TODO - this is unused right now. */
- PRErrorCode prerr = PR_GetError();
- ns_log(LOG_ERR, "ns_thrpool_wait, failed to join thread %d",
prerr);
+ ns_log(LOG_ERR, "ns_thrpool_wait, failed to join thread %d", rc);
}
ns_free(thr);
}
diff --git a/nunc-stans.h b/nunc-stans.h
index 8b36729..fd510fc 100644
--- a/nunc-stans.h
+++ b/nunc-stans.h
@@ -335,10 +335,10 @@ typedef unsigned short ns_job_type_t;
*/
struct ns_thrpool_config {
/** \cond */
- int init_flag;
+ int32_t init_flag;
/** \endcond */
- PRInt32 max_threads; /**< Do not grow the thread pool greater than this size */
- PRUint32 stacksize; /**< Thread stack size */
+ size_t max_threads; /**< Do not grow the thread pool greater than this size */
+ size_t stacksize; /**< Thread stack size */
/* pluggable logging functions */
void (*log_fct)(int, const char *, va_list); /**< Provide a function that works
like vsyslog */
diff --git a/tests/cmocka/stress_test.c b/tests/cmocka/stress_test.c
index 979abe8..bc7bf0a 100644
--- a/tests/cmocka/stress_test.c
+++ b/tests/cmocka/stress_test.c
@@ -45,9 +45,11 @@
#include <setjmp.h>
#include <cmocka.h>
+/*
#include <nspr.h>
#include <plstr.h>
#include <prlog.h>
+*/
#include "nunc-stans.h"
@@ -61,6 +63,8 @@
#include <time.h>
#include <sys/time.h>
+#include <assert.h>
+
struct conn_ctx {
size_t offset; /* current offset into buffer for reading or writing */
size_t len; /* size of buffer */
@@ -69,24 +73,22 @@ struct conn_ctx {
size_t cl; /* http content-length when reading */
#define CONN_BUFFER_SIZE BUFSIZ /* default buffer size */
char *buffer;
- PRBool need_sec_layer_before_next_read;
- struct ns_sec_ctx_t *sc;
};
static FILE *logfp;
void do_logging(int, const char*, ...);
-PRInt32 client_success_count = 0;
-PRInt32 server_success_count = 0;
-PRInt32 client_fail_count = 0;
-PRInt32 client_timeout_count = 0;
-PRInt32 server_fail_count = 0;
-PRInt32 job_count = 0;
-PRInt32 client_thread_count = 80;
-PRInt32 server_thread_count = 20;
-PRInt32 jobs = 200;
-PRInt32 test_timeout = 70;
+int32_t client_success_count = 0;
+int32_t server_success_count = 0;
+int32_t client_fail_count = 0;
+int32_t client_timeout_count = 0;
+int32_t server_fail_count = 0;
+int32_t job_count = 0;
+int32_t client_thread_count = 80;
+int32_t server_thread_count = 20;
+int32_t jobs = 200;
+int32_t test_timeout = 70;
#define PR_WOULD_BLOCK(iii) (iii == PR_PENDING_INTERRUPT_ERROR) || (iii ==
PR_WOULD_BLOCK_ERROR)
@@ -123,15 +125,7 @@ do_logging(int level, const char *format, ...)
static struct conn_ctx *
conn_ctx_new( void )
{
- struct conn_ctx *connctx = PR_NEW(struct conn_ctx);
- connctx->offset = 0;
- connctx->len = 0;
- connctx->buffer = NULL;
- connctx->needed = 0;
- connctx->body = 0;
- connctx->cl = 0;
- connctx->need_sec_layer_before_next_read = PR_FALSE;
- connctx->sc = NULL;
+ struct conn_ctx *connctx = calloc(1, sizeof(struct conn_ctx));
return connctx;
}
@@ -140,10 +134,9 @@ conn_ctx_free(struct conn_ctx *connctx)
{
/* Why don't we use PR_DELETE here? */
if (connctx->buffer != NULL) {
- PR_Free(connctx->buffer);
- connctx->buffer = NULL;
+ free(connctx->buffer);
}
- PR_DELETE(connctx);
+ free(connctx);
}
@@ -151,21 +144,22 @@ static void
server_conn_write(struct ns_job_t *job)
{
struct conn_ctx *connctx;
- PRInt32 len;
+ int32_t len;
do_logging(LOG_DEBUG, "job about to write ...\n");
- PR_ASSERT(job);
- PR_ASSERT(ns_job_get_data(job));
+ assert(job != NULL);
connctx = (struct conn_ctx *)ns_job_get_data(job);
+ assert(connctx != NULL);
if (NS_JOB_IS_TIMER(ns_job_get_output_type(job))) {
do_logging(LOG_ERR, "conn_write: job [%p] timeout\n", job);
+ __atomic_add_fetch(&server_fail_count, 1, __ATOMIC_SEQ_CST);
conn_ctx_free(connctx);
assert_int_equal(ns_job_done(job), 0);
return;
}
/* Get the data out of our connctx */
- char *data = PR_Malloc(sizeof(char) * (connctx->offset + 1));
+ char *data = calloc(1, sizeof(char) * (connctx->offset + 1));
memcpy(data, connctx->buffer, connctx->offset);
data[connctx->offset] = '\0';
@@ -193,15 +187,17 @@ server_conn_read(struct ns_job_t *job)
do_logging(LOG_DEBUG, "Reading from connection\n");
struct conn_ctx *connctx;
- PRInt32 len, nbytes;
-
- PR_ASSERT(job);
- PR_ASSERT(ns_job_get_data(job));
+ int32_t len;
+ int32_t nbytes;
+ assert(job != NULL);
connctx = (struct conn_ctx *)ns_job_get_data(job);
+ assert(connctx != NULL);
+
if (NS_JOB_IS_TIMER(ns_job_get_output_type(job))) {
/* The event that triggered this call back is because we timed out waiting for IO
*/
do_logging(LOG_ERR, "conn_read: job [%p] timed out\n", job);
+ __atomic_add_fetch(&server_fail_count, 1, __ATOMIC_SEQ_CST);
conn_ctx_free(connctx);
assert_int_equal(ns_job_done(job), 0);
return;
@@ -232,6 +228,7 @@ server_conn_read(struct ns_job_t *job)
return;
} else {
do_logging(LOG_ERR, "conn_read: read error for job [%p] %d: %s\n",
job, PR_GetError(), PR_ErrorToString(PR_GetError(), PR_LANGUAGE_I_DEFAULT));
+ __atomic_add_fetch(&server_fail_count, 1, __ATOMIC_SEQ_CST);
conn_ctx_free(connctx);
assert_int_equal(ns_job_done(job), 0);
return;
@@ -241,7 +238,7 @@ server_conn_read(struct ns_job_t *job)
/* Didn't read anything */
do_logging(LOG_DEBUG, "conn_read: job [%p] closed\n", job);
/* Increment the success */
- PR_AtomicAdd(&server_success_count, 1);
+ __atomic_add_fetch(&server_success_count, 1, __ATOMIC_SEQ_CST);
conn_ctx_free(connctx);
assert_int_equal(ns_job_done(job), 0);
return;
@@ -265,12 +262,13 @@ server_conn_handler(struct ns_job_t *job)
{
do_logging(LOG_DEBUG, "Handling a connection\n");
- PR_ASSERT(job);
+ assert(job != NULL);
if (NS_JOB_IS_READ(ns_job_get_type(job)) != 0) {
server_conn_read(job);
} else {
/* We should not be able to get here! */
+ assert(0);
}
return;
@@ -333,19 +331,19 @@ static void
client_response_cb(struct ns_job_t *job)
{
- char *buffer = PR_Malloc(20);
- PRInt32 buflen = 20;
- PRInt32 len = 0;
+ char *buffer = calloc(1, 20);
+ int32_t buflen = 20;
+ int32_t len = 0;
len = PR_Read(ns_job_get_fd(job), buffer, buflen);
if (len < 0) {
/* PRErrorCode prerr = PR_GetError(); */
do_logging(LOG_ERR, "FAIL: connection error, no data \n");
- PR_AtomicAdd(&client_fail_count, 1);
+ __atomic_add_fetch(&client_fail_count, 1, __ATOMIC_SEQ_CST);
goto done;
} else if (len == 0) {
do_logging(LOG_ERR, "FAIL: connection closed, no data \n");
- PR_AtomicAdd(&client_fail_count, 1);
+ __atomic_add_fetch(&client_fail_count, 1, __ATOMIC_SEQ_CST);
goto done;
} else {
/* Be paranoid, force last byte null */
@@ -353,7 +351,7 @@ client_response_cb(struct ns_job_t *job)
if (strncmp("this is a test!\n", buffer, strlen("this is a
test!\n")) != 0)
{
do_logging(LOG_ERR, "FAIL: connection incorrect response, no data
\n");
- PR_AtomicAdd(&client_fail_count, 1);
+ __atomic_add_fetch(&client_fail_count, 1, __ATOMIC_SEQ_CST);
goto done;
}
}
@@ -361,11 +359,11 @@ client_response_cb(struct ns_job_t *job)
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
- PR_AtomicAdd(&client_success_count, 1);
+ __atomic_add_fetch(&client_success_count, 1, __ATOMIC_SEQ_CST);
do_logging(LOG_ERR, "PASS: %ld.%ld %d\n", ts.tv_sec, ts.tv_nsec,
client_success_count);
done:
- PR_Free(buffer);
+ free(buffer);
assert_int_equal(ns_job_done(job), 0);
}
@@ -374,7 +372,7 @@ client_initiate_connection_cb(struct ns_job_t *job)
{
/* Create a socket */
PRFileDesc *sock = NULL;
- PRNetAddr netaddr;
+ PRNetAddr netaddr = {{0}};
char *data = "this is a test!\n";
sock = PR_OpenTCPSocket(PR_AF_INET6);
@@ -384,7 +382,6 @@ client_initiate_connection_cb(struct ns_job_t *job)
goto done;
}
- memset(&netaddr, 0, sizeof(netaddr));
PR_SetNetAddr(PR_IpAddrLoopback, PR_AF_INET6, 12345, &netaddr);
/* Connect */
@@ -395,7 +392,7 @@ client_initiate_connection_cb(struct ns_job_t *job)
PR_GetErrorText(err);
do_logging(LOG_ERR, "FAIL: cannot connect, timeout %d -> %s \n",
PR_GetError(), err);
/* Atomic increment fail */
- PR_AtomicAdd(&client_timeout_count, 1);
+ __atomic_add_fetch(&client_timeout_count, 1, __ATOMIC_SEQ_CST);
if (sock != NULL) {
PR_Close(sock);
@@ -418,7 +415,7 @@ client_create_work(struct ns_job_t *job)
PR_Sleep(PR_SecondsToInterval(1));
clock_gettime(CLOCK_MONOTONIC, &ts);
printf("BEGIN: %ld.%ld\n", ts.tv_sec, ts.tv_nsec);
- for (PRInt32 i = 0; i < jobs; i++) {
+ for (int32_t i = 0; i < jobs; i++) {
assert_int_equal(ns_add_job(ns_job_get_tp(job), NS_JOB_NONE|NS_JOB_THREAD,
client_initiate_connection_cb, NULL, NULL), 0);
}
assert_int_equal(ns_job_done(job), 0);
@@ -434,7 +431,7 @@ ns_stress_test(void **state __attribute__((unused)))
/* Client first */
- PRInt32 job_count = jobs * client_thread_count;
+ int32_t job_count = jobs * client_thread_count;
struct ns_thrpool_t *ctp;
struct ns_thrpool_config client_ns_config;
struct ns_job_t *sigterm_job = NULL;
--
To stop receiving notification emails like this one, please contact
the administrator of this repository.