[PATCH] Remove unnecessary __attribute__ ((aligned(8))) from internal headers
by Angus Salkeld
Signed-off-by: Angus Salkeld <asalkeld(a)redhat.com>
---
lib/ipc_int.h | 22 +++++++++++-----------
lib/ipc_sysv_mq.c | 4 ++--
2 files changed, 13 insertions(+), 13 deletions(-)
diff --git a/lib/ipc_int.h b/lib/ipc_int.h
index d90443c..b043159 100644
--- a/lib/ipc_int.h
+++ b/lib/ipc_int.h
@@ -46,23 +46,23 @@ SEND CONN REQ ->
*/
struct qb_ipc_connection_request {
- struct qb_ipc_request_header hdr __attribute__ ((aligned(8)));
- uint32_t max_msg_size __attribute__ ((aligned(8)));
+ struct qb_ipc_request_header hdr;
+ uint32_t max_msg_size;
} __attribute__ ((aligned(8)));
struct qb_ipc_event_connection_request {
- struct qb_ipc_request_header hdr __attribute__ ((aligned(8)));
- intptr_t connection __attribute__ ((aligned(8)));
+ struct qb_ipc_request_header hdr;
+ intptr_t connection;
} __attribute__ ((aligned(8)));
struct qb_ipc_connection_response {
- struct qb_ipc_response_header hdr __attribute__ ((aligned(8)));
- int32_t connection_type __attribute__ ((aligned(8)));
- uint32_t max_msg_size __attribute__ ((aligned(8)));
- intptr_t connection __attribute__ ((aligned(8)));
- char request[PATH_MAX] __attribute__ ((aligned(8)));
- char response[PATH_MAX] __attribute__ ((aligned(8)));
- char event[PATH_MAX] __attribute__ ((aligned(8)));
+ struct qb_ipc_response_header hdr;
+ int32_t connection_type;
+ uint32_t max_msg_size;
+ intptr_t connection;
+ char request[PATH_MAX];
+ char response[PATH_MAX];
+ char event[PATH_MAX];
} __attribute__ ((aligned(8)));
struct qb_ipcc_connection;
diff --git a/lib/ipc_sysv_mq.c b/lib/ipc_sysv_mq.c
index 9dfce1e..7901eff 100644
--- a/lib/ipc_sysv_mq.c
+++ b/lib/ipc_sysv_mq.c
@@ -38,8 +38,8 @@
#define MY_DATA_SIZE 8000
struct my_msgbuf {
- int32_t id __attribute__ ((aligned(8)));
- char data[MY_DATA_SIZE] __attribute__ ((aligned(8)));
+ int32_t id;
+ char data[MY_DATA_SIZE];
} __attribute__ ((aligned(8)));
/*
--
1.7.10.1
11 years, 12 months
[PATCH 1/2] Revert "Add the event queue length to the connection stats."
by Angus Salkeld
This reverts commit 6b7da3f5315473c05b5939903d97be2e8ade4c8c.
---
include/qb/qbipcs.h | 1 -
lib/ipcs.c | 7 -------
2 files changed, 8 deletions(-)
diff --git a/include/qb/qbipcs.h b/include/qb/qbipcs.h
index 4f2934e..0a4caed 100644
--- a/include/qb/qbipcs.h
+++ b/include/qb/qbipcs.h
@@ -71,7 +71,6 @@ struct qb_ipcs_connection_stats {
uint64_t recv_retries;
int32_t flow_control_state;
uint64_t flow_control_count;
- uint32_t event_q_length;
};
typedef int32_t (*qb_ipcs_dispatch_fn_t) (int32_t fd, int32_t revents,
diff --git a/lib/ipcs.c b/lib/ipcs.c
index 03c24ad..b385220 100644
--- a/lib/ipcs.c
+++ b/lib/ipcs.c
@@ -749,14 +749,7 @@ qb_ipcs_connection_stats_get(qb_ipcs_connection_t * c,
if (c == NULL) {
return -EINVAL;
}
-
memcpy(stats, &c->stats, sizeof(struct qb_ipcs_connection_stats));
-
- if (c->service->funcs.q_len_get) {
- stats->event_q_length = c->service->funcs.q_len_get(&c->event);
- } else {
- stats->event_q_length = 0;
- }
if (clear_after_read) {
memset(&c->stats, 0, sizeof(struct qb_ipcs_connection_stats));
c->stats.client_pid = c->pid;
--
1.7.10.1
11 years, 12 months
[PATCH 1/2] Fix some small issues in ./check
by Angus Salkeld
Signed-off-by: Angus Salkeld <asalkeld(a)redhat.com>
---
check | 13 ++++++++++---
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/check b/check
index c7c6da2..1b68619 100755
--- a/check
+++ b/check
@@ -50,7 +50,7 @@ fi
cd $up
set -e
-if [ ! -e install-sh ]
+if [ ! -e build-aux/install-sh ]
then
./autogen.sh
fi
@@ -185,7 +185,6 @@ check_coverity() {
--concurrency \
--all \
--aggressiveness-level high \
- --enable MISRA_CAST \
--security \
--wait-for-license
cov-format-errors --dir cov
@@ -210,6 +209,14 @@ check_clang() {
check_abi() {
ver1=$1
ver2=$2
+ if [ -z "$ver1" ] ; then
+ echo need two versions.
+ exit 1
+ fi
+ if [ -z "$ver2" ] ; then
+ echo need two versions.
+ exit 1
+ fi
TMPL=build-aux/abi-check-templ.xml
checker=abi-compliance-checker
@@ -251,7 +258,7 @@ check_abi() {
check_api_sanity() {
make
export CFLAGS="-Wall -ggdb2"
- api-sanity-autotest -l libqb -d build-aux/api-auto-test.xml -gen -build -run
+ api-sanity-checker -l libqb -d build-aux/api-auto-test.xml -gen -build -run
google-chrome test_results/libqb/master/test_results.html
google-chrome test_results/libqb/master/test_results.html
}
--
1.7.10.1
11 years, 12 months
[PATCH 1/6] Test for log facility names
by Angus Salkeld
From: Igor Pashev <pashev.igor(a)gmail.com>
---
lib/log_format.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/lib/log_format.c b/lib/log_format.c
index 6071a30..0cbb543 100644
--- a/lib/log_format.c
+++ b/lib/log_format.c
@@ -51,10 +51,14 @@ static struct syslog_names prioritynames[] = {
struct syslog_names facilitynames[] = {
{"auth", LOG_AUTH},
+#if defined(LOG_AUTHPRIV)
{"authpriv", LOG_AUTHPRIV},
+#endif
{"cron", LOG_CRON},
{"daemon", LOG_DAEMON},
+#if defined(LOG_FTP)
{"ftp", LOG_FTP},
+#endif
{"kern", LOG_KERN},
{"lpr", LOG_LPR},
{"mail", LOG_MAIL},
--
1.7.10.1
11 years, 12 months
[PATCH] IPC: drop log message to debug.
by Angus Salkeld
this can be noisy when there are a lot of transient
connections.
Signed-off-by: Angus Salkeld <asalkeld(a)redhat.com>
---
lib/ipc_us.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ipc_us.c b/lib/ipc_us.c
index 2f36b9a..2db87ec 100644
--- a/lib/ipc_us.c
+++ b/lib/ipc_us.c
@@ -623,7 +623,7 @@ handle_new_connection(struct qb_ipcs_service *s,
goto send_response;
}
- qb_util_log(LOG_INFO, "IPC credentials authenticated");
+ qb_util_log(LOG_DEBUG, "IPC credentials authenticated");
memset(&response, 0, sizeof(response));
if (s->funcs.connect) {
--
1.7.10.1
11 years, 12 months
[PATCH] IPC: fix retrying of partial recv's and sends.
by Angus Salkeld
Move to send() instead of sendmsg() as it's easier
to track resending partial messages.
Signed-off-by: Angus Salkeld <asalkeld(a)redhat.com>
---
lib/ipc_us.c | 120 +++++++++++++++++++++++------------------------------
tests/check_ipc.c | 48 +++++++++++++++------
2 files changed, 86 insertions(+), 82 deletions(-)
diff --git a/lib/ipc_us.c b/lib/ipc_us.c
index 9c8b5be..2f36b9a 100644
--- a/lib/ipc_us.c
+++ b/lib/ipc_us.c
@@ -85,33 +85,21 @@ ssize_t
qb_ipc_us_send(struct qb_ipc_one_way *one_way, const void *msg, size_t len)
{
int32_t result;
- struct msghdr msg_send;
- struct iovec iov_send;
- char *rbuf = (char *)msg;
int32_t processed = 0;
- struct ipc_us_control *ctl = NULL;
-
- msg_send.msg_iov = &iov_send;
- msg_send.msg_iovlen = 1;
- msg_send.msg_name = 0;
- msg_send.msg_namelen = 0;
-
-#if !defined(QB_SOLARIS)
- msg_send.msg_control = 0;
- msg_send.msg_controllen = 0;
- msg_send.msg_flags = 0;
-#else
- msg_send.msg_accrights = NULL;
- msg_send.msg_accrightslen = 0;
-#endif
+ char *rbuf = (char *)msg;
retry_send:
- iov_send.iov_base = &rbuf[processed];
- iov_send.iov_len = len - processed;
+ result = send(one_way->u.us.sock,
+ &rbuf[processed],
+ len - processed,
+ MSG_NOSIGNAL);
- result = sendmsg(one_way->u.us.sock, &msg_send, MSG_NOSIGNAL);
if (result == -1) {
- return -errno;
+ if (errno == EAGAIN && processed > 0) {
+ goto retry_send;
+ } else {
+ return -errno;
+ }
}
processed += result;
@@ -119,6 +107,7 @@ retry_send:
goto retry_send;
}
if (one_way->type == QB_IPC_SOCKET) {
+ struct ipc_us_control *ctl = NULL;
ctl = (struct ipc_us_control *)one_way->u.us.shared_data;
if (ctl) {
qb_atomic_int_inc(&ctl->sent);
@@ -132,46 +121,46 @@ qb_ipc_us_sendv(struct qb_ipc_one_way *one_way, const struct iovec *iov,
size_t iov_len)
{
int32_t result;
- struct msghdr msg_send;
int32_t processed = 0;
- size_t len = 0;
- int32_t i;
- struct ipc_us_control *ctl = NULL;
-
- for (i = 0; i < iov_len; i++) {
- len += iov[i].iov_len;
- }
- msg_send.msg_iov = (struct iovec *)iov;
- msg_send.msg_iovlen = iov_len;
- msg_send.msg_name = 0;
- msg_send.msg_namelen = 0;
-
-#if !defined(QB_SOLARIS)
- msg_send.msg_control = 0;
- msg_send.msg_controllen = 0;
- msg_send.msg_flags = 0;
-#else
- msg_send.msg_accrights = NULL;
- msg_send.msg_accrightslen = 0;
-#endif
+ int32_t total_processed = 0;
+ int32_t iov_p = 0;
+ char *rbuf = (char *)iov[iov_p].iov_base;
retry_send:
- result = sendmsg(one_way->u.us.sock, &msg_send, MSG_NOSIGNAL);
+ result = send(one_way->u.us.sock,
+ &rbuf[processed],
+ iov[iov_p].iov_len - processed,
+ MSG_NOSIGNAL);
+
if (result == -1) {
- return -errno;
+ if (errno == EAGAIN &&
+ (processed > 0 || iov_p > 0)) {
+ goto retry_send;
+ } else {
+ return -errno;
+ }
}
processed += result;
- if (processed != len) {
+ if (processed == iov[iov_p].iov_len) {
+ iov_p++;
+ total_processed += processed;
+ if (iov_p < iov_len) {
+ processed = 0;
+ rbuf = (char *)iov[iov_p].iov_base;
+ goto retry_send;
+ }
+ } else {
goto retry_send;
}
if (one_way->type == QB_IPC_SOCKET) {
+ struct ipc_us_control *ctl;
ctl = (struct ipc_us_control *)one_way->u.us.shared_data;
if (ctl) {
qb_atomic_int_inc(&ctl->sent);
}
}
- return processed;
+ return total_processed;
}
static ssize_t
@@ -245,34 +234,33 @@ qb_ipc_us_recv(struct qb_ipc_one_way * one_way,
int32_t processed = 0;
int32_t to_recv = len;
char *data = msg;
- struct ipc_us_control *ctl = NULL;
retry_recv:
result = recv(one_way->u.us.sock, &data[processed], to_recv,
MSG_NOSIGNAL | MSG_WAITALL);
- if (timeout == -1) {
- if (result == -1 && errno == EAGAIN) {
+
+ if (result == -1) {
+ if (errno == EAGAIN &&
+ (processed > 0 || timeout == -1)) {
goto retry_recv;
+ } else {
+ return -errno;
}
}
+
if (result == 0) {
qb_util_log(LOG_DEBUG,
"recv(fd %d) got 0 bytes assuming ENOTCONN",
one_way->u.us.sock);
return -ENOTCONN;
}
- if (result == -1) {
- if (errno != EAGAIN) {
- return -errno;
- }
- } else {
- processed += result;
- to_recv -= result;
- }
+ processed += result;
+ to_recv -= result;
if (processed != len) {
goto retry_recv;
}
if (one_way->type == QB_IPC_SOCKET) {
+ struct ipc_us_control *ctl = NULL;
ctl = (struct ipc_us_control *)one_way->u.us.shared_data;
if (ctl) {
(void)qb_atomic_int_dec_and_test(&ctl->sent);
@@ -282,7 +270,7 @@ retry_recv:
}
/*
- * recv a message of unknow size.
+ * recv a message of unknown size.
*/
static ssize_t
qb_ipc_us_recv_at_most(struct qb_ipc_one_way * one_way,
@@ -299,17 +287,11 @@ retry_recv:
result = recv(one_way->u.us.sock, &data[processed], to_recv,
MSG_NOSIGNAL | MSG_WAITALL);
if (result == -1) {
- if (errno != EAGAIN) {
- return -errno;
+ if (errno == EAGAIN &&
+ (processed > 0 || timeout == -1)) {
+ goto retry_recv;
} else {
- if (processed > 0 || timeout == -1) {
- /* if we are part way into receiving the
- * message or the call is blocking, retry.
- */
- goto retry_recv;
- } else {
- return -errno;
- }
+ return -errno;
}
} else if (result == 0) {
qb_util_log(LOG_DEBUG,
diff --git a/tests/check_ipc.c b/tests/check_ipc.c
index 4205ff6..1ea59ed 100644
--- a/tests/check_ipc.c
+++ b/tests/check_ipc.c
@@ -117,23 +117,27 @@ s1_msg_process_fn(qb_ipcs_connection_t *c,
}
} else if (req_pt->id == IPC_MSG_REQ_BULK_EVENTS) {
int32_t m;
+ int32_t num;
struct qb_ipcs_connection_stats stats;
response.size = sizeof(struct qb_ipc_response_header);
- response.id = IPC_MSG_RES_BULK_EVENTS;
response.error = 0;
+ qb_ipcs_connection_stats_get(c, &stats, QB_FALSE);
+ num = stats.event_q_length;
+
for (m = 0; m < num_bulk_events; m++) {
res = qb_ipcs_event_send(c, &response,
sizeof(response));
- if (res < 0) {
- qb_perror(LOG_INFO, "qb_ipcs_event_send");
- }
+ ck_assert_int_eq(res, sizeof(response));
+ response.id++;
}
qb_ipcs_connection_stats_get(c, &stats, QB_FALSE);
- ck_assert_int_eq(stats.event_q_length, num_bulk_events);
- qb_ipcs_response_send(c, &response, response.size);
+ ck_assert_int_eq(stats.event_q_length - num, num_bulk_events);
+ response.id = IPC_MSG_RES_BULK_EVENTS;
+ res = qb_ipcs_response_send(c, &response, response.size);
+ ck_assert_int_eq(res, sizeof(response));
} else if (req_pt->id == IPC_MSG_REQ_SERVER_FAIL) {
exit(0);
@@ -293,14 +297,15 @@ repeat_send:
return res;
}
}
- if (req_id == IPC_MSG_REQ_TX_RX) {
- res = qb_ipcc_recv(conn, &res_header,
- sizeof(struct qb_ipc_response_header),
- ms_timeout);
- } else {
+
+ if (req_id == IPC_MSG_REQ_DISPATCH) {
res = qb_ipcc_event_recv(conn, &res_header,
sizeof(struct qb_ipc_response_header),
ms_timeout);
+ } else {
+ res = qb_ipcc_recv(conn, &res_header,
+ sizeof(struct qb_ipc_response_header),
+ ms_timeout);
}
if (res == -EINTR) {
return -1;
@@ -601,6 +606,9 @@ count_bulk_events(int32_t fd, int32_t revents, void *data)
static void
test_ipc_bulk_events(void)
{
+ struct qb_ipc_request_header req_header;
+ struct qb_ipc_response_header res_header;
+ struct iovec iov[1];
int32_t c = 0;
int32_t j = 0;
pid_t pid;
@@ -633,13 +641,26 @@ test_ipc_bulk_events(void)
ck_assert_int_eq(res, 0);
res = send_and_check(IPC_MSG_REQ_BULK_EVENTS,
- sizeof(struct qb_ipc_request_header),
- recv_timeout, QB_TRUE);
+ 0,
+ recv_timeout, QB_TRUE);
ck_assert_int_eq(res, sizeof(struct qb_ipc_response_header));
qb_loop_run(cl);
ck_assert_int_eq(events_received, num_bulk_events);
+ req_header.id = IPC_MSG_REQ_SERVER_FAIL;
+ req_header.size = sizeof(struct qb_ipc_request_header);
+
+ iov[0].iov_len = req_header.size;
+ iov[0].iov_base = &req_header;
+ res = qb_ipcc_sendv_recv(conn, iov, 1,
+ &res_header,
+ sizeof(struct qb_ipc_response_header), -1);
+ if (res != -ECONNRESET && res != -ENOTCONN) {
+ qb_log(LOG_ERR, "id:%d size:%d", res_header.id, res_header.size);
+ ck_assert_int_eq(res, -ENOTCONN);
+ }
+
qb_ipcc_disconnect(conn);
stop_process(pid);
}
@@ -647,6 +668,7 @@ test_ipc_bulk_events(void)
START_TEST(test_ipc_bulk_events_us)
{
qb_enter();
+ send_event_on_created = QB_FALSE;
ipc_type = QB_IPC_SOCKET;
ipc_name = __func__;
test_ipc_bulk_events();
--
1.7.10
12 years
[PATCH] IPC: initialize enough shared mem for all 3 one way connections.
by Angus Salkeld
Signed-off-by: Angus Salkeld <asalkeld(a)redhat.com>
---
lib/ipc_us.c | 35 ++++++++++++++++++++---------------
1 file changed, 20 insertions(+), 15 deletions(-)
diff --git a/lib/ipc_us.c b/lib/ipc_us.c
index d26218a..9c8b5be 100644
--- a/lib/ipc_us.c
+++ b/lib/ipc_us.c
@@ -447,7 +447,7 @@ qb_ipcc_us_connect(struct qb_ipcc_connection *c,
struct qb_ipc_event_connection_request request;
char path[PATH_MAX];
int32_t fd_hdr;
- struct ipc_us_control *ctl;
+ char * shm_ptr;
c->needs_sock_for_poll = QB_FALSE;
c->funcs.send = qb_ipc_us_send;
@@ -468,20 +468,17 @@ qb_ipcc_us_connect(struct qb_ipcc_connection *c,
return res;
}
(void)strlcpy(c->request.u.us.shared_file_name, r->request, NAME_MAX);
- c->request.u.us.shared_data = mmap(0,
- sizeof(struct ipc_us_control),
- PROT_READ | PROT_WRITE, MAP_SHARED,
- fd_hdr, 0);
+ shm_ptr = mmap(0, 3 * sizeof(struct ipc_us_control),
+ PROT_READ | PROT_WRITE, MAP_SHARED, fd_hdr, 0);
- if (c->request.u.us.shared_data == MAP_FAILED) {
+ if (shm_ptr == MAP_FAILED) {
res = -errno;
qb_util_perror(LOG_ERR, "couldn't create mmap for header");
goto cleanup_hdr;
}
-
- ctl = (struct ipc_us_control *)c->request.u.us.shared_data;
- ctl->sent = 0;
- ctl->flow_control = 0;
+ c->request.u.us.shared_data = shm_ptr;
+ c->response.u.us.shared_data = shm_ptr + sizeof(struct ipc_us_control);
+ c->event.u.us.shared_data = shm_ptr + (2 * sizeof(struct ipc_us_control));
close(fd_hdr);
@@ -922,6 +919,7 @@ qb_ipcs_us_connect(struct qb_ipcs_service *s,
int32_t fd_hdr;
int32_t res = 0;
struct ipc_us_control *ctl;
+ char * shm_ptr;
qb_util_log(LOG_DEBUG, "connecting to client [%d]", c->pid);
@@ -940,20 +938,27 @@ qb_ipcs_us_connect(struct qb_ipcs_service *s,
(void)strlcpy(c->request.u.us.shared_file_name, r->request, NAME_MAX);
(void)chown(r->request, c->euid, c->egid);
- c->request.u.us.shared_data = mmap(0,
- sizeof(struct ipc_us_control),
- PROT_READ | PROT_WRITE, MAP_SHARED,
- fd_hdr, 0);
+ shm_ptr = mmap(0, 3 * sizeof(struct ipc_us_control),
+ PROT_READ | PROT_WRITE, MAP_SHARED, fd_hdr, 0);
- if (c->request.u.us.shared_data == MAP_FAILED) {
+ if (shm_ptr == MAP_FAILED) {
res = -errno;
qb_util_perror(LOG_ERR, "couldn't create mmap for header");
goto cleanup_hdr;
}
+ c->request.u.us.shared_data = shm_ptr;
+ c->response.u.us.shared_data = shm_ptr + sizeof(struct ipc_us_control);
+ c->event.u.us.shared_data = shm_ptr + (2 * sizeof(struct ipc_us_control));
ctl = (struct ipc_us_control *)c->request.u.us.shared_data;
ctl->sent = 0;
ctl->flow_control = 0;
+ ctl = (struct ipc_us_control *)c->response.u.us.shared_data;
+ ctl->sent = 0;
+ ctl->flow_control = 0;
+ ctl = (struct ipc_us_control *)c->event.u.us.shared_data;
+ ctl->sent = 0;
+ ctl->flow_control = 0;
close(fd_hdr);
return res;
--
1.7.10
12 years
[PATCH 1/2] IPC: handle the server shutdown better
by Angus Salkeld
Signed-off-by: Angus Salkeld <asalkeld(a)redhat.com>
---
lib/ipc_us.c | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/lib/ipc_us.c b/lib/ipc_us.c
index 48cdf02..ae167f8 100644
--- a/lib/ipc_us.c
+++ b/lib/ipc_us.c
@@ -847,6 +847,13 @@ qb_ipcs_us_connection_acceptor(int fd, int revent, void *data)
struct ipc_auth_ugp ugp;
socklen_t addrlen = sizeof(struct sockaddr_un);
+ if (revent & (POLLNVAL|POLLHUP|POLLERR)) {
+ /*
+ * handle shutdown more cleanly.
+ */
+ return -1;
+ }
+
retry_accept:
errno = 0;
new_fd = accept(fd, (struct sockaddr *)&un_addr, &addrlen);
--
1.7.10
12 years
[PATCH] IPC: handle a connection disconnect from the server better
by Angus Salkeld
Only problem with SOCKET.
Signed-off-by: Angus Salkeld <asalkeld(a)redhat.com>
---
lib/ipc_int.h | 1 +
lib/ipc_us.c | 48 +++++++++++++++++++++++++++-----------
lib/ipcs.c | 19 ++++++---------
tests/check_ipc.c | 67 +++++++++++++++++++++++++++++++++++++++++++++++++++++
4 files changed, 110 insertions(+), 25 deletions(-)
diff --git a/lib/ipc_int.h b/lib/ipc_int.h
index 62559bd..5e0acde 100644
--- a/lib/ipc_int.h
+++ b/lib/ipc_int.h
@@ -200,6 +200,7 @@ int32_t qb_ipcs_us_withdraw(struct qb_ipcs_service *s);
int32_t qb_ipcs_dispatch_connection_request(int32_t fd, int32_t revents, void *data);
int32_t qb_ipcs_dispatch_service_request(int32_t fd, int32_t revents, void *data);
struct qb_ipcs_connection* qb_ipcs_connection_alloc(struct qb_ipcs_service *s);
+void qb_ipcs_sockets_disconnect(struct qb_ipcs_connection *c);
int32_t qb_ipcs_process_request(struct qb_ipcs_service *s,
struct qb_ipc_request_header *hdr);
diff --git a/lib/ipc_us.c b/lib/ipc_us.c
index 47ba1b1..48cdf02 100644
--- a/lib/ipc_us.c
+++ b/lib/ipc_us.c
@@ -221,9 +221,15 @@ qb_ipc_us_recv_ready(struct qb_ipc_one_way * one_way, int32_t ms_timeout)
return -EAGAIN;
} else if (poll_events == -1) {
return -errno;
- } else if (poll_events == 1 && (ufds.revents & (POLLERR | POLLHUP))) {
+ } else if (poll_events == 1 && (ufds.revents & POLLERR)) {
+ qb_util_log(LOG_DEBUG, "poll(fd %d) got POLLERR", one_way->u.us.sock);
+ return -ENOTCONN;
+ } else if (poll_events == 1 && (ufds.revents & POLLHUP)) {
qb_util_log(LOG_DEBUG, "poll(fd %d) got POLLHUP", one_way->u.us.sock);
return -ENOTCONN;
+ } else if (poll_events == 1 && (ufds.revents & POLLNVAL)) {
+ qb_util_log(LOG_DEBUG, "poll(fd %d) got POLLNVAL", one_way->u.us.sock);
+ return -ENOTCONN;
}
return 0;
}
@@ -643,27 +649,22 @@ handle_new_connection(struct qb_ipcs_service *s,
c->state = QB_IPCS_CONNECTION_ACTIVE;
qb_list_add(&c->list, &s->connections);
- if (s->needs_sock_for_poll) {
+ if (s->needs_sock_for_poll || s->type == QB_IPC_SOCKET) {
qb_ipcs_connection_ref(c);
res = s->poll_fns.dispatch_add(s->poll_priority,
c->setup.u.us.sock,
POLLIN | POLLPRI | POLLNVAL,
c,
qb_ipcs_dispatch_connection_request);
- }
- if (s->type == QB_IPC_SOCKET) {
- c->request.u.us.sock = c->setup.u.us.sock;
- c->response.u.us.sock = c->setup.u.us.sock;
- res = s->poll_fns.dispatch_add(s->poll_priority,
- c->request.u.us.sock,
- POLLIN | POLLPRI | POLLNVAL,
- c,
- qb_ipcs_dispatch_connection_request);
if (res < 0) {
qb_util_log(LOG_ERR,
"Error adding socket to mainloop.");
}
}
+ if (s->type == QB_IPC_SOCKET) {
+ c->request.u.us.sock = c->setup.u.us.sock;
+ c->response.u.us.sock = c->setup.u.us.sock;
+ }
send_response:
response.hdr.id = QB_IPC_MSG_AUTHENTICATE;
@@ -974,14 +975,35 @@ qb_ipc_us_q_len_get(struct qb_ipc_one_way *one_way)
return qb_atomic_int_get(&ctl->sent);
}
+void
+qb_ipcs_sockets_disconnect(struct qb_ipcs_connection *c)
+{
+ int sock = -1;
+
+ qb_enter();
+ if (c->service->needs_sock_for_poll && c->setup.u.us.sock > 0) {
+ sock = c->setup.u.us.sock;
+ qb_ipcc_us_sock_close(sock);
+ c->setup.u.us.sock = -1;
+ }
+ if (c->request.type == QB_IPC_SOCKET) {
+ sock = c->request.u.us.sock;
+ }
+ if (sock > 0) {
+ (void)c->service->poll_fns.dispatch_del(sock);
+ qb_ipcs_connection_unref(c);
+ }
+}
+
static void
qb_ipcs_us_disconnect(struct qb_ipcs_connection *c)
{
+ qb_enter();
munmap(c->request.u.us.shared_data, sizeof(struct ipc_us_control));
unlink(c->request.u.us.shared_file_name);
- close(c->request.u.us.sock);
- close(c->event.u.us.sock);
+ qb_ipcc_us_sock_close(c->request.u.us.sock);
+ qb_ipcc_us_sock_close(c->event.u.us.sock);
}
void
diff --git a/lib/ipcs.c b/lib/ipcs.c
index c30a405..03c24ad 100644
--- a/lib/ipcs.c
+++ b/lib/ipcs.c
@@ -511,12 +511,8 @@ qb_ipcs_disconnect(struct qb_ipcs_connection *c)
if (c->state == QB_IPCS_CONNECTION_ACTIVE) {
c->state = QB_IPCS_CONNECTION_INACTIVE;
c->service->stats.closed_connections++;
- if (c->service->needs_sock_for_poll && c->setup.u.us.sock > 0) {
- (void)c->service->poll_fns.dispatch_del(c->setup.u.us.sock);
- qb_ipcc_us_sock_close(c->setup.u.us.sock);
- c->setup.u.us.sock = -1;
- qb_ipcs_connection_unref(c);
- }
+
+ qb_ipcs_sockets_disconnect(c);
/* return early as it's an incomplete connection.
*/
return;
@@ -526,12 +522,7 @@ qb_ipcs_disconnect(struct qb_ipcs_connection *c)
c->service->stats.active_connections--;
c->service->stats.closed_connections++;
- if (c->service->needs_sock_for_poll && c->setup.u.us.sock > 0) {
- (void)c->service->poll_fns.dispatch_del(c->setup.u.us.sock);
- qb_ipcc_us_sock_close(c->setup.u.us.sock);
- c->setup.u.us.sock = -1;
- qb_ipcs_connection_unref(c);
- }
+ qb_ipcs_sockets_disconnect(c);
}
if (c->state == QB_IPCS_CONNECTION_SHUTTING_DOWN) {
res = 0;
@@ -668,6 +659,10 @@ qb_ipcs_dispatch_connection_request(int32_t fd, int32_t revents, void *data)
int32_t recvd = 0;
ssize_t avail;
+ if (revents & POLLNVAL) {
+ qb_util_log(LOG_DEBUG, "NVAL conn:%p fd:%d", c, fd);
+ return -EINVAL;
+ }
if (revents & POLLHUP) {
qb_util_log(LOG_DEBUG, "HUP conn:%p fd:%d", c, fd);
qb_ipcs_disconnect(c);
diff --git a/tests/check_ipc.c b/tests/check_ipc.c
index 21c7f4f..4205ff6 100644
--- a/tests/check_ipc.c
+++ b/tests/check_ipc.c
@@ -46,6 +46,8 @@ enum my_msg_ids {
IPC_MSG_RES_BULK_EVENTS,
IPC_MSG_REQ_SERVER_FAIL,
IPC_MSG_RES_SERVER_FAIL,
+ IPC_MSG_REQ_SERVER_DISCONNECT,
+ IPC_MSG_RES_SERVER_DISCONNECT,
};
/* Test Cases
@@ -71,6 +73,7 @@ static qb_ipcs_service_t* s1;
static int32_t turn_on_fc = QB_FALSE;
static int32_t fc_enabled = 89;
static int32_t send_event_on_created = QB_FALSE;
+static int32_t disconnect_after_created = QB_FALSE;
static int32_t num_bulk_events = 10;
static int32_t
@@ -134,6 +137,8 @@ s1_msg_process_fn(qb_ipcs_connection_t *c,
} else if (req_pt->id == IPC_MSG_REQ_SERVER_FAIL) {
exit(0);
+ } else if (req_pt->id == IPC_MSG_REQ_SERVER_DISCONNECT) {
+ qb_ipcs_disconnect(c);
}
return 0;
}
@@ -704,6 +709,64 @@ START_TEST(test_ipc_event_on_created_us)
END_TEST
static void
+test_ipc_disconnect_after_created(void)
+{
+ struct qb_ipc_request_header req_header;
+ struct qb_ipc_response_header res_header;
+ struct iovec iov[1];
+ int32_t c = 0;
+ int32_t j = 0;
+ pid_t pid;
+ int32_t res;
+
+ pid = run_function_in_new_process(run_ipc_server);
+ fail_if(pid == -1);
+ sleep(1);
+
+ do {
+ conn = qb_ipcc_connect(ipc_name, MAX_MSG_SIZE);
+ if (conn == NULL) {
+ j = waitpid(pid, NULL, WNOHANG);
+ ck_assert_int_eq(j, 0);
+ sleep(1);
+ c++;
+ }
+ } while (conn == NULL && c < 5);
+ fail_if(conn == NULL);
+
+ ck_assert_int_eq(QB_TRUE, qb_ipcc_is_connected(conn));
+
+ req_header.id = IPC_MSG_REQ_SERVER_DISCONNECT;
+ req_header.size = sizeof(struct qb_ipc_request_header);
+
+ iov[0].iov_len = req_header.size;
+ iov[0].iov_base = &req_header;
+
+ res = qb_ipcc_sendv_recv(conn, iov, 1,
+ &res_header,
+ sizeof(struct qb_ipc_response_header), -1);
+ /*
+ * confirm we get -ENOTCONN
+ */
+ ck_assert_int_eq(res, -ENOTCONN);
+ ck_assert_int_eq(QB_FALSE, qb_ipcc_is_connected(conn));
+
+ qb_ipcc_disconnect(conn);
+ stop_process(pid);
+}
+
+START_TEST(test_ipc_disconnect_after_created_us)
+{
+ qb_enter();
+ disconnect_after_created = QB_TRUE;
+ ipc_type = QB_IPC_SOCKET;
+ ipc_name = __func__;
+ test_ipc_disconnect_after_created();
+ qb_leave();
+}
+END_TEST
+
+static void
test_ipc_server_fail(void)
{
struct qb_ipc_request_header req_header;
@@ -905,6 +968,10 @@ ipc_suite(void)
tcase_add_test(tc, test_ipc_event_on_created_us);
suite_add_tcase(s, tc);
+ tc = tcase_create("ipc_disconnect_after_created_us");
+ tcase_add_test(tc, test_ipc_disconnect_after_created_us);
+ suite_add_tcase(s, tc);
+
return s;
}
--
1.7.10
12 years
[PATCH] IPC: make it possible to send events in the connected callback.
by Angus Salkeld
This was only a problem with QB_IPC_SOCKET.
Signed-off-by: Angus Salkeld <asalkeld(a)redhat.com>
---
lib/ipc_us.c | 24 +++++++++----
tests/check_ipc.c | 102 +++++++++++++++++++++++++++++++++++++++++++++++++----
2 files changed, 113 insertions(+), 13 deletions(-)
diff --git a/lib/ipc_us.c b/lib/ipc_us.c
index 15c731f..47ba1b1 100644
--- a/lib/ipc_us.c
+++ b/lib/ipc_us.c
@@ -682,14 +682,16 @@ send_response:
}
if (res == 0) {
- qb_ipcs_connection_ref(c);
- if (s->serv_fns.connection_created) {
- s->serv_fns.connection_created(c);
- }
- if (c->state == QB_IPCS_CONNECTION_ACTIVE) {
- c->state = QB_IPCS_CONNECTION_ESTABLISHED;
+ if (s->type != QB_IPC_SOCKET) {
+ qb_ipcs_connection_ref(c);
+ if (s->serv_fns.connection_created) {
+ s->serv_fns.connection_created(c);
+ }
+ if (c->state == QB_IPCS_CONNECTION_ACTIVE) {
+ c->state = QB_IPCS_CONNECTION_ESTABLISHED;
+ }
+ qb_ipcs_connection_unref(c);
}
- qb_ipcs_connection_unref(c);
} else {
if (res == -EACCES) {
qb_util_log(LOG_ERR, "Invalid IPC credentials.");
@@ -709,7 +711,15 @@ handle_connection_new_sock(struct qb_ipcs_service *s, int32_t sock, void *msg)
struct qb_ipc_event_connection_request *req = msg;
c = (struct qb_ipcs_connection *)req->connection;
+ qb_ipcs_connection_ref(c);
c->event.u.us.sock = sock;
+ if (c->state == QB_IPCS_CONNECTION_ACTIVE) {
+ c->state = QB_IPCS_CONNECTION_ESTABLISHED;
+ }
+ if (s->serv_fns.connection_created) {
+ s->serv_fns.connection_created(c);
+ }
+ qb_ipcs_connection_unref(c);
}
static int32_t
diff --git a/tests/check_ipc.c b/tests/check_ipc.c
index 02f18c9..21c7f4f 100644
--- a/tests/check_ipc.c
+++ b/tests/check_ipc.c
@@ -47,7 +47,6 @@ enum my_msg_ids {
IPC_MSG_REQ_SERVER_FAIL,
IPC_MSG_RES_SERVER_FAIL,
};
-#define NUM_BULK_EVENTS 10
/* Test Cases
*
@@ -71,6 +70,8 @@ static qb_loop_t *my_loop;
static qb_ipcs_service_t* s1;
static int32_t turn_on_fc = QB_FALSE;
static int32_t fc_enabled = 89;
+static int32_t send_event_on_created = QB_FALSE;
+static int32_t num_bulk_events = 10;
static int32_t
exit_handler(int32_t rsignal, void *data)
@@ -119,7 +120,7 @@ s1_msg_process_fn(qb_ipcs_connection_t *c,
response.id = IPC_MSG_RES_BULK_EVENTS;
response.error = 0;
- for (m = 0; m < NUM_BULK_EVENTS; m++) {
+ for (m = 0; m < num_bulk_events; m++) {
res = qb_ipcs_event_send(c, &response,
sizeof(response));
if (res < 0) {
@@ -127,7 +128,7 @@ s1_msg_process_fn(qb_ipcs_connection_t *c,
}
}
qb_ipcs_connection_stats_get(c, &stats, QB_FALSE);
- ck_assert_int_eq(stats.event_q_length, NUM_BULK_EVENTS);
+ ck_assert_int_eq(stats.event_q_length, num_bulk_events);
qb_ipcs_response_send(c, &response, response.size);
@@ -173,6 +174,22 @@ s1_connection_destroyed(qb_ipcs_connection_t *c)
}
static void
+s1_connection_created(qb_ipcs_connection_t *c)
+{
+ if (send_event_on_created) {
+ struct qb_ipc_response_header response;
+ int32_t res;
+
+ response.size = sizeof(struct qb_ipc_response_header);
+ response.id = IPC_MSG_RES_DISPATCH;
+ response.error = 0;
+ res = qb_ipcs_event_send(c, &response,
+ sizeof(response));
+ ck_assert_int_eq(res, response.size);
+ }
+}
+
+static void
run_ipc_server(void)
{
int32_t res;
@@ -180,7 +197,7 @@ run_ipc_server(void)
struct qb_ipcs_service_handlers sh = {
.connection_accept = NULL,
- .connection_created = NULL,
+ .connection_created = s1_connection_created,
.msg_process = s1_msg_process_fn,
.connection_destroyed = s1_connection_destroyed,
.connection_closed = NULL,
@@ -569,7 +586,7 @@ count_bulk_events(int32_t fd, int32_t revents, void *data)
events_received++;
- if (events_received >= NUM_BULK_EVENTS) {
+ if (events_received >= num_bulk_events) {
qb_loop_stop(cl);
return -1;
}
@@ -616,7 +633,7 @@ test_ipc_bulk_events(void)
ck_assert_int_eq(res, sizeof(struct qb_ipc_response_header));
qb_loop_run(cl);
- ck_assert_int_eq(events_received, NUM_BULK_EVENTS);
+ ck_assert_int_eq(events_received, num_bulk_events);
qb_ipcc_disconnect(conn);
stop_process(pid);
@@ -633,6 +650,60 @@ START_TEST(test_ipc_bulk_events_us)
END_TEST
static void
+test_ipc_event_on_created(void)
+{
+ int32_t c = 0;
+ int32_t j = 0;
+ pid_t pid;
+ int32_t res;
+ qb_loop_t *cl;
+ int32_t fd;
+
+ num_bulk_events = 1;
+
+ pid = run_function_in_new_process(run_ipc_server);
+ fail_if(pid == -1);
+ sleep(1);
+
+ do {
+ conn = qb_ipcc_connect(ipc_name, MAX_MSG_SIZE);
+ if (conn == NULL) {
+ j = waitpid(pid, NULL, WNOHANG);
+ ck_assert_int_eq(j, 0);
+ sleep(1);
+ c++;
+ }
+ } while (conn == NULL && c < 5);
+ fail_if(conn == NULL);
+
+ events_received = 0;
+ cl = qb_loop_create();
+ res = qb_ipcc_fd_get(conn, &fd),
+ ck_assert_int_eq(res, 0);
+ res = qb_loop_poll_add(cl, QB_LOOP_MED,
+ fd, POLLIN,
+ cl, count_bulk_events);
+ ck_assert_int_eq(res, 0);
+
+ qb_loop_run(cl);
+ ck_assert_int_eq(events_received, num_bulk_events);
+
+ qb_ipcc_disconnect(conn);
+ stop_process(pid);
+}
+
+START_TEST(test_ipc_event_on_created_us)
+{
+ qb_enter();
+ send_event_on_created = QB_TRUE;
+ ipc_type = QB_IPC_SOCKET;
+ ipc_name = __func__;
+ test_ipc_event_on_created();
+ qb_leave();
+}
+END_TEST
+
+static void
test_ipc_server_fail(void)
{
struct qb_ipc_request_header req_header;
@@ -713,6 +784,17 @@ START_TEST(test_ipc_bulk_events_shm)
}
END_TEST
+START_TEST(test_ipc_event_on_created_shm)
+{
+ qb_enter();
+ send_event_on_created = QB_TRUE;
+ ipc_type = QB_IPC_SHM;
+ ipc_name = __func__;
+ test_ipc_event_on_created();
+ qb_leave();
+}
+END_TEST
+
START_TEST(test_ipc_server_fail_shm)
{
qb_enter();
@@ -766,6 +848,10 @@ ipc_suite(void)
tcase_add_test(tc, test_ipc_exit_shm);
tcase_set_timeout(tc, 3);
suite_add_tcase(s, tc);
+
+ tc = tcase_create("ipc_event_on_created_shm");
+ tcase_add_test(tc, test_ipc_event_on_created_shm);
+ suite_add_tcase(s, tc);
#endif /* HAVE_SEM_TIMEDWAIT */
tc = tcase_create("ipc_server_fail_soc");
@@ -815,6 +901,10 @@ ipc_suite(void)
tcase_set_timeout(tc, 16);
suite_add_tcase(s, tc);
+ tc = tcase_create("ipc_event_on_created_us");
+ tcase_add_test(tc, test_ipc_event_on_created_us);
+ suite_add_tcase(s, tc);
+
return s;
}
--
1.7.10
12 years