[libqb/f19] Rebase to 0.17.0 release
David Vossel
dvossel at fedoraproject.org
Wed Feb 26 05:48:24 UTC 2014
commit da02fdf4d1ef718253543a2093774dd03a30fea8
Author: David Vossel <dvossel at redhat.com>
Date: Tue Feb 25 23:47:41 2014 -0600
Rebase to 0.17.0 release
libqb.spec | 45 ++++-
sources | 2 +-
test-timeout-fix.patch | 590 ++++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 633 insertions(+), 4 deletions(-)
---
diff --git a/libqb.spec b/libqb.spec
index 9f37c26..7b3f1e5 100644
--- a/libqb.spec
+++ b/libqb.spec
@@ -1,12 +1,13 @@
Name: libqb
-Version: 0.14.4
-Release: 2%{?dist}
+Version: 0.17.0
+Release: 1%{?dist}
Summary: An IPC library for high performance servers
Group: System Environment/Libraries
License: LGPLv2+
URL: http://www.libqb.org
Source0: https://fedorahosted.org/releases/q/u/quarterback/%{name}-%{version}.tar.xz
+Patch0: test-timeout-fix.patch
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
BuildRequires: libtool doxygen procps check-devel automake
@@ -19,6 +20,10 @@ Initially these are IPC and poll.
%prep
%setup -q
+%patch0 -p1
+
+# Make sure the timestamps are correct
+find . -exec touch \{\} \;
# work-around for broken epoll in rawhide/f17
%build
@@ -26,7 +31,7 @@ Initially these are IPC and poll.
make %{?_smp_mflags}
%check
-make check
+make -j1 check
%install
rm -rf $RPM_BUILD_ROOT
@@ -66,6 +71,40 @@ developing applications that use %{name}.
%{_mandir}/man8/qb-blackbox.8.gz
%changelog
+
+* Wed Feb 19 2014 David Vossel <dvossel at redhat.com> - 0.17.0-1
+Fix: build: Allow 'make rpm' to work with lightweight tags for release candidates
+Fix: spec: reference correct url in spec file
+Doc: update broken doxygen link to something that exists
+Bump version to 0.17.0
+Low: ipc_socket: further optimize max msg size calculations for fbsd portability tests
+Low: ipc_socket: Allow socket max msg size to be calculated more accurately
+Fix: fixes travis compile time error
+Low: tests: Fixes compile time issue with make check
+High: ipcs: Prevent ipc server use after free.
+Low: ipc: Remove ipc connection reference given to dispatch functions
+High: ipc: Fixes memory leak in server connection accept when client partially connects
+IPC: Increase the listen backlog of IPC server
+Low: ipcs: Clarifications to the ipcs server callback documentation.
+Fix rb.test to avoid overwriting memory during reading.
+Low: example: Update client/server example to use server enforced buffer size
+Low: Client side buffer retrieval regression test
+Feature: New api function to retrieve client buffer size
+Low: check_ipc.c: Verify server enforced buffer sizes work
+Feature: Enforce buffer size limits on the server side
+Low: regression tests for regex log filters
+Feature: Filter logs using regex pattern on function, format, or filename
+ipc_setup: Set SO_PASSCRED on listener socket
+Fix: log: Filtering by function and file must match exactly, no substring matches
+Low: blackbox: Abort blackbox logging on ringbuffer overwrite reclaim error
+High: ipcs: Api function allowing server to retrieve client connection's ipc buffer size
+Low: ringbuffer: Abort during chunk reclaim if OVERWRITE flag is set and reclaim fails.
+High: blackbox: unique blackbox ringbuffer files per pid
+Low: ipc_socket: Fixes fd leak in socket ipc client disconnection
+Use sizeof to get the correct size of the sockaddr_un sun_path member in a portable way. Fixes corosync on Mac OS X.
+Detect the max signal value that can be used using NSIG macro
+Avoid double-decrement of level->todo
+
* Thu Feb 14 2013 Fedora Release Engineering <rel-eng at lists.fedoraproject.org> - 0.14.4-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_19_Mass_Rebuild
diff --git a/sources b/sources
index c647879..bac3b5a 100644
--- a/sources
+++ b/sources
@@ -1 +1 @@
-514d07752c7c746dc9a1d88d441f0ec3 libqb-0.14.4.tar.xz
+1cada7e7a9bfcb4cda70099db551e614 libqb-0.17.0.tar.xz
diff --git a/test-timeout-fix.patch b/test-timeout-fix.patch
new file mode 100644
index 0000000..01d6b18
--- /dev/null
+++ b/test-timeout-fix.patch
@@ -0,0 +1,590 @@
+diff --git a/lib/Makefile.am b/lib/Makefile.am
+index 91c6bd1..5bcdc22 100644
+--- a/lib/Makefile.am
++++ b/lib/Makefile.am
+@@ -42,6 +42,8 @@ source_to_lint = util.c hdb.c ringbuffer.c ringbuffer_helper.c \
+ libqb_la_SOURCES = $(source_to_lint) unix.c
+ libqb_la_LIBADD = @LTLIBOBJS@
+
++LDFLAGS = $(LDFLAGS_COPY:-Bsymbolic-functions=)
++
+ if HAVE_SEM_TIMEDWAIT
+ else
+ libqb_la_SOURCES+=rpl_sem.c
+diff --git a/tests/check_ipc.c b/tests/check_ipc.c
+index 6a80fec..e0df9e7 100644
+--- a/tests/check_ipc.c
++++ b/tests/check_ipc.c
+@@ -139,6 +139,7 @@ s1_msg_process_fn(qb_ipcs_connection_t *c,
+ int32_t m;
+ int32_t num;
+ struct qb_ipcs_connection_stats_2 *stats;
++ uint32_t max_size = MAX_MSG_SIZE;
+
+ response.size = sizeof(struct qb_ipc_response_header);
+ response.error = 0;
+@@ -148,8 +149,7 @@ s1_msg_process_fn(qb_ipcs_connection_t *c,
+ free(stats);
+
+ /* crazy large message */
+- res = qb_ipcs_event_send(c, &response,
+- MAX_MSG_SIZE*10);
++ res = qb_ipcs_event_send(c, &response, max_size*10);
+ ck_assert_int_eq(res, -EMSGSIZE);
+
+ /* send one event before responding */
+@@ -157,6 +157,11 @@ s1_msg_process_fn(qb_ipcs_connection_t *c,
+ ck_assert_int_eq(res, sizeof(response));
+ response.id++;
+
++ /* There should be one more item in the event queue now. */
++ stats = qb_ipcs_connection_stats_get_2(c, QB_FALSE);
++ ck_assert_int_eq(stats->event_q_length - num, 1);
++ free(stats);
++
+ /* send response */
+ response.id = IPC_MSG_RES_BULK_EVENTS;
+ res = qb_ipcs_response_send(c, &response, response.size);
+@@ -175,9 +180,6 @@ s1_msg_process_fn(qb_ipcs_connection_t *c,
+ ck_assert_int_eq(res, sizeof(response));
+ response.id++;
+ }
+- stats = qb_ipcs_connection_stats_get_2(c, QB_FALSE);
+- ck_assert_int_eq(stats->event_q_length - num, num_bulk_events);
+- free(stats);
+
+ } else if (req_pt->id == IPC_MSG_REQ_STRESS_EVENT) {
+ struct {
+@@ -313,7 +315,7 @@ s1_connection_destroyed(qb_ipcs_connection_t *c)
+ static void
+ s1_connection_created(qb_ipcs_connection_t *c)
+ {
+- int32_t max = MAX_MSG_SIZE;
++ uint32_t max = MAX_MSG_SIZE;
+
+ if (send_event_on_created) {
+ struct qb_ipc_response_header response;
+@@ -362,6 +364,7 @@ run_ipc_server(void)
+ .dispatch_mod = my_dispatch_mod,
+ .dispatch_del = my_dispatch_del,
+ };
++ uint32_t max_size = MAX_MSG_SIZE;
+
+ qb_loop_signal_add(my_loop, QB_LOOP_HIGH, SIGSTOP,
+ NULL, exit_handler, &handle);
+@@ -374,7 +377,7 @@ run_ipc_server(void)
+ fail_if(s1 == 0);
+
+ if (enforce_server_buffer) {
+- qb_ipcs_enforce_buffer_size(s1, MAX_MSG_SIZE);
++ qb_ipcs_enforce_buffer_size(s1, max_size);
+ }
+ qb_ipcs_poll_handlers_set(s1, &ph);
+
+@@ -402,13 +405,70 @@ run_function_in_new_process(void (*run_ipc_server_fn)(void))
+ return pid;
+ }
+
+-static int32_t
+-stop_process(pid_t pid)
++static void
++request_server_exit(void)
++{
++ struct qb_ipc_request_header req_header;
++ struct qb_ipc_response_header res_header;
++ struct iovec iov[1];
++ int32_t res;
++
++ /*
++ * tell the server to exit
++ */
++ req_header.id = IPC_MSG_REQ_SERVER_FAIL;
++ req_header.size = sizeof(struct qb_ipc_request_header);
++
++ iov[0].iov_len = req_header.size;
++ iov[0].iov_base = &req_header;
++
++ ck_assert_int_eq(QB_TRUE, qb_ipcc_is_connected(conn));
++
++ res = qb_ipcc_sendv_recv(conn, iov, 1,
++ &res_header,
++ sizeof(struct qb_ipc_response_header), -1);
++ /*
++ * confirm we get -ENOTCONN or ECONNRESET
++ */
++ if (res != -ECONNRESET && res != -ENOTCONN) {
++ qb_log(LOG_ERR, "id:%d size:%d", res_header.id, res_header.size);
++ ck_assert_int_eq(res, -ENOTCONN);
++ }
++}
++
++static void
++kill_server(pid_t pid)
+ {
+- /* wait a bit for the server to shutdown by it's self */
+- usleep(100000);
+ kill(pid, SIGTERM);
+ waitpid(pid, NULL, 0);
++}
++
++static int32_t
++verify_graceful_stop(pid_t pid)
++{
++ int wait_rc = 0;
++ int status = 0;
++ int rc = 0;
++ int tries;
++
++ /* We need the server to be able to exit by itself */
++ for (tries = 10; tries >= 0; tries--) {
++ sleep(1);
++ wait_rc = waitpid(pid, &status, WNOHANG);
++ if (wait_rc > 0) {
++ break;
++ }
++ }
++
++ ck_assert_int_eq(wait_rc, pid);
++ rc = WIFEXITED(status);
++ if (rc) {
++ rc = WEXITSTATUS(status);
++ ck_assert_int_eq(rc, 0);
++ } else {
++ fail_if(rc == 0);
++ }
++
+ return 0;
+ }
+
+@@ -425,6 +485,7 @@ send_and_check(int32_t req_id, uint32_t size,
+ struct qb_ipc_response_header res_header;
+ int32_t res;
+ int32_t try_times = 0;
++ uint32_t max_size = MAX_MSG_SIZE;
+
+ request.hdr.id = req_id;
+ request.hdr.size = sizeof(struct qb_ipc_request_header) + size;
+@@ -432,7 +493,7 @@ send_and_check(int32_t req_id, uint32_t size,
+ /* check that we can't send a message that is too big
+ * and we get the right return code.
+ */
+- res = qb_ipcc_send(conn, &request, MAX_MSG_SIZE*2);
++ res = qb_ipcc_send(conn, &request, max_size*2);
+ ck_assert_int_eq(res, -EMSGSIZE);
+
+ repeat_send:
+@@ -484,13 +545,14 @@ test_ipc_txrx(void)
+ int32_t c = 0;
+ size_t size;
+ pid_t pid;
++ uint32_t max_size = MAX_MSG_SIZE;
+
+ pid = run_function_in_new_process(run_ipc_server);
+ fail_if(pid == -1);
+ sleep(1);
+
+ do {
+- conn = qb_ipcc_connect(ipc_name, MAX_MSG_SIZE);
++ conn = qb_ipcc_connect(ipc_name, max_size);
+ if (conn == NULL) {
+ j = waitpid(pid, NULL, WNOHANG);
+ ck_assert_int_eq(j, 0);
+@@ -503,7 +565,7 @@ test_ipc_txrx(void)
+ size = QB_MIN(sizeof(struct qb_ipc_request_header), 64);
+ for (j = 1; j < 19; j++) {
+ size *= 2;
+- if (size >= MAX_MSG_SIZE)
++ if (size >= max_size)
+ break;
+ if (send_and_check(IPC_MSG_REQ_TX_RX, size,
+ recv_timeout, QB_TRUE) < 0) {
+@@ -511,10 +573,17 @@ test_ipc_txrx(void)
+ }
+ }
+ if (turn_on_fc) {
++ /* can't signal server to shutdown if flow control is on */
+ ck_assert_int_eq(fc_enabled, QB_TRUE);
++ qb_ipcc_disconnect(conn);
++ /* TODO - figure out why this sleep is necessary */
++ sleep(1);
++ kill_server(pid);
++ } else {
++ request_server_exit();
++ qb_ipcc_disconnect(conn);
++ verify_graceful_stop(pid);
+ }
+- qb_ipcc_disconnect(conn);
+- stop_process(pid);
+ }
+
+ static void
+@@ -527,13 +596,14 @@ test_ipc_exit(void)
+ int32_t c = 0;
+ int32_t j = 0;
+ pid_t pid;
++ uint32_t max_size = MAX_MSG_SIZE;
+
+ pid = run_function_in_new_process(run_ipc_server);
+ fail_if(pid == -1);
+ sleep(1);
+
+ do {
+- conn = qb_ipcc_connect(ipc_name, MAX_MSG_SIZE);
++ conn = qb_ipcc_connect(ipc_name, max_size);
+ if (conn == NULL) {
+ j = waitpid(pid, NULL, WNOHANG);
+ ck_assert_int_eq(j, 0);
+@@ -554,8 +624,8 @@ test_ipc_exit(void)
+ sizeof(struct qb_ipc_response_header), -1);
+ ck_assert_int_eq(res, sizeof(struct qb_ipc_response_header));
+
+- /* kill the server */
+- stop_process(pid);
++ request_server_exit();
++ verify_graceful_stop(pid);
+
+ /*
+ * wait a bit for the server to die.
+@@ -674,13 +744,14 @@ test_ipc_dispatch(void)
+ int32_t c = 0;
+ pid_t pid;
+ int32_t size;
++ uint32_t max_size = MAX_MSG_SIZE;
+
+ pid = run_function_in_new_process(run_ipc_server);
+ fail_if(pid == -1);
+ sleep(1);
+
+ do {
+- conn = qb_ipcc_connect(ipc_name, MAX_MSG_SIZE);
++ conn = qb_ipcc_connect(ipc_name, max_size);
+ if (conn == NULL) {
+ j = waitpid(pid, NULL, WNOHANG);
+ ck_assert_int_eq(j, 0);
+@@ -693,7 +764,7 @@ test_ipc_dispatch(void)
+ size = QB_MIN(sizeof(struct qb_ipc_request_header), 64);
+ for (j = 1; j < 19; j++) {
+ size *= 2;
+- if (size >= MAX_MSG_SIZE)
++ if (size >= max_size)
+ break;
+ if (send_and_check(IPC_MSG_REQ_DISPATCH, size,
+ recv_timeout, QB_TRUE) < 0) {
+@@ -701,8 +772,9 @@ test_ipc_dispatch(void)
+ }
+ }
+
++ request_server_exit();
+ qb_ipcc_disconnect(conn);
+- stop_process(pid);
++ verify_graceful_stop(pid);
+ }
+
+ START_TEST(test_ipc_disp_us)
+@@ -781,22 +853,20 @@ count_bulk_events(int32_t fd, int32_t revents, void *data)
+ static void
+ test_ipc_bulk_events(void)
+ {
+- struct qb_ipc_request_header req_header;
+- struct qb_ipc_response_header res_header;
+- struct iovec iov[1];
+ int32_t c = 0;
+ int32_t j = 0;
+ pid_t pid;
+ int32_t res;
+ qb_loop_t *cl;
+ int32_t fd;
++ uint32_t max_size = MAX_MSG_SIZE;
+
+ pid = run_function_in_new_process(run_ipc_server);
+ fail_if(pid == -1);
+ sleep(1);
+
+ do {
+- conn = qb_ipcc_connect(ipc_name, MAX_MSG_SIZE);
++ conn = qb_ipcc_connect(ipc_name, max_size);
+ if (conn == NULL) {
+ j = waitpid(pid, NULL, WNOHANG);
+ ck_assert_int_eq(j, 0);
+@@ -823,21 +893,9 @@ test_ipc_bulk_events(void)
+ qb_loop_run(cl);
+ ck_assert_int_eq(events_received, num_bulk_events);
+
+- req_header.id = IPC_MSG_REQ_SERVER_FAIL;
+- req_header.size = sizeof(struct qb_ipc_request_header);
+-
+- iov[0].iov_len = req_header.size;
+- iov[0].iov_base = &req_header;
+- res = qb_ipcc_sendv_recv(conn, iov, 1,
+- &res_header,
+- sizeof(struct qb_ipc_response_header), -1);
+- if (res != -ECONNRESET && res != -ENOTCONN) {
+- qb_log(LOG_ERR, "id:%d size:%d", res_header.id, res_header.size);
+- ck_assert_int_eq(res, -ENOTCONN);
+- }
+-
++ request_server_exit();
+ qb_ipcc_disconnect(conn);
+- stop_process(pid);
++ verify_graceful_stop(pid);
+ }
+
+ static void
+@@ -857,13 +915,14 @@ test_ipc_stress_test(void)
+ int32_t res;
+ qb_loop_t *cl;
+ int32_t fd;
++ uint32_t max_size = MAX_MSG_SIZE;
+ /* This looks strange, but it serves an important purpose.
+ * This test forces the server to enforce the MAX_MSG_SIZE
+ * limit from the server side, which overrides the client's
+ * buffer limit. To verify this functionality is working
+ * we set the client limit lower than what the server
+ * is enforcing. */
+- int32_t client_buf_size = MAX_MSG_SIZE - 1024;
++ int32_t client_buf_size = max_size - 1024;
+ int32_t real_buf_size;
+
+ enforce_server_buffer = 1;
+@@ -884,7 +943,7 @@ test_ipc_stress_test(void)
+ fail_if(conn == NULL);
+
+ real_buf_size = qb_ipcc_get_buffer_size(conn);
+- ck_assert_int_eq(real_buf_size, MAX_MSG_SIZE);
++ ck_assert_int_eq(real_buf_size, max_size);
+
+ qb_log(LOG_DEBUG, "Testing %d iterations of EVENT msg passing.", num_stress_events);
+
+@@ -920,7 +979,7 @@ test_ipc_stress_test(void)
+ }
+
+ qb_ipcc_disconnect(conn);
+- stop_process(pid);
++ verify_graceful_stop(pid);
+ }
+
+ START_TEST(test_ipc_stress_test_us)
+@@ -954,6 +1013,7 @@ test_ipc_event_on_created(void)
+ int32_t res;
+ qb_loop_t *cl;
+ int32_t fd;
++ uint32_t max_size = MAX_MSG_SIZE;
+
+ num_bulk_events = 1;
+
+@@ -962,7 +1022,7 @@ test_ipc_event_on_created(void)
+ sleep(1);
+
+ do {
+- conn = qb_ipcc_connect(ipc_name, MAX_MSG_SIZE);
++ conn = qb_ipcc_connect(ipc_name, max_size);
+ if (conn == NULL) {
+ j = waitpid(pid, NULL, WNOHANG);
+ ck_assert_int_eq(j, 0);
+@@ -984,8 +1044,9 @@ test_ipc_event_on_created(void)
+ qb_loop_run(cl);
+ ck_assert_int_eq(events_received, num_bulk_events);
+
++ request_server_exit();
+ qb_ipcc_disconnect(conn);
+- stop_process(pid);
++ verify_graceful_stop(pid);
+ }
+
+ START_TEST(test_ipc_event_on_created_us)
+@@ -1009,13 +1070,14 @@ test_ipc_disconnect_after_created(void)
+ int32_t j = 0;
+ pid_t pid;
+ int32_t res;
++ uint32_t max_size = MAX_MSG_SIZE;
+
+ pid = run_function_in_new_process(run_ipc_server);
+ fail_if(pid == -1);
+ sleep(1);
+
+ do {
+- conn = qb_ipcc_connect(ipc_name, MAX_MSG_SIZE);
++ conn = qb_ipcc_connect(ipc_name, max_size);
+ if (conn == NULL) {
+ j = waitpid(pid, NULL, WNOHANG);
+ ck_assert_int_eq(j, 0);
+@@ -1046,7 +1108,7 @@ test_ipc_disconnect_after_created(void)
+ ck_assert_int_eq(QB_FALSE, qb_ipcc_is_connected(conn));
+
+ qb_ipcc_disconnect(conn);
+- stop_process(pid);
++ kill_server(pid);
+ }
+
+ START_TEST(test_ipc_disconnect_after_created_us)
+@@ -1063,20 +1125,17 @@ END_TEST
+ static void
+ test_ipc_server_fail(void)
+ {
+- struct qb_ipc_request_header req_header;
+- struct qb_ipc_response_header res_header;
+- struct iovec iov[1];
+- int32_t res;
+ int32_t j;
+ int32_t c = 0;
+ pid_t pid;
++ uint32_t max_size = MAX_MSG_SIZE;
+
+ pid = run_function_in_new_process(run_ipc_server);
+ fail_if(pid == -1);
+ sleep(1);
+
+ do {
+- conn = qb_ipcc_connect(ipc_name, MAX_MSG_SIZE);
++ conn = qb_ipcc_connect(ipc_name, max_size);
+ if (conn == NULL) {
+ j = waitpid(pid, NULL, WNOHANG);
+ ck_assert_int_eq(j, 0);
+@@ -1086,31 +1145,10 @@ test_ipc_server_fail(void)
+ } while (conn == NULL && c < 5);
+ fail_if(conn == NULL);
+
+- /*
+- * tell the server to exit
+- */
+- req_header.id = IPC_MSG_REQ_SERVER_FAIL;
+- req_header.size = sizeof(struct qb_ipc_request_header);
+-
+- iov[0].iov_len = req_header.size;
+- iov[0].iov_base = &req_header;
+-
+- ck_assert_int_eq(QB_TRUE, qb_ipcc_is_connected(conn));
+-
+- res = qb_ipcc_sendv_recv(conn, iov, 1,
+- &res_header,
+- sizeof(struct qb_ipc_response_header), -1);
+- /*
+- * confirm we get -ENOTCONN or ECONNRESET
+- */
+- if (res != -ECONNRESET && res != -ENOTCONN) {
+- qb_log(LOG_ERR, "id:%d size:%d", res_header.id, res_header.size);
+- ck_assert_int_eq(res, -ENOTCONN);
+- }
++ request_server_exit();
+ ck_assert_int_eq(QB_FALSE, qb_ipcc_is_connected(conn));
+-
+ qb_ipcc_disconnect(conn);
+- stop_process(pid);
++ verify_graceful_stop(pid);
+ }
+
+ START_TEST(test_ipc_server_fail_soc)
+@@ -1181,6 +1219,7 @@ test_ipc_service_ref_count(void)
+ int32_t c = 0;
+ int32_t j = 0;
+ pid_t pid;
++ uint32_t max_size = MAX_MSG_SIZE;
+
+ reference_count_test = QB_TRUE;
+
+@@ -1189,7 +1228,7 @@ test_ipc_service_ref_count(void)
+ sleep(1);
+
+ do {
+- conn = qb_ipcc_connect(ipc_name, MAX_MSG_SIZE);
++ conn = qb_ipcc_connect(ipc_name, max_size);
+ if (conn == NULL) {
+ j = waitpid(pid, NULL, WNOHANG);
+ ck_assert_int_eq(j, 0);
+@@ -1201,8 +1240,7 @@ test_ipc_service_ref_count(void)
+
+ sleep(5);
+
+- qb_ipcc_disconnect(conn);
+- stop_process(pid);
++ kill_server(pid);
+ }
+
+
+@@ -1265,22 +1303,22 @@ make_shm_suite(void)
+
+ tc = tcase_create("ipc_server_fail_shm");
+ tcase_add_test(tc, test_ipc_server_fail_shm);
+- tcase_set_timeout(tc, 6);
++ tcase_set_timeout(tc, 8);
+ suite_add_tcase(s, tc);
+
+ tc = tcase_create("ipc_txrx_shm_block");
+ tcase_add_test(tc, test_ipc_txrx_shm_block);
+- tcase_set_timeout(tc, 6);
++ tcase_set_timeout(tc, 8);
+ suite_add_tcase(s, tc);
+
+ tc = tcase_create("ipc_txrx_shm_tmo");
+ tcase_add_test(tc, test_ipc_txrx_shm_tmo);
+- tcase_set_timeout(tc, 6);
++ tcase_set_timeout(tc, 8);
+ suite_add_tcase(s, tc);
+
+ tc = tcase_create("ipc_fc_shm");
+ tcase_add_test(tc, test_ipc_fc_shm);
+- tcase_set_timeout(tc, 6);
++ tcase_set_timeout(tc, 8);
+ suite_add_tcase(s, tc);
+
+ tc = tcase_create("ipc_dispatch_shm");
+@@ -1300,11 +1338,12 @@ make_shm_suite(void)
+
+ tc = tcase_create("ipc_exit_shm");
+ tcase_add_test(tc, test_ipc_exit_shm);
+- tcase_set_timeout(tc, 3);
++ tcase_set_timeout(tc, 8);
+ suite_add_tcase(s, tc);
+
+ tc = tcase_create("ipc_event_on_created_shm");
+ tcase_add_test(tc, test_ipc_event_on_created_shm);
++ tcase_set_timeout(tc, 10);
+ suite_add_tcase(s, tc);
+
+ tc = tcase_create("ipc_service_ref_count_shm");
+@@ -1328,27 +1367,27 @@ make_soc_suite(void)
+
+ tc = tcase_create("ipc_server_fail_soc");
+ tcase_add_test(tc, test_ipc_server_fail_soc);
+- tcase_set_timeout(tc, 6);
++ tcase_set_timeout(tc, 8);
+ suite_add_tcase(s, tc);
+
+ tc = tcase_create("ipc_txrx_us_block");
+ tcase_add_test(tc, test_ipc_txrx_us_block);
+- tcase_set_timeout(tc, 6);
++ tcase_set_timeout(tc, 8);
+ suite_add_tcase(s, tc);
+
+ tc = tcase_create("ipc_txrx_us_tmo");
+ tcase_add_test(tc, test_ipc_txrx_us_tmo);
+- tcase_set_timeout(tc, 6);
++ tcase_set_timeout(tc, 8);
+ suite_add_tcase(s, tc);
+
+ tc = tcase_create("ipc_fc_us");
+ tcase_add_test(tc, test_ipc_fc_us);
+- tcase_set_timeout(tc, 6);
++ tcase_set_timeout(tc, 8);
+ suite_add_tcase(s, tc);
+
+ tc = tcase_create("ipc_exit_us");
+ tcase_add_test(tc, test_ipc_exit_us);
+- tcase_set_timeout(tc, 6);
++ tcase_set_timeout(tc, 8);
+ suite_add_tcase(s, tc);
+
+ tc = tcase_create("ipc_dispatch_us");
+@@ -1368,10 +1407,12 @@ make_soc_suite(void)
+
+ tc = tcase_create("ipc_event_on_created_us");
+ tcase_add_test(tc, test_ipc_event_on_created_us);
++ tcase_set_timeout(tc, 10);
+ suite_add_tcase(s, tc);
+
+ tc = tcase_create("ipc_disconnect_after_created_us");
+ tcase_add_test(tc, test_ipc_disconnect_after_created_us);
++ tcase_set_timeout(tc, 10);
+ suite_add_tcase(s, tc);
+
+ tc = tcase_create("ipc_service_ref_count_us");
More information about the scm-commits
mailing list