This is to handle disconnecting in a failure state (before the connection has been established).
Another aspect to this is we don't want to call connection_destroyed() if we haven't called connection_created().
Signed-off-by: Angus Salkeld asalkeld@redhat.com --- lib/ipc_int.h | 3 ++- lib/ipc_us.c | 8 +++++--- lib/ipcs.c | 13 +++++++++++-- 3 files changed, 18 insertions(+), 6 deletions(-)
diff --git a/lib/ipc_int.h b/lib/ipc_int.h index c4eb082..72af15f 100644 --- a/lib/ipc_int.h +++ b/lib/ipc_int.h @@ -165,7 +165,8 @@ struct qb_ipcs_service { enum qb_ipcs_connection_state { QB_IPCS_CONNECTION_INACTIVE, QB_IPCS_CONNECTION_ACTIVE, - QB_IPCS_CONNECTION_DOWN, + QB_IPCS_CONNECTION_ESTABLISHED, + QB_IPCS_CONNECTION_SHUTTING_DOWN, };
struct qb_ipcs_connection { diff --git a/lib/ipc_us.c b/lib/ipc_us.c index 1387b0b..8c989aa 100644 --- a/lib/ipc_us.c +++ b/lib/ipc_us.c @@ -526,6 +526,7 @@ handle_new_connection(struct qb_ipcs_service *s, struct qb_ipcs_connection *c = NULL; struct qb_ipc_connection_request *req = msg; int32_t res = auth_result; + int32_t res2 = 0; struct qb_ipc_connection_response response;
c = qb_ipcs_connection_alloc(s); @@ -605,15 +606,16 @@ send_response: s->stats.active_connections++; }
- res = qb_ipc_us_send(&c->setup, &response, response.hdr.size); - if (res == response.hdr.size) { - res = 0; + res2 = qb_ipc_us_send(&c->setup, &response, response.hdr.size); + if (res == 0 && res2 != response.hdr.size) { + res = res2; }
if (res == 0) { if (s->serv_fns.connection_created) { s->serv_fns.connection_created(c); } + c->state = QB_IPCS_CONNECTION_ESTABLISHED; } else { if (res == -EACCES) { qb_util_log(LOG_ERR, "Invalid IPC credentials."); diff --git a/lib/ipcs.c b/lib/ipcs.c index 569fd45..0710301 100644 --- a/lib/ipcs.c +++ b/lib/ipcs.c @@ -496,7 +496,16 @@ qb_ipcs_disconnect(struct qb_ipcs_connection *c) qb_util_log(LOG_DEBUG, "%s() state:%d", __func__, c->state);
if (c->state == QB_IPCS_CONNECTION_ACTIVE) { - c->state = QB_IPCS_CONNECTION_DOWN; + c->state = QB_IPCS_CONNECTION_INACTIVE; + c->service->stats.closed_connections++; + if (c->service->needs_sock_for_poll && c->setup.u.us.sock > 0) { + qb_ipcc_us_sock_close(c->setup.u.us.sock); + c->setup.u.us.sock = -1; + qb_ipcs_connection_unref(c); + } + } + if (c->state == QB_IPCS_CONNECTION_ESTABLISHED) { + c->state = QB_IPCS_CONNECTION_SHUTTING_DOWN; c->service->stats.active_connections--; c->service->stats.closed_connections++;
@@ -506,7 +515,7 @@ qb_ipcs_disconnect(struct qb_ipcs_connection *c) qb_ipcs_connection_unref(c); } } - if (c->state == QB_IPCS_CONNECTION_DOWN) { + if (c->state == QB_IPCS_CONNECTION_SHUTTING_DOWN) { res = 0; if (c->service->serv_fns.connection_closed) { res = c->service->serv_fns.connection_closed(c);
Until now we have been relying on getting a POLLHUP, but under heavy load we seem to get an old poll event that causes a double deref of the connection object.
Signed-off-by: Angus Salkeld asalkeld@redhat.com --- lib/ipcs.c | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-)
diff --git a/lib/ipcs.c b/lib/ipcs.c index 0710301..5a39e43 100644 --- a/lib/ipcs.c +++ b/lib/ipcs.c @@ -499,6 +499,7 @@ qb_ipcs_disconnect(struct qb_ipcs_connection *c) c->state = QB_IPCS_CONNECTION_INACTIVE; c->service->stats.closed_connections++; if (c->service->needs_sock_for_poll && c->setup.u.us.sock > 0) { + (void)c->service->poll_fns.dispatch_del(c->setup.u.us.sock); qb_ipcc_us_sock_close(c->setup.u.us.sock); c->setup.u.us.sock = -1; qb_ipcs_connection_unref(c); @@ -510,6 +511,7 @@ qb_ipcs_disconnect(struct qb_ipcs_connection *c) c->service->stats.closed_connections++;
if (c->service->needs_sock_for_poll && c->setup.u.us.sock > 0) { + (void)c->service->poll_fns.dispatch_del(c->setup.u.us.sock); qb_ipcc_us_sock_close(c->setup.u.us.sock); c->setup.u.us.sock = -1; qb_ipcs_connection_unref(c);
quarterback-devel@lists.fedorahosted.org