2 commits - ldap/servers
by Noriko Hosoi
ldap/servers/slapd/bind.c | 58 +++++++++++++++++++++++++++++++-------
ldap/servers/slapd/mapping_tree.c | 19 ++++++++++++
ldap/servers/slapd/slapi-plugin.h | 1
3 files changed, 68 insertions(+), 10 deletions(-)
New commits:
commit 8212a8913b748cd1f5e986a754c37ef41db8272a
Author: Noriko Hosoi <nhosoi(a)redhat.com>
Date: Fri Sep 18 15:19:51 2015 -0700
Ticket #48188 - segfault in ns-slapd due to accessing Slapi_DN freed in pre bind plug-in
Description: Additional fixes based upon the comments by rmeggins(a)redhat.com
(Thank you, Rich!!).
https://fedorahosted.org/389/ticket/48188?replyto=24#comment:24
1. Implemented the case 2)
If the plugin changes the SLAPI_BIND_TARGET_SDN *value*,
we need to select a different backend. It is possible
(but not very useful) for the plugin to change the pointer,
but use the same value.
2. Added an api slapi_be_select_exact which returns NULL if
there is no matching backend.
https://fedorahosted.org/389/ticket/48188
Reviewed by rmeggins(a)redhat.com (Thank you!)
diff --git a/ldap/servers/slapd/bind.c b/ldap/servers/slapd/bind.c
index 4ec276a..474b508 100644
--- a/ldap/servers/slapd/bind.c
+++ b/ldap/servers/slapd/bind.c
@@ -107,6 +107,7 @@ do_bind( Slapi_PBlock *pb )
int auto_bind = 0;
int minssf = 0;
int minssf_exclude_rootdse = 0;
+ Slapi_DN *original_sdn = NULL;
LDAPDebug( LDAP_DEBUG_TRACE, "do_bind\n", 0, 0, 0 );
@@ -660,10 +661,9 @@ do_bind( Slapi_PBlock *pb )
goto free_and_return;
}
- if (referral)
- {
- send_referrals_from_entry(pb,referral);
- slapi_entry_free(referral);
+ if (referral) {
+ send_referrals_from_entry(pb,referral);
+ slapi_entry_free(referral);
goto free_and_return;
}
@@ -671,29 +671,50 @@ do_bind( Slapi_PBlock *pb )
/* not root dn - pass to the backend */
if ( be->be_bind != NULL ) {
-
+ original_sdn = slapi_sdn_dup(sdn);
/*
* call the pre-bind plugins. if they succeed, call
* the backend bind function. then call the post-bind
* plugins.
*/
if ( plugin_call_plugins( pb, SLAPI_PLUGIN_PRE_BIND_FN ) == 0 ) {
+ int sdn_updated = 0;
rc = 0;
/* Check if a pre_bind plugin mapped the DN to another backend */
Slapi_DN *pb_sdn;
slapi_pblock_get(pb, SLAPI_BIND_TARGET_SDN, &pb_sdn);
- if (pb_sdn != sdn) {
+ if (!pb_sdn) {
+ PR_snprintf(errorbuf, sizeof(errorbuf), "Pre-bind plug-in set NULL dn\n");
+ send_ldap_result(pb, LDAP_OPERATIONS_ERROR, NULL, errorbuf, 0, NULL);
+ goto free_and_return;
+ } else if ((pb_sdn != sdn) || (sdn_updated = slapi_sdn_compare(original_sdn, pb_sdn))) {
/*
* Slapi_DN set in pblock was changed by a pre bind plug-in.
* It is a plug-in's responsibility to free the original Slapi_DN.
*/
sdn = pb_sdn;
dn = slapi_sdn_get_dn(sdn);
-
- slapi_be_Unlock(be);
- be = slapi_be_select(sdn);
- slapi_be_Rlock(be);
+ if (!dn) {
+ PR_snprintf(errorbuf, sizeof(errorbuf), "Pre-bind plug-in set corrupted dn\n");
+ send_ldap_result(pb, LDAP_OPERATIONS_ERROR, NULL, errorbuf, 0, NULL);
+ goto free_and_return;
+ }
+ if (!sdn_updated) { /* pb_sdn != sdn; need to compare the dn's. */
+ sdn_updated = slapi_sdn_compare(original_sdn, sdn);
+ }
+ if (sdn_updated) { /* call slapi_be_select only when the DN is updated. */
+ slapi_be_Unlock(be);
+ be = slapi_be_select_exact(sdn);
+ if (be) {
+ slapi_be_Rlock(be);
+ slapi_pblock_set( pb, SLAPI_BACKEND, be );
+ } else {
+ PR_snprintf(errorbuf, sizeof(errorbuf), "No matching backend for %s\n", dn);
+ send_ldap_result(pb, LDAP_OPERATIONS_ERROR, NULL, errorbuf, 0, NULL);
+ goto free_and_return;
+ }
+ }
}
/*
@@ -845,10 +866,12 @@ account_locked:
}
free_and_return:;
- if (be)
+ slapi_sdn_free(&original_sdn);
+ if (be) {
slapi_be_Unlock(be);
+ }
if (bind_sdn_in_pb) {
- slapi_pblock_get(pb, SLAPI_BIND_TARGET_SDN, &sdn);
+ slapi_pblock_get(pb, SLAPI_BIND_TARGET_SDN, &sdn);
}
slapi_sdn_free(&sdn);
slapi_ch_free_string( &saslmech );
diff --git a/ldap/servers/slapd/mapping_tree.c b/ldap/servers/slapd/mapping_tree.c
index 165eba1..20c2cc3 100644
--- a/ldap/servers/slapd/mapping_tree.c
+++ b/ldap/servers/slapd/mapping_tree.c
@@ -3095,6 +3095,25 @@ slapi_be_select( const Slapi_DN *sdn ) /* JCM - The name of this should change??
return be;
}
+Slapi_Backend *
+slapi_be_select_exact(const Slapi_DN *sdn)
+{
+ Slapi_Backend *be = NULL;
+ mapping_tree_node *node = NULL;
+
+ if (!sdn) {
+ LDAPDebug0Args(LDAP_DEBUG_ANY, "slapi_be_select_exact: Empty Slapi_DN is given.\n");
+ return NULL;
+ }
+ node = slapi_get_mapping_tree_node_by_dn(sdn);
+
+ if (node && node->mtn_be) {
+ be = node->mtn_be[0];
+ }
+
+ return be;
+}
+
/* Check if the dn targets an internal reserved backends */
int
slapi_on_internal_backends(const Slapi_DN *sdn)
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index 4134c1b..72f3920 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -6339,6 +6339,7 @@ Slapi_Backend *slapi_be_new( const char *type, const char *name,
int isprivate, int logchanges );
void slapi_be_free(Slapi_Backend **be);
Slapi_Backend *slapi_be_select( const Slapi_DN *sdn );
+Slapi_Backend *slapi_be_select_exact(const Slapi_DN *sdn);
Slapi_Backend *slapi_be_select_by_instance_name( const char *name );
int slapi_be_exist(const Slapi_DN *sdn);
void slapi_be_delete_onexit(Slapi_Backend *be);
commit 40e0d0f80d6fd1271431e105580293747c43c327
Author: Simo Sorce <simo(a)redhat.com>
Date: Fri Sep 18 11:13:43 2015 -0700
Ticket #48188 - segfault in ns-slapd due to accessing Slapi_DN freed in pre bind plug-in
This patch is based upon the patch provided by Simo Sorce <simo(a)redhat.com> for
Ticket #48272 - Allow PRE_BIND plugins to mangle DNs
Description:
Allow a pre_bind plugin to map a DN to another
This is useful for plugins that deal with virtual trees or non-standard
clients binding with values that are not proper DNs and similar situations.
Signed-off-by: Simo Sorce <simo(a)redhat.com>
2 changes are made to the original patch:
1. removed "slapi_sdn_free(&sdn)" with this comment:
* It is a plug-in's responsibility to free the original Slapi_DN.
Note: slapi-nis already freed the original sdn.
2. reset dn from the new sdn.
dn = slapi_sdn_get_dn(sdn);
https://fedorahosted.org/389/ticket/48188
Reviewed by rmeggins(a)redhat.com and lkrispen(a)redhat.com.
diff --git a/ldap/servers/slapd/bind.c b/ldap/servers/slapd/bind.c
index 1bd604f..4ec276a 100644
--- a/ldap/servers/slapd/bind.c
+++ b/ldap/servers/slapd/bind.c
@@ -669,7 +669,7 @@ do_bind( Slapi_PBlock *pb )
slapi_pblock_set( pb, SLAPI_BACKEND, be );
- /* not root dn - pass to the backend */
+ /* not root dn - pass to the backend */
if ( be->be_bind != NULL ) {
/*
@@ -677,10 +677,25 @@ do_bind( Slapi_PBlock *pb )
* the backend bind function. then call the post-bind
* plugins.
*/
- if ( plugin_call_plugins( pb, SLAPI_PLUGIN_PRE_BIND_FN )
- == 0 ) {
+ if ( plugin_call_plugins( pb, SLAPI_PLUGIN_PRE_BIND_FN ) == 0 ) {
rc = 0;
+ /* Check if a pre_bind plugin mapped the DN to another backend */
+ Slapi_DN *pb_sdn;
+ slapi_pblock_get(pb, SLAPI_BIND_TARGET_SDN, &pb_sdn);
+ if (pb_sdn != sdn) {
+ /*
+ * Slapi_DN set in pblock was changed by a pre bind plug-in.
+ * It is a plug-in's responsibility to free the original Slapi_DN.
+ */
+ sdn = pb_sdn;
+ dn = slapi_sdn_get_dn(sdn);
+
+ slapi_be_Unlock(be);
+ be = slapi_be_select(sdn);
+ slapi_be_Rlock(be);
+ }
+
/*
* Is this account locked ?
* could be locked through the account inactivation
--
389 commits mailing list
389-commits@%(host_name)s
http://lists.fedoraproject.org/postorius/389-commits@lists.fedoraproject.org
8 years, 7 months
Branch '389-ds-base-1.3.4' - ldap/servers
by Mark Reynolds
ldap/servers/plugins/replication/repl5_replica_config.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
New commits:
commit d9f03f5fddfc8ba7009c9dcc584686e43d6339e8
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Fri Sep 18 11:56:29 2015 -0400
Ticket 48217 - cleanallruv - fix regression with server shutdown
Bug Description: Recent checks for server shutdown were added to cleanallruv task,
but we did not properly check for "shutdown" at the end of the task.
This caused the server to think the task successfully finished,
when in fact it did not.
Fix Description: Properly check for shutdown at the end of the task, and handle it
appropriately.
https://fedorahosted.org/389/ticket/48217
Reviewed by: nhosoi(Thanks!)
(cherry picked from commit c41d36de0ca438bf23e4e810bfec0fd59cbc790b)
diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c
index 446da3f..8d3c481 100644
--- a/ldap/servers/plugins/replication/repl5_replica_config.c
+++ b/ldap/servers/plugins/replication/repl5_replica_config.c
@@ -1948,7 +1948,7 @@ done:
/*
* If the replicas are cleaned, release the rid
*/
- if(!aborted){
+ if(!aborted && !slapi_is_shutting_down()){
delete_cleaned_rid_config(data);
/* make sure all the replicas have been "pre_cleaned" before finishing */
check_replicas_are_done_cleaning(data);
@@ -3005,7 +3005,7 @@ replica_abort_task_thread(void *arg)
}
/*
- * Now send the cleanruv extended op to all the agreements
+ * Now send the abort cleanruv extended op to all the agreements
*/
while(agmt_not_notified && !slapi_is_shutting_down()){
agmt_obj = agmtlist_get_first_agreement_for_replica (data->replica);
@@ -3013,7 +3013,7 @@ replica_abort_task_thread(void *arg)
agmt_not_notified = 0;
break;
}
- while (agmt_obj){
+ while (agmt_obj && !slapi_is_shutting_down()){
agmt = (Repl_Agmt*)object_get_data (agmt_obj);
if(!agmt_is_enabled(agmt) || get_agmt_agreement_type(agmt) == REPLICA_TYPE_WINDOWS){
agmt_obj = agmtlist_get_next_agreement_for_replica (data->replica, agmt_obj);
@@ -3058,7 +3058,7 @@ replica_abort_task_thread(void *arg)
} /* while */
done:
- if(agmt_not_notified){
+ if(agmt_not_notified || slapi_is_shutting_down()){
/* failure */
cleanruv_log(data->task, data->rid, ABORT_CLEANALLRUV_ID,"Abort task failed, will resume the task at the next server startup.");
} else {
--
389 commits mailing list
389-commits@%(host_name)s
http://lists.fedoraproject.org/postorius/389-commits@lists.fedoraproject.org
8 years, 7 months
ldap/servers
by Mark Reynolds
ldap/servers/plugins/replication/repl5_replica_config.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
New commits:
commit c41d36de0ca438bf23e4e810bfec0fd59cbc790b
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Fri Sep 18 11:56:29 2015 -0400
Ticket 48217 - cleanallruv - fix regression with server shutdown
Bug Description: Recent checks for server shutdown were added to cleanallruv task,
but we did not properly check for "shutdown" at the end of the task.
This caused the server to think the task successfully finished,
when in fact it did not.
Fix Description: Properly check for shutdown at the end of the task, and handle it
appropriately.
https://fedorahosted.org/389/ticket/48217
Reviewed by: nhosoi(Thanks!)
diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c
index 446da3f..8d3c481 100644
--- a/ldap/servers/plugins/replication/repl5_replica_config.c
+++ b/ldap/servers/plugins/replication/repl5_replica_config.c
@@ -1948,7 +1948,7 @@ done:
/*
* If the replicas are cleaned, release the rid
*/
- if(!aborted){
+ if(!aborted && !slapi_is_shutting_down()){
delete_cleaned_rid_config(data);
/* make sure all the replicas have been "pre_cleaned" before finishing */
check_replicas_are_done_cleaning(data);
@@ -3005,7 +3005,7 @@ replica_abort_task_thread(void *arg)
}
/*
- * Now send the cleanruv extended op to all the agreements
+ * Now send the abort cleanruv extended op to all the agreements
*/
while(agmt_not_notified && !slapi_is_shutting_down()){
agmt_obj = agmtlist_get_first_agreement_for_replica (data->replica);
@@ -3013,7 +3013,7 @@ replica_abort_task_thread(void *arg)
agmt_not_notified = 0;
break;
}
- while (agmt_obj){
+ while (agmt_obj && !slapi_is_shutting_down()){
agmt = (Repl_Agmt*)object_get_data (agmt_obj);
if(!agmt_is_enabled(agmt) || get_agmt_agreement_type(agmt) == REPLICA_TYPE_WINDOWS){
agmt_obj = agmtlist_get_next_agreement_for_replica (data->replica, agmt_obj);
@@ -3058,7 +3058,7 @@ replica_abort_task_thread(void *arg)
} /* while */
done:
- if(agmt_not_notified){
+ if(agmt_not_notified || slapi_is_shutting_down()){
/* failure */
cleanruv_log(data->task, data->rid, ABORT_CLEANALLRUV_ID,"Abort task failed, will resume the task at the next server startup.");
} else {
--
389 commits mailing list
389-commits@%(host_name)s
http://lists.fedoraproject.org/postorius/389-commits@lists.fedoraproject.org
8 years, 7 months
Branch '389-ds-base-1.2.11' - ldap/servers
by thierry bordaz
ldap/servers/plugins/replication/repl5_inc_protocol.c | 1 -
1 file changed, 1 deletion(-)
New commits:
commit 8d4e08a270b4f5e3ffb3ad85e98a7283dfc7e4b7
Author: Thierry Bordaz <tbordaz(a)redhat.com>
Date: Fri Sep 18 18:38:19 2015 +0200
Ticket 48266: coverity issue
diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c
index 0dc9f30..3268bfd 100644
--- a/ldap/servers/plugins/replication/repl5_inc_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c
@@ -1688,7 +1688,6 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
int finished = 0;
ConnResult replay_crc;
char csn_str[CSN_STRSIZE];
- PRBool subentry_update_sent = PR_FALSE;
PRBool subentry_update_needed = PR_FALSE;
int skipped_updates = 0;
int fractional_repl;
--
389 commits mailing list
389-commits@%(host_name)s
http://lists.fedoraproject.org/postorius/389-commits@lists.fedoraproject.org
8 years, 7 months
Branch '389-ds-base-1.3.3' - ldap/servers
by thierry bordaz
ldap/servers/plugins/replication/repl5_inc_protocol.c | 1 -
1 file changed, 1 deletion(-)
New commits:
commit 25cbb79b321ded79ee5fa4f2d47af9e2b03cd216
Author: Thierry Bordaz <tbordaz(a)redhat.com>
Date: Fri Sep 18 18:38:19 2015 +0200
Ticket 48266: coverity issue
diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c
index e8838a0..0d20b27 100644
--- a/ldap/servers/plugins/replication/repl5_inc_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c
@@ -1701,7 +1701,6 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
int finished = 0;
ConnResult replay_crc;
char csn_str[CSN_STRSIZE];
- PRBool subentry_update_sent = PR_FALSE;
PRBool subentry_update_needed = PR_FALSE;
int skipped_updates = 0;
int fractional_repl;
--
389 commits mailing list
389-commits@%(host_name)s
http://lists.fedoraproject.org/postorius/389-commits@lists.fedoraproject.org
8 years, 7 months
Branch '389-ds-base-1.3.4' - ldap/servers
by thierry bordaz
ldap/servers/plugins/replication/repl5_inc_protocol.c | 1 -
1 file changed, 1 deletion(-)
New commits:
commit 8cd4f45a9621dfaea7249179919b783857c9f22c
Author: Thierry Bordaz <tbordaz(a)redhat.com>
Date: Fri Sep 18 18:38:19 2015 +0200
Ticket 48266: coverity issue
diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c
index e0599e5..7680340 100644
--- a/ldap/servers/plugins/replication/repl5_inc_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c
@@ -1672,7 +1672,6 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
int finished = 0;
ConnResult replay_crc;
char csn_str[CSN_STRSIZE];
- PRBool subentry_update_sent = PR_FALSE;
PRBool subentry_update_needed = PR_FALSE;
int skipped_updates = 0;
int fractional_repl;
--
389 commits mailing list
389-commits@%(host_name)s
http://lists.fedoraproject.org/postorius/389-commits@lists.fedoraproject.org
8 years, 7 months
ldap/servers
by thierry bordaz
ldap/servers/plugins/replication/repl5_inc_protocol.c | 1 -
1 file changed, 1 deletion(-)
New commits:
commit a8130abac333f3f03312f168d150d9cbca55ef05
Author: Thierry Bordaz <tbordaz(a)redhat.com>
Date: Fri Sep 18 18:38:19 2015 +0200
Ticket 48266: coverity issue
diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c
index e0599e5..7680340 100644
--- a/ldap/servers/plugins/replication/repl5_inc_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c
@@ -1672,7 +1672,6 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
int finished = 0;
ConnResult replay_crc;
char csn_str[CSN_STRSIZE];
- PRBool subentry_update_sent = PR_FALSE;
PRBool subentry_update_needed = PR_FALSE;
int skipped_updates = 0;
int fractional_repl;
--
389 commits mailing list
389-commits@%(host_name)s
http://lists.fedoraproject.org/postorius/389-commits@lists.fedoraproject.org
8 years, 7 months
Branch '389-ds-base-1.2.11' - ldap/servers
by thierry bordaz
ldap/servers/plugins/replication/repl5.h | 2
ldap/servers/plugins/replication/repl5_inc_protocol.c | 39 ++++
ldap/servers/plugins/replication/repl5_replica.c | 156 ++++++++++++++++++
ldap/servers/plugins/replication/repl5_tot_protocol.c | 13 +
4 files changed, 209 insertions(+), 1 deletion(-)
New commits:
commit f04f4c0140c1a970314735cb69b827230136b346
Author: Thierry Bordaz <tbordaz(a)redhat.com>
Date: Fri Sep 11 18:56:53 2015 +0200
Ticket 48266: Fractional replication evaluates several times the same CSN
Bug Description:
In fractional replication, if there are only skipped updates and many of them, the supplier
acquires the replica for a long time. At the end of the session, the RUV is not updated,
so the next session will restart evaluating the same skipped updates.
Fix Description:
The fix introduces subentries under the suffix: 'cn=repl keep alive <rid>,$SUFFIX'
During an incremental replication session, if the session only contains skipped updates
and the number of them exceeds a threshold (100), it triggers an update on that subentry.
This update will eventually be replicated, moving forward the RUV
https://fedorahosted.org/389/ticket/48266
Reviewed by: Noriko Hosoi, Rich Megginson, Simon Pichugin
Platforms tested: <plat>
Flag Day: no
Doc impact: no
diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
index 6cec248..66006f6 100644
--- a/ldap/servers/plugins/replication/repl5.h
+++ b/ldap/servers/plugins/replication/repl5.h
@@ -521,6 +521,8 @@ Replica *windows_replica_new(const Slapi_DN *root);
during addition of the replica over LDAP */
Replica *replica_new_from_entry (Slapi_Entry *e, char *errortext, PRBool is_add_operation);
void replica_destroy(void **arg);
+int replica_subentry_update(Slapi_DN *repl_root, ReplicaId rid);
+int replica_subentry_check(Slapi_DN *repl_root, ReplicaId rid);
PRBool replica_get_exclusive_access(Replica *r, PRBool *isInc, PRUint64 connid, int opid,
const char *locking_purl,
char **current_purl);
diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c
index f5516a3..0dc9f30 100644
--- a/ldap/servers/plugins/replication/repl5_inc_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c
@@ -1688,6 +1688,11 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
int finished = 0;
ConnResult replay_crc;
char csn_str[CSN_STRSIZE];
+ PRBool subentry_update_sent = PR_FALSE;
+ PRBool subentry_update_needed = PR_FALSE;
+ int skipped_updates = 0;
+ int fractional_repl;
+#define FRACTIONAL_SKIPPED_THRESHOLD 100
/* Start the results reading thread */
rd = repl5_inc_rd_new(prp);
@@ -1704,6 +1709,7 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
memset ( (void*)&op, 0, sizeof (op) );
entry.op = &op;
+ fractional_repl = agmt_is_fractional(prp->agmt);
do {
cl5_operation_parameters_done ( entry.op );
memset ( (void*)entry.op, 0, sizeof (op) );
@@ -1799,6 +1805,15 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
csn_as_string(entry.op->csn, PR_FALSE, csn_str);
replica_id = csn_get_replicaid(entry.op->csn);
uniqueid = entry.op->target_address.uniqueid;
+
+ if (fractional_repl && message_id)
+ {
+ /* This update was sent no need to update the subentry
+ * and restart counting the skipped updates
+ */
+ subentry_update_needed = PR_FALSE;
+ skipped_updates = 0;
+ }
if (prp->repl50consumer && message_id)
{
@@ -1829,6 +1844,16 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
agmt_get_long_name(prp->agmt),
entry.op->target_address.uniqueid, csn_str);
agmt_inc_last_update_changecount (prp->agmt, csn_get_replicaid(entry.op->csn), 1 /*skipped*/);
+ if (fractional_repl)
+ {
+ skipped_updates++;
+ if (skipped_updates > FRACTIONAL_SKIPPED_THRESHOLD) {
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name,
+ "%s: skipped updates is too high (%d) if no other update is sent we will update the subentry\n",
+ agmt_get_long_name(prp->agmt), skipped_updates);
+ subentry_update_needed = PR_TRUE;
+ }
+ }
}
}
break;
@@ -1894,6 +1919,20 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
PR_Unlock(rd->lock);
} while (!finished);
+ if (fractional_repl && subentry_update_needed)
+ {
+ Replica *replica;
+ ReplicaId rid = -1; /* Used to create the replica keep alive subentry */
+ replica = (Replica*) object_get_data(prp->replica_object);
+ if (replica)
+ {
+ rid = replica_get_rid(replica);
+ }
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name,
+ "%s: skipped updates was definitely too high (%d) update the subentry now\n",
+ agmt_get_long_name(prp->agmt), skipped_updates);
+ replica_subentry_update(agmt_get_replarea(prp->agmt), rid);
+ }
/* Terminate the results reading thread */
if (!prp->repl50consumer)
{
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
index b375eb7..a53e685 100644
--- a/ldap/servers/plugins/replication/repl5_replica.c
+++ b/ldap/servers/plugins/replication/repl5_replica.c
@@ -409,6 +409,161 @@ replica_destroy(void **arg)
slapi_ch_free((void **)arg);
}
+#define KEEP_ALIVE_ATTR "keepalivetimestamp"
+#define KEEP_ALIVE_ENTRY "repl keep alive"
+#define KEEP_ALIVE_DN_FORMAT "cn=%s %d,%s"
+
+
+static int
+replica_subentry_create(Slapi_DN *repl_root, ReplicaId rid)
+{
+ char *entry_string = NULL;
+ Slapi_Entry *e = NULL;
+ Slapi_PBlock *pb = NULL;
+ int return_value;
+ int rc = 0;
+
+ entry_string = slapi_ch_smprintf("dn: cn=%s %d,%s\nobjectclass: top\nobjectclass: ldapsubentry\nobjectclass: extensibleObject\ncn: %s %d",
+ KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root), KEEP_ALIVE_ENTRY, rid);
+ if (entry_string == NULL) {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
+ "replica_subentry_create add failed in slapi_ch_smprintf\n");
+ rc = -1;
+ goto done;
+ }
+
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "add %s\n", entry_string);
+ e = slapi_str2entry(entry_string, 0);
+
+ /* create the entry */
+ pb = slapi_pblock_new();
+
+
+ slapi_add_entry_internal_set_pb(pb, e, NULL, /* controls */
+ repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION), 0 /* flags */);
+ slapi_add_internal_pb(pb);
+ slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_RESULT, &return_value);
+ if (return_value != LDAP_SUCCESS && return_value != LDAP_ALREADY_EXISTS)
+ {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "Warning: unable to "
+ "create replication keep alive entry %s: %s\n", slapi_entry_get_dn_const(e),
+ ldap_err2string(return_value));
+ rc = -1;
+ slapi_entry_free(e); /* The entry was not consumed */
+ goto done;
+ }
+
+done:
+
+ slapi_pblock_destroy(pb);
+ slapi_ch_free_string(&entry_string);
+ return rc;
+
+}
+
+int
+replica_subentry_check(Slapi_DN *repl_root, ReplicaId rid)
+{
+ Slapi_PBlock *pb;
+ char *filter = NULL;
+ Slapi_Entry **entries = NULL;
+ int res;
+ int rc = 0;
+
+ pb = slapi_pblock_new();
+ filter = slapi_ch_smprintf("(&(objectclass=ldapsubentry)(cn=%s %d))", KEEP_ALIVE_ENTRY, rid);
+ slapi_search_internal_set_pb(pb, slapi_sdn_get_dn(repl_root), LDAP_SCOPE_ONELEVEL,
+ filter, NULL, 0, NULL, NULL,
+ repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION), 0);
+ slapi_search_internal_pb(pb);
+ slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_RESULT, &res);
+ if (res == LDAP_SUCCESS)
+ {
+ slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries);
+ if (entries && (entries[0] == NULL))
+ {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
+ "Need to create replication keep alive entry <cn=%s %d,%s>\n", KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root));
+ rc = replica_subentry_create(repl_root, rid);
+ } else {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
+ "replication keep alive entry <cn=%s %d,%s> already exists\n", KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root));
+ rc = 0;
+ }
+ } else {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
+ "Error accessing replication keep alive entry <cn=%s %d,%s> res=%d\n",
+ KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root), res);
+ /* The status of the entry is not clear, do not attempt to create it */
+ rc = 1;
+ }
+ slapi_free_search_results_internal(pb);
+
+ slapi_pblock_destroy(pb);
+ slapi_ch_free_string(&filter);
+ return rc;
+}
+
+int
+replica_subentry_update(Slapi_DN *repl_root, ReplicaId rid)
+{
+ int ldrc;
+ int rc = LDAP_SUCCESS; /* Optimistic default */
+ LDAPMod * mods[2];
+ LDAPMod mod;
+ struct berval * vals[2];
+ char buf[20];
+ time_t curtime;
+ struct tm ltm;
+ struct berval val;
+ Slapi_PBlock *modpb = NULL;
+ char *dn;
+
+ replica_subentry_check(repl_root, rid);
+ curtime = current_time();
+ gmtime_r(&curtime, <m);
+ strftime(buf, sizeof (buf), "%Y%m%d%H%M%SZ", <m);
+
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name, "subentry_update called at %s\n", buf);
+
+
+ val.bv_val = buf;
+ val.bv_len = strlen(val.bv_val);
+
+ vals [0] = &val;
+ vals [1] = NULL;
+
+ mod.mod_op = LDAP_MOD_REPLACE | LDAP_MOD_BVALUES;
+ mod.mod_type = KEEP_ALIVE_ATTR;
+ mod.mod_bvalues = vals;
+
+ mods[0] = &mod;
+ mods[1] = NULL;
+
+ modpb = slapi_pblock_new();
+ dn = slapi_ch_smprintf(KEEP_ALIVE_DN_FORMAT, KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root));
+
+ slapi_modify_internal_set_pb(modpb, dn, mods, NULL, NULL,
+ repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION), 0);
+ slapi_modify_internal_pb(modpb);
+
+ slapi_pblock_get(modpb, SLAPI_PLUGIN_INTOP_RESULT, &ldrc);
+
+ if (ldrc != LDAP_SUCCESS)
+ {
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name,
+ "Failure (%d) to update replication keep alive entry \"%s: %s\"\n", ldrc, KEEP_ALIVE_ATTR, buf);
+ rc = ldrc;
+ } else {
+ slapi_log_error(SLAPI_LOG_PLUGIN, repl_plugin_name,
+ "Successful update of replication keep alive entry \"%s: %s\"\n", KEEP_ALIVE_ATTR, buf);
+ }
+
+ slapi_pblock_destroy(modpb);
+ slapi_ch_free_string(&dn);
+ return rc;
+
+}
/*
* Attempt to obtain exclusive access to replica (advisory only)
*
@@ -3620,6 +3775,7 @@ replica_enable_replication (Replica *r)
/* What to do ? */
}
+ replica_subentry_check(r->repl_root, replica_get_rid(r));
/* Replica came back online, Check if the total update was terminated.
If flag is still set, it was not terminated, therefore the data is
very likely to be incorrect, and we should not restart Replication threads...
diff --git a/ldap/servers/plugins/replication/repl5_tot_protocol.c b/ldap/servers/plugins/replication/repl5_tot_protocol.c
index e514dc6..0143e19 100644
--- a/ldap/servers/plugins/replication/repl5_tot_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_tot_protocol.c
@@ -335,6 +335,9 @@ repl5_tot_run(Private_Repl_Protocol *prp)
int portnum = 0;
Slapi_DN *area_sdn = NULL;
CSN *remote_schema_csn = NULL;
+ int init_retry = 0;
+ Replica *replica;
+ ReplicaId rid = 0; /* Used to create the replica keep alive subentry */
PR_ASSERT(NULL != prp);
@@ -412,7 +415,15 @@ repl5_tot_run(Private_Repl_Protocol *prp)
ctrls = (LDAPControl **)slapi_ch_calloc (3, sizeof (LDAPControl *));
ctrls[0] = create_managedsait_control ();
ctrls[1] = create_backend_control(area_sdn);
-
+
+ /* Time to make sure it exists a keep alive subentry for that replica */
+ replica = (Replica*) object_get_data(prp->replica_object);
+ if (replica)
+ {
+ rid = replica_get_rid(replica);
+ }
+ replica_subentry_check(area_sdn, rid);
+
slapi_search_internal_set_pb (pb, slapi_sdn_get_dn (area_sdn),
LDAP_SCOPE_SUBTREE, "(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))", NULL, 0, ctrls, NULL,
repl_get_plugin_identity (PLUGIN_MULTIMASTER_REPLICATION), 0);
--
389 commits mailing list
389-commits@%(host_name)s
http://lists.fedoraproject.org/postorius/389-commits@lists.fedoraproject.org
8 years, 7 months
Branch '389-ds-base-1.3.3' - ldap/servers
by thierry bordaz
ldap/servers/plugins/replication/repl5.h | 2
ldap/servers/plugins/replication/repl5_inc_protocol.c | 39 ++++
ldap/servers/plugins/replication/repl5_replica.c | 156 ++++++++++++++++++
ldap/servers/plugins/replication/repl5_tot_protocol.c | 13 +
4 files changed, 209 insertions(+), 1 deletion(-)
New commits:
commit 38e0d75a4ce6c1bb711fe109bf5f1c548054eda2
Author: Thierry Bordaz <tbordaz(a)redhat.com>
Date: Fri Sep 11 18:56:53 2015 +0200
Ticket 48266: Fractional replication evaluates several times the same CSN
Bug Description:
In fractional replication, if there are only skipped updates and many of them, the supplier
acquires the replica for a long time. At the end of the session, the RUV is not updated,
so the next session will restart evaluating the same skipped updates.
Fix Description:
The fix introduces subentries under the suffix: 'cn=repl keep alive <rid>,$SUFFIX'
During an incremental replication session, if the session only contains skipped updates
and the number of them exceeds a threshold (100), it triggers an update on that subentry.
This update will eventually be replicated, moving forward the RUV
https://fedorahosted.org/389/ticket/48266
Reviewed by: Noriko Hosoi, Rich Megginson, Simon Pichugin
Platforms tested: <plat>
Flag Day: no
Doc impact: no
diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
index 8381c97..59cc11b 100644
--- a/ldap/servers/plugins/replication/repl5.h
+++ b/ldap/servers/plugins/replication/repl5.h
@@ -556,6 +556,8 @@ Replica *windows_replica_new(const Slapi_DN *root);
during addition of the replica over LDAP */
Replica *replica_new_from_entry (Slapi_Entry *e, char *errortext, PRBool is_add_operation);
void replica_destroy(void **arg);
+int replica_subentry_update(Slapi_DN *repl_root, ReplicaId rid);
+int replica_subentry_check(Slapi_DN *repl_root, ReplicaId rid);
PRBool replica_get_exclusive_access(Replica *r, PRBool *isInc, PRUint64 connid, int opid,
const char *locking_purl,
char **current_purl);
diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c
index bd4edeb..e8838a0 100644
--- a/ldap/servers/plugins/replication/repl5_inc_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c
@@ -1701,6 +1701,11 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
int finished = 0;
ConnResult replay_crc;
char csn_str[CSN_STRSIZE];
+ PRBool subentry_update_sent = PR_FALSE;
+ PRBool subentry_update_needed = PR_FALSE;
+ int skipped_updates = 0;
+ int fractional_repl;
+#define FRACTIONAL_SKIPPED_THRESHOLD 100
/* Start the results reading thread */
rd = repl5_inc_rd_new(prp);
@@ -1717,6 +1722,7 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
memset ( (void*)&op, 0, sizeof (op) );
entry.op = &op;
+ fractional_repl = agmt_is_fractional(prp->agmt);
do {
cl5_operation_parameters_done ( entry.op );
memset ( (void*)entry.op, 0, sizeof (op) );
@@ -1812,6 +1818,15 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
csn_as_string(entry.op->csn, PR_FALSE, csn_str);
replica_id = csn_get_replicaid(entry.op->csn);
uniqueid = entry.op->target_address.uniqueid;
+
+ if (fractional_repl && message_id)
+ {
+ /* This update was sent no need to update the subentry
+ * and restart counting the skipped updates
+ */
+ subentry_update_needed = PR_FALSE;
+ skipped_updates = 0;
+ }
if (prp->repl50consumer && message_id)
{
@@ -1842,6 +1857,16 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
agmt_get_long_name(prp->agmt),
entry.op->target_address.uniqueid, csn_str);
agmt_inc_last_update_changecount (prp->agmt, csn_get_replicaid(entry.op->csn), 1 /*skipped*/);
+ if (fractional_repl)
+ {
+ skipped_updates++;
+ if (skipped_updates > FRACTIONAL_SKIPPED_THRESHOLD) {
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name,
+ "%s: skipped updates is too high (%d) if no other update is sent we will update the subentry\n",
+ agmt_get_long_name(prp->agmt), skipped_updates);
+ subentry_update_needed = PR_TRUE;
+ }
+ }
}
}
break;
@@ -1907,6 +1932,20 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
PR_Unlock(rd->lock);
} while (!finished);
+ if (fractional_repl && subentry_update_needed)
+ {
+ Replica *replica;
+ ReplicaId rid = -1; /* Used to create the replica keep alive subentry */
+ replica = (Replica*) object_get_data(prp->replica_object);
+ if (replica)
+ {
+ rid = replica_get_rid(replica);
+ }
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name,
+ "%s: skipped updates was definitely too high (%d) update the subentry now\n",
+ agmt_get_long_name(prp->agmt), skipped_updates);
+ replica_subentry_update(agmt_get_replarea(prp->agmt), rid);
+ }
/* Terminate the results reading thread */
if (!prp->repl50consumer)
{
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
index 7661a04..c19365e 100644
--- a/ldap/servers/plugins/replication/repl5_replica.c
+++ b/ldap/servers/plugins/replication/repl5_replica.c
@@ -443,6 +443,161 @@ replica_destroy(void **arg)
slapi_ch_free((void **)arg);
}
+#define KEEP_ALIVE_ATTR "keepalivetimestamp"
+#define KEEP_ALIVE_ENTRY "repl keep alive"
+#define KEEP_ALIVE_DN_FORMAT "cn=%s %d,%s"
+
+
+static int
+replica_subentry_create(Slapi_DN *repl_root, ReplicaId rid)
+{
+ char *entry_string = NULL;
+ Slapi_Entry *e = NULL;
+ Slapi_PBlock *pb = NULL;
+ int return_value;
+ int rc = 0;
+
+ entry_string = slapi_ch_smprintf("dn: cn=%s %d,%s\nobjectclass: top\nobjectclass: ldapsubentry\nobjectclass: extensibleObject\ncn: %s %d",
+ KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root), KEEP_ALIVE_ENTRY, rid);
+ if (entry_string == NULL) {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
+ "replica_subentry_create add failed in slapi_ch_smprintf\n");
+ rc = -1;
+ goto done;
+ }
+
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "add %s\n", entry_string);
+ e = slapi_str2entry(entry_string, 0);
+
+ /* create the entry */
+ pb = slapi_pblock_new();
+
+
+ slapi_add_entry_internal_set_pb(pb, e, NULL, /* controls */
+ repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION), 0 /* flags */);
+ slapi_add_internal_pb(pb);
+ slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_RESULT, &return_value);
+ if (return_value != LDAP_SUCCESS && return_value != LDAP_ALREADY_EXISTS)
+ {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "Warning: unable to "
+ "create replication keep alive entry %s: %s\n", slapi_entry_get_dn_const(e),
+ ldap_err2string(return_value));
+ rc = -1;
+ slapi_entry_free(e); /* The entry was not consumed */
+ goto done;
+ }
+
+done:
+
+ slapi_pblock_destroy(pb);
+ slapi_ch_free_string(&entry_string);
+ return rc;
+
+}
+
+int
+replica_subentry_check(Slapi_DN *repl_root, ReplicaId rid)
+{
+ Slapi_PBlock *pb;
+ char *filter = NULL;
+ Slapi_Entry **entries = NULL;
+ int res;
+ int rc = 0;
+
+ pb = slapi_pblock_new();
+ filter = slapi_ch_smprintf("(&(objectclass=ldapsubentry)(cn=%s %d))", KEEP_ALIVE_ENTRY, rid);
+ slapi_search_internal_set_pb(pb, slapi_sdn_get_dn(repl_root), LDAP_SCOPE_ONELEVEL,
+ filter, NULL, 0, NULL, NULL,
+ repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION), 0);
+ slapi_search_internal_pb(pb);
+ slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_RESULT, &res);
+ if (res == LDAP_SUCCESS)
+ {
+ slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries);
+ if (entries && (entries[0] == NULL))
+ {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
+ "Need to create replication keep alive entry <cn=%s %d,%s>\n", KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root));
+ rc = replica_subentry_create(repl_root, rid);
+ } else {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
+ "replication keep alive entry <cn=%s %d,%s> already exists\n", KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root));
+ rc = 0;
+ }
+ } else {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
+ "Error accessing replication keep alive entry <cn=%s %d,%s> res=%d\n",
+ KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root), res);
+ /* The status of the entry is not clear, do not attempt to create it */
+ rc = 1;
+ }
+ slapi_free_search_results_internal(pb);
+
+ slapi_pblock_destroy(pb);
+ slapi_ch_free_string(&filter);
+ return rc;
+}
+
+int
+replica_subentry_update(Slapi_DN *repl_root, ReplicaId rid)
+{
+ int ldrc;
+ int rc = LDAP_SUCCESS; /* Optimistic default */
+ LDAPMod * mods[2];
+ LDAPMod mod;
+ struct berval * vals[2];
+ char buf[20];
+ time_t curtime;
+ struct tm ltm;
+ struct berval val;
+ Slapi_PBlock *modpb = NULL;
+ char *dn;
+
+ replica_subentry_check(repl_root, rid);
+ curtime = current_time();
+ gmtime_r(&curtime, &ltm);
+ strftime(buf, sizeof (buf), "%Y%m%d%H%M%SZ", &ltm);
+
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name, "subentry_update called at %s\n", buf);
+
+
+ val.bv_val = buf;
+ val.bv_len = strlen(val.bv_val);
+
+ vals [0] = &val;
+ vals [1] = NULL;
+
+ mod.mod_op = LDAP_MOD_REPLACE | LDAP_MOD_BVALUES;
+ mod.mod_type = KEEP_ALIVE_ATTR;
+ mod.mod_bvalues = vals;
+
+ mods[0] = &mod;
+ mods[1] = NULL;
+
+ modpb = slapi_pblock_new();
+ dn = slapi_ch_smprintf(KEEP_ALIVE_DN_FORMAT, KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root));
+
+ slapi_modify_internal_set_pb(modpb, dn, mods, NULL, NULL,
+ repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION), 0);
+ slapi_modify_internal_pb(modpb);
+
+ slapi_pblock_get(modpb, SLAPI_PLUGIN_INTOP_RESULT, &ldrc);
+
+ if (ldrc != LDAP_SUCCESS)
+ {
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name,
+ "Failure (%d) to update replication keep alive entry \"%s: %s\"\n", ldrc, KEEP_ALIVE_ATTR, buf);
+ rc = ldrc;
+ } else {
+ slapi_log_error(SLAPI_LOG_PLUGIN, repl_plugin_name,
+ "Successful update of replication keep alive entry \"%s: %s\"\n", KEEP_ALIVE_ATTR, buf);
+ }
+
+ slapi_pblock_destroy(modpb);
+ slapi_ch_free_string(&dn);
+ return rc;
+
+}
/*
* Attempt to obtain exclusive access to replica (advisory only)
*
@@ -3845,6 +4000,7 @@ replica_enable_replication (Replica *r)
/* What to do ? */
}
+ replica_subentry_check(r->repl_root, replica_get_rid(r));
/* Replica came back online, Check if the total update was terminated.
If flag is still set, it was not terminated, therefore the data is
very likely to be incorrect, and we should not restart Replication threads...
diff --git a/ldap/servers/plugins/replication/repl5_tot_protocol.c b/ldap/servers/plugins/replication/repl5_tot_protocol.c
index 43b0de5..c41d832 100644
--- a/ldap/servers/plugins/replication/repl5_tot_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_tot_protocol.c
@@ -347,6 +347,9 @@ repl5_tot_run(Private_Repl_Protocol *prp)
int portnum = 0;
Slapi_DN *area_sdn = NULL;
CSN *remote_schema_csn = NULL;
+ int init_retry = 0;
+ Replica *replica;
+ ReplicaId rid = 0; /* Used to create the replica keep alive subentry */
PR_ASSERT(NULL != prp);
@@ -424,7 +427,15 @@ repl5_tot_run(Private_Repl_Protocol *prp)
ctrls = (LDAPControl **)slapi_ch_calloc (3, sizeof (LDAPControl *));
ctrls[0] = create_managedsait_control ();
ctrls[1] = create_backend_control(area_sdn);
-
+
+ /* Time to make sure it exists a keep alive subentry for that replica */
+ replica = (Replica*) object_get_data(prp->replica_object);
+ if (replica)
+ {
+ rid = replica_get_rid(replica);
+ }
+ replica_subentry_check(area_sdn, rid);
+
slapi_search_internal_set_pb (pb, slapi_sdn_get_dn (area_sdn),
LDAP_SCOPE_SUBTREE, "(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))", NULL, 0, ctrls, NULL,
repl_get_plugin_identity (PLUGIN_MULTIMASTER_REPLICATION), 0);
--
389 commits mailing list
389-commits@%(host_name)s
http://lists.fedoraproject.org/postorius/389-commits@lists.fedoraproject.org
8 years, 7 months
Branch '389-ds-base-1.3.4' - ldap/servers
by thierry bordaz
ldap/servers/plugins/replication/repl5.h | 2
ldap/servers/plugins/replication/repl5_inc_protocol.c | 39 ++++
ldap/servers/plugins/replication/repl5_replica.c | 156 ++++++++++++++++++
ldap/servers/plugins/replication/repl5_tot_protocol.c | 14 +
4 files changed, 209 insertions(+), 2 deletions(-)
New commits:
commit 6343e4cba17802e19daa5c971120fa352ff80ad4
Author: Thierry Bordaz <tbordaz(a)redhat.com>
Date: Fri Sep 11 18:56:53 2015 +0200
Ticket 48266: Fractional replication evaluates several times the same CSN
Bug Description:
In fractional replication, if there are only skipped updates and many of them, the supplier
acquires the replica for a long time. At the end of the session, the RUV is not updated,
so the next session will restart by evaluating the same skipped updates
Fix Description:
The fix introduces subentries under the suffix: 'cn=repl keep alive <rid>,$SUFFIX'
During an incremental replication session, if the session only contains skipped updates
and their number exceeds a threshold (100), it triggers an update of that subentry.
This update will eventually be replicated, moving the RUV forward
https://fedorahosted.org/389/ticket/48266
Reviewed by: Noriko Hosoi, Rich Megginson, Simon Pichugin
Platforms tested: <plat>
Flag Day: no
Doc impact: no
diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
index 0b0f26b..17282bb 100644
--- a/ldap/servers/plugins/replication/repl5.h
+++ b/ldap/servers/plugins/replication/repl5.h
@@ -523,6 +523,8 @@ Replica *windows_replica_new(const Slapi_DN *root);
during addition of the replica over LDAP */
Replica *replica_new_from_entry (Slapi_Entry *e, char *errortext, PRBool is_add_operation);
void replica_destroy(void **arg);
+int replica_subentry_update(Slapi_DN *repl_root, ReplicaId rid);
+int replica_subentry_check(Slapi_DN *repl_root, ReplicaId rid);
PRBool replica_get_exclusive_access(Replica *r, PRBool *isInc, PRUint64 connid, int opid,
const char *locking_purl,
char **current_purl);
diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c
index 216de3c..e0599e5 100644
--- a/ldap/servers/plugins/replication/repl5_inc_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c
@@ -1672,6 +1672,11 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
int finished = 0;
ConnResult replay_crc;
char csn_str[CSN_STRSIZE];
+ PRBool subentry_update_sent = PR_FALSE;
+ PRBool subentry_update_needed = PR_FALSE;
+ int skipped_updates = 0;
+ int fractional_repl;
+#define FRACTIONAL_SKIPPED_THRESHOLD 100
/* Start the results reading thread */
rd = repl5_inc_rd_new(prp);
@@ -1688,6 +1693,7 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
memset ( (void*)&op, 0, sizeof (op) );
entry.op = &op;
+ fractional_repl = agmt_is_fractional(prp->agmt);
do {
cl5_operation_parameters_done ( entry.op );
memset ( (void*)entry.op, 0, sizeof (op) );
@@ -1783,6 +1789,15 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
csn_as_string(entry.op->csn, PR_FALSE, csn_str);
replica_id = csn_get_replicaid(entry.op->csn);
uniqueid = entry.op->target_address.uniqueid;
+
+ if (fractional_repl && message_id)
+ {
+ /* This update was sent no need to update the subentry
+ * and restart counting the skipped updates
+ */
+ subentry_update_needed = PR_FALSE;
+ skipped_updates = 0;
+ }
if (prp->repl50consumer && message_id)
{
@@ -1813,6 +1828,16 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
agmt_get_long_name(prp->agmt),
entry.op->target_address.uniqueid, csn_str);
agmt_inc_last_update_changecount (prp->agmt, csn_get_replicaid(entry.op->csn), 1 /*skipped*/);
+ if (fractional_repl)
+ {
+ skipped_updates++;
+ if (skipped_updates > FRACTIONAL_SKIPPED_THRESHOLD) {
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name,
+ "%s: skipped updates is too high (%d) if no other update is sent we will update the subentry\n",
+ agmt_get_long_name(prp->agmt), skipped_updates);
+ subentry_update_needed = PR_TRUE;
+ }
+ }
}
}
break;
@@ -1878,6 +1903,20 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
PR_Unlock(rd->lock);
} while (!finished);
+ if (fractional_repl && subentry_update_needed)
+ {
+ Replica *replica;
+ ReplicaId rid = -1; /* Used to create the replica keep alive subentry */
+ replica = (Replica*) object_get_data(prp->replica_object);
+ if (replica)
+ {
+ rid = replica_get_rid(replica);
+ }
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name,
+ "%s: skipped updates was definitely too high (%d) update the subentry now\n",
+ agmt_get_long_name(prp->agmt), skipped_updates);
+ replica_subentry_update(agmt_get_replarea(prp->agmt), rid);
+ }
/* Terminate the results reading thread */
if (!prp->repl50consumer)
{
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
index 92b4e96..6ac28c1 100644
--- a/ldap/servers/plugins/replication/repl5_replica.c
+++ b/ldap/servers/plugins/replication/repl5_replica.c
@@ -414,6 +414,161 @@ replica_destroy(void **arg)
slapi_ch_free((void **)arg);
}
+#define KEEP_ALIVE_ATTR "keepalivetimestamp"
+#define KEEP_ALIVE_ENTRY "repl keep alive"
+#define KEEP_ALIVE_DN_FORMAT "cn=%s %d,%s"
+
+
+static int
+replica_subentry_create(Slapi_DN *repl_root, ReplicaId rid)
+{
+ char *entry_string = NULL;
+ Slapi_Entry *e = NULL;
+ Slapi_PBlock *pb = NULL;
+ int return_value;
+ int rc = 0;
+
+ entry_string = slapi_ch_smprintf("dn: cn=%s %d,%s\nobjectclass: top\nobjectclass: ldapsubentry\nobjectclass: extensibleObject\ncn: %s %d",
+ KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root), KEEP_ALIVE_ENTRY, rid);
+ if (entry_string == NULL) {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
+ "replica_subentry_create add failed in slapi_ch_smprintf\n");
+ rc = -1;
+ goto done;
+ }
+
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "add %s\n", entry_string);
+ e = slapi_str2entry(entry_string, 0);
+
+ /* create the entry */
+ pb = slapi_pblock_new();
+
+
+ slapi_add_entry_internal_set_pb(pb, e, NULL, /* controls */
+ repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION), 0 /* flags */);
+ slapi_add_internal_pb(pb);
+ slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_RESULT, &return_value);
+ if (return_value != LDAP_SUCCESS && return_value != LDAP_ALREADY_EXISTS)
+ {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "Warning: unable to "
+ "create replication keep alive entry %s: %s\n", slapi_entry_get_dn_const(e),
+ ldap_err2string(return_value));
+ rc = -1;
+ slapi_entry_free(e); /* The entry was not consumed */
+ goto done;
+ }
+
+done:
+
+ slapi_pblock_destroy(pb);
+ slapi_ch_free_string(&entry_string);
+ return rc;
+
+}
+
+int
+replica_subentry_check(Slapi_DN *repl_root, ReplicaId rid)
+{
+ Slapi_PBlock *pb;
+ char *filter = NULL;
+ Slapi_Entry **entries = NULL;
+ int res;
+ int rc = 0;
+
+ pb = slapi_pblock_new();
+ filter = slapi_ch_smprintf("(&(objectclass=ldapsubentry)(cn=%s %d))", KEEP_ALIVE_ENTRY, rid);
+ slapi_search_internal_set_pb(pb, slapi_sdn_get_dn(repl_root), LDAP_SCOPE_ONELEVEL,
+ filter, NULL, 0, NULL, NULL,
+ repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION), 0);
+ slapi_search_internal_pb(pb);
+ slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_RESULT, &res);
+ if (res == LDAP_SUCCESS)
+ {
+ slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries);
+ if (entries && (entries[0] == NULL))
+ {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
+ "Need to create replication keep alive entry <cn=%s %d,%s>\n", KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root));
+ rc = replica_subentry_create(repl_root, rid);
+ } else {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
+ "replication keep alive entry <cn=%s %d,%s> already exists\n", KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root));
+ rc = 0;
+ }
+ } else {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
+ "Error accessing replication keep alive entry <cn=%s %d,%s> res=%d\n",
+ KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root), res);
+ /* The status of the entry is not clear, do not attempt to create it */
+ rc = 1;
+ }
+ slapi_free_search_results_internal(pb);
+
+ slapi_pblock_destroy(pb);
+ slapi_ch_free_string(&filter);
+ return rc;
+}
+
+int
+replica_subentry_update(Slapi_DN *repl_root, ReplicaId rid)
+{
+ int ldrc;
+ int rc = LDAP_SUCCESS; /* Optimistic default */
+ LDAPMod * mods[2];
+ LDAPMod mod;
+ struct berval * vals[2];
+ char buf[20];
+ time_t curtime;
+ struct tm ltm;
+ struct berval val;
+ Slapi_PBlock *modpb = NULL;
+ char *dn;
+
+ replica_subentry_check(repl_root, rid);
+ curtime = current_time();
+ gmtime_r(&curtime, &ltm);
+ strftime(buf, sizeof (buf), "%Y%m%d%H%M%SZ", &ltm);
+
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name, "subentry_update called at %s\n", buf);
+
+
+ val.bv_val = buf;
+ val.bv_len = strlen(val.bv_val);
+
+ vals [0] = &val;
+ vals [1] = NULL;
+
+ mod.mod_op = LDAP_MOD_REPLACE | LDAP_MOD_BVALUES;
+ mod.mod_type = KEEP_ALIVE_ATTR;
+ mod.mod_bvalues = vals;
+
+ mods[0] = &mod;
+ mods[1] = NULL;
+
+ modpb = slapi_pblock_new();
+ dn = slapi_ch_smprintf(KEEP_ALIVE_DN_FORMAT, KEEP_ALIVE_ENTRY, rid, slapi_sdn_get_dn(repl_root));
+
+ slapi_modify_internal_set_pb(modpb, dn, mods, NULL, NULL,
+ repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION), 0);
+ slapi_modify_internal_pb(modpb);
+
+ slapi_pblock_get(modpb, SLAPI_PLUGIN_INTOP_RESULT, &ldrc);
+
+ if (ldrc != LDAP_SUCCESS)
+ {
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name,
+ "Failure (%d) to update replication keep alive entry \"%s: %s\"\n", ldrc, KEEP_ALIVE_ATTR, buf);
+ rc = ldrc;
+ } else {
+ slapi_log_error(SLAPI_LOG_PLUGIN, repl_plugin_name,
+ "Successful update of replication keep alive entry \"%s: %s\"\n", KEEP_ALIVE_ATTR, buf);
+ }
+
+ slapi_pblock_destroy(modpb);
+ slapi_ch_free_string(&dn);
+ return rc;
+
+}
/*
* Attempt to obtain exclusive access to replica (advisory only)
*
@@ -3816,6 +3971,7 @@ replica_enable_replication (Replica *r)
/* What to do ? */
}
+ replica_subentry_check(r->repl_root, replica_get_rid(r));
/* Replica came back online, Check if the total update was terminated.
If flag is still set, it was not terminated, therefore the data is
very likely to be incorrect, and we should not restart Replication threads...
diff --git a/ldap/servers/plugins/replication/repl5_tot_protocol.c b/ldap/servers/plugins/replication/repl5_tot_protocol.c
index da73ac4..9059efe 100644
--- a/ldap/servers/plugins/replication/repl5_tot_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_tot_protocol.c
@@ -320,7 +320,9 @@ repl5_tot_run(Private_Repl_Protocol *prp)
int portnum = 0;
Slapi_DN *area_sdn = NULL;
CSN *remote_schema_csn = NULL;
- int init_retry = 0;
+ int init_retry = 0;
+ Replica *replica;
+ ReplicaId rid = 0; /* Used to create the replica keep alive subentry */
PR_ASSERT(NULL != prp);
@@ -413,7 +415,15 @@ retry:
ctrls = (LDAPControl **)slapi_ch_calloc (3, sizeof (LDAPControl *));
ctrls[0] = create_managedsait_control ();
ctrls[1] = create_backend_control(area_sdn);
-
+
+ /* Time to make sure it exists a keep alive subentry for that replica */
+ replica = (Replica*) object_get_data(prp->replica_object);
+ if (replica)
+ {
+ rid = replica_get_rid(replica);
+ }
+ replica_subentry_check(area_sdn, rid);
+
slapi_search_internal_set_pb (pb, slapi_sdn_get_dn (area_sdn),
LDAP_SCOPE_SUBTREE, "(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))", NULL, 0, ctrls, NULL,
repl_get_plugin_identity (PLUGIN_MULTIMASTER_REPLICATION), 0);
--
389 commits mailing list
389-commits@%(host_name)s
http://lists.fedoraproject.org/postorius/389-commits@lists.fedoraproject.org
8 years, 7 months