ldap/servers
by Mark Reynolds
ldap/servers/plugins/replication/repl5_agmtlist.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
New commits:
commit 3a4d39e166449177c85b92af8b47c5c6848c4d02
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Tue Dec 17 14:01:34 2013 -0500
Ticket 47620 - Fix missing left bracket
https://fedorahosted.org/389/ticket/47620
diff --git a/ldap/servers/plugins/replication/repl5_agmtlist.c b/ldap/servers/plugins/replication/repl5_agmtlist.c
index e75ff24..334f8a1 100644
--- a/ldap/servers/plugins/replication/repl5_agmtlist.c
+++ b/ldap/servers/plugins/replication/repl5_agmtlist.c
@@ -245,7 +245,7 @@ agmtlist_modify_callback(Slapi_PBlock *pb, Slapi_Entry *entryBefore, Slapi_Entry
for (i = 0; NULL != mods && NULL != mods[i]; i++)
{
slapi_ch_free_string(&val);
- if (mods[i]->mod_bvalues && mods[i]->mod_bvalues[0])
+ if (mods[i]->mod_bvalues && mods[i]->mod_bvalues[0]){
val = slapi_berval_get_string_copy (mods[i]->mod_bvalues[0]);
}
if (slapi_attr_types_equivalent(mods[i]->mod_type, type_nsds5ReplicaInitialize))
10 years, 4 months
Branch '389-ds-base-1.3.1' - ldap/servers
by Mark Reynolds
ldap/servers/plugins/replication/repl5_agmtlist.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
New commits:
commit 60d263f7bc52e4b5186a01c38868763a275abadc
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Tue Dec 17 13:11:03 2013 -0500
Ticket 47620 - Fix dereferenced NULL pointer in agmtlist_modify_callback()
The server would dereference a NULL pointer if an attribute was deleted from a replication
agreement.
https://fedorahosted.org/389/ticket/47620
Reviewed by: rmeggins(Thanks!)
(cherry picked from commit 8baed897f504e75478b5dbbe736c1eaf6d2d7fa9)
diff --git a/ldap/servers/plugins/replication/repl5_agmtlist.c b/ldap/servers/plugins/replication/repl5_agmtlist.c
index 5219c92..6e8b82c 100644
--- a/ldap/servers/plugins/replication/repl5_agmtlist.c
+++ b/ldap/servers/plugins/replication/repl5_agmtlist.c
@@ -245,7 +245,9 @@ agmtlist_modify_callback(Slapi_PBlock *pb, Slapi_Entry *entryBefore, Slapi_Entry
for (i = 0; NULL != mods && NULL != mods[i]; i++)
{
slapi_ch_free_string(&val);
- val = slapi_berval_get_string_copy (mods[i]->mod_bvalues[0]);
+ if (mods[i]->mod_bvalues && mods[i]->mod_bvalues[0])
+ val = slapi_berval_get_string_copy (mods[i]->mod_bvalues[0]);
+ }
if (slapi_attr_types_equivalent(mods[i]->mod_type, type_nsds5ReplicaInitialize))
{
/* we don't allow delete attribute operations unless it was issued by
10 years, 4 months
Branch '389-ds-base-1.3.2' - ldap/servers
by Mark Reynolds
ldap/servers/plugins/replication/repl5_agmtlist.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
New commits:
commit 887fd19db49f75a3751521edca5ed8b787d885cb
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Tue Dec 17 13:11:03 2013 -0500
Ticket 47620 - Fix dereferenced NULL pointer in agmtlist_modify_callback()
The server would dereference a NULL pointer if an attribute was deleted from a replication
agreement.
https://fedorahosted.org/389/ticket/47620
Reviewed by: rmeggins(Thanks!)
(cherry picked from commit 8baed897f504e75478b5dbbe736c1eaf6d2d7fa9)
diff --git a/ldap/servers/plugins/replication/repl5_agmtlist.c b/ldap/servers/plugins/replication/repl5_agmtlist.c
index 18540b4..3180353 100644
--- a/ldap/servers/plugins/replication/repl5_agmtlist.c
+++ b/ldap/servers/plugins/replication/repl5_agmtlist.c
@@ -245,7 +245,9 @@ agmtlist_modify_callback(Slapi_PBlock *pb, Slapi_Entry *entryBefore, Slapi_Entry
for (i = 0; NULL != mods && NULL != mods[i]; i++)
{
slapi_ch_free_string(&val);
- val = slapi_berval_get_string_copy (mods[i]->mod_bvalues[0]);
+ if (mods[i]->mod_bvalues && mods[i]->mod_bvalues[0])
+ val = slapi_berval_get_string_copy (mods[i]->mod_bvalues[0]);
+ }
if (slapi_attr_types_equivalent(mods[i]->mod_type, type_nsds5ReplicaInitialize))
{
/* we don't allow delete attribute operations unless it was issued by
10 years, 4 months
ldap/servers
by Mark Reynolds
ldap/servers/plugins/replication/repl5_agmtlist.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
New commits:
commit 8baed897f504e75478b5dbbe736c1eaf6d2d7fa9
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Tue Dec 17 13:11:03 2013 -0500
Ticket 47620 - Fix dereferenced NULL pointer in agmtlist_modify_callback()
The server would dereference a NULL pointer if an attribute was deleted from a replication
agreement.
https://fedorahosted.org/389/ticket/47620
Reviewed by: rmeggins(Thanks!)
diff --git a/ldap/servers/plugins/replication/repl5_agmtlist.c b/ldap/servers/plugins/replication/repl5_agmtlist.c
index 59eb84f..e75ff24 100644
--- a/ldap/servers/plugins/replication/repl5_agmtlist.c
+++ b/ldap/servers/plugins/replication/repl5_agmtlist.c
@@ -245,7 +245,9 @@ agmtlist_modify_callback(Slapi_PBlock *pb, Slapi_Entry *entryBefore, Slapi_Entry
for (i = 0; NULL != mods && NULL != mods[i]; i++)
{
slapi_ch_free_string(&val);
- val = slapi_berval_get_string_copy (mods[i]->mod_bvalues[0]);
+ if (mods[i]->mod_bvalues && mods[i]->mod_bvalues[0])
+ val = slapi_berval_get_string_copy (mods[i]->mod_bvalues[0]);
+ }
if (slapi_attr_types_equivalent(mods[i]->mod_type, type_nsds5ReplicaInitialize))
{
/* we don't allow delete attribute operations unless it was issued by
10 years, 4 months
ldap/servers
by Ludwig Krispenz
ldap/servers/plugins/referint/referint.c | 86 +++++++++++++++++++++++++------
1 file changed, 71 insertions(+), 15 deletions(-)
New commits:
commit 16dc94f11e644e8829817489e92f814757058b82
Author: Ludwig Krispenz <lkrispen(a)redhat.com>
Date: Tue Dec 17 09:57:05 2013 +0100
Ticket 47621 - v2 make referential integrity configuration more flexible
Bug Description: Request to make entry scope multivalued and
to define exclusion scope
Fix Description: The fix makes the configuration parameters
nsslapd-pluginEntryScope: <dn> multivalued and adds
nsslapd-pluginExcludeEntryScope: <dn>
The logic implemented is specified in:
http://port389.org/wiki/Configuring_scope_for_referential_integrity_plugin
https://fedorahosted.org/389/ticket/47621
Reviewed by: richm, thanks
diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c
index 6576c88..4c99666 100644
--- a/ldap/servers/plugins/referint/referint.c
+++ b/ldap/servers/plugins/referint/referint.c
@@ -122,7 +122,8 @@ static Slapi_DN* _pluginDN = NULL;
static Slapi_PluginDesc pdesc = { "referint", VENDOR, DS_PACKAGE_VERSION, "referential integrity plugin" };
static int allow_repl = 0;
-static Slapi_DN *plugin_EntryScope = NULL;
+static Slapi_DN **plugin_EntryScope = NULL;
+static Slapi_DN *plugin_ExcludeEntryScope = NULL;
static Slapi_DN *plugin_ContainerScope = NULL;
static void* referint_plugin_identity = NULL;
static int use_txn = 0;
@@ -235,6 +236,7 @@ referint_postop_init( Slapi_PBlock *pb )
if(plugin_entry){
char *plugin_attr_value;
+ char **plugin_attr_values;
plugin_attr_value = slapi_entry_attr_get_charptr(plugin_entry, "nsslapd-pluginAllowReplUpdates");
if(plugin_attr_value && strcasecmp(plugin_attr_value,"on")==0){
@@ -242,14 +244,32 @@ referint_postop_init( Slapi_PBlock *pb )
}
slapi_ch_free_string(&plugin_attr_value);
- plugin_attr_value = slapi_entry_attr_get_charptr(plugin_entry, "nsslapd-pluginEntryScope");
+ plugin_attr_values = slapi_entry_attr_get_charray(plugin_entry, "nsslapd-pluginEntryScope");
+ if(plugin_attr_values) {
+ int i,j=0;;
+ for (i=0; plugin_attr_values[i];i++);
+ plugin_EntryScope = (Slapi_DN **)slapi_ch_calloc(sizeof(Slapi_DN *),i+1);
+ for (i=0; plugin_attr_values[i];i++) {
+ if (slapi_dn_syntax_check(NULL, plugin_attr_values[i], 1) == 1) {
+ slapi_log_error(SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM,
+ "Error: Ignoring invalid DN used as plugin entry scope: [%s]\n",
+ plugin_attr_values[i]);
+ slapi_ch_free_string(&plugin_attr_values[i]);
+ } else {
+ plugin_EntryScope[j++] = slapi_sdn_new_dn_passin(plugin_attr_values[i]);
+ }
+ }
+ slapi_ch_free((void**)&plugin_attr_values);
+ }
+ plugin_attr_value = slapi_entry_attr_get_charptr(plugin_entry, "nsslapd-pluginExcludeEntryScope");
if(plugin_attr_value) {
if (slapi_dn_syntax_check(NULL, plugin_attr_value, 1) == 1) {
slapi_log_error(SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM,
- "Error: Ignoring invalid DN used as plugin entry scope: [%s]\n",
+ "Error: Ignoring invalid DN used as plugin exclude entry scope: [%s]\n",
plugin_attr_value);
+ slapi_ch_free_string(&plugin_attr_value);
} else {
- plugin_EntryScope = slapi_sdn_new_dn_byref(plugin_attr_value);
+ plugin_ExcludeEntryScope = slapi_sdn_new_dn_passin(plugin_attr_value);
}
}
@@ -259,8 +279,9 @@ referint_postop_init( Slapi_PBlock *pb )
slapi_log_error(SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM,
"Error: Ignoring invalid DN used as plugin container scope: [%s]\n",
plugin_attr_value);
+ slapi_ch_free_string(&plugin_attr_value);
} else {
- plugin_ContainerScope = slapi_sdn_new_dn_byref(plugin_attr_value);
+ plugin_ContainerScope = slapi_sdn_new_dn_passin(plugin_attr_value);
}
}
@@ -590,6 +611,42 @@ referint_sdn_config_cmp(Slapi_DN *sdn)
}
int
+referint_sdn_in_container_scope(Slapi_DN *sdn)
+{
+ if (plugin_ContainerScope == NULL) {
+ return(1);
+ } else {
+ return(slapi_sdn_issuffix(sdn, plugin_ContainerScope));
+ }
+}
+
+int
+referint_sdn_in_entry_scope(Slapi_DN *sdn)
+{
+ int rc = 0;
+
+ if (plugin_ExcludeEntryScope && slapi_sdn_issuffix(sdn, plugin_ExcludeEntryScope))
+ return (0);
+
+ if (plugin_EntryScope == NULL) {
+ /* no scope defined, all sdns match */
+ return(1);
+ } else {
+ int i = 0;
+ while (plugin_EntryScope[i]) {
+ if (slapi_sdn_issuffix(sdn, plugin_EntryScope[i]) ) {
+ rc = 1;
+ break;
+ } else {
+ i++;
+ }
+ }
+ }
+
+ return (rc);
+}
+
+int
referint_postop_del( Slapi_PBlock *pb )
{
Slapi_DN *sdn = NULL;
@@ -628,7 +685,7 @@ referint_postop_del( Slapi_PBlock *pb )
rc = SLAPI_PLUGIN_SUCCESS;
} else if(delay == 0){ /* no delay */
/* call function to update references to entry */
- if (plugin_EntryScope && slapi_sdn_issuffix(sdn, plugin_EntryScope)) {
+ if (referint_sdn_in_entry_scope(sdn)) {
rc = update_integrity(sdn, NULL, NULL, logChanges);
}
} else {
@@ -681,20 +738,20 @@ referint_postop_modrdn( Slapi_PBlock *pb )
rc = SLAPI_PLUGIN_SUCCESS;
} else if(delay == 0){ /* no delay */
/* call function to update references to entry */
- if (!plugin_EntryScope) {
+ if (!plugin_EntryScope && !plugin_ExcludeEntryScope) {
/* no scope defined, default always process referint */
rc = update_integrity(sdn, newrdn, newsuperior, logChanges);
} else {
const char *newsuperiordn = slapi_sdn_get_dn(newsuperior);
- if ( (newsuperiordn == NULL && slapi_sdn_issuffix(sdn, plugin_EntryScope)) ||
- ( newsuperiordn && slapi_sdn_issuffix(newsuperior, plugin_EntryScope)))
+ if ( (newsuperiordn == NULL && referint_sdn_in_entry_scope(sdn)) ||
+ ( newsuperiordn && referint_sdn_in_entry_scope(newsuperior)))
{
/*
* It is a modrdn inside the scope or into the scope,
* process normal modrdn
*/
rc = update_integrity(sdn, newrdn, newsuperior, logChanges);
- } else if (slapi_sdn_issuffix(sdn, plugin_EntryScope)) {
+ } else if (referint_sdn_in_entry_scope(sdn)) {
/* the entry is moved out of scope, treat as delete */
rc = update_integrity(sdn, NULL, NULL, logChanges);
}
@@ -1556,9 +1613,8 @@ writeintegritylog(Slapi_PBlock *pb, char *logfilename, Slapi_DN *sdn,
const char *newsuperiordn = NULL;
size_t reqdn_len = 0;
- if (plugin_EntryScope &&
- !(slapi_sdn_issuffix(sdn, plugin_EntryScope) ||
- (newsuperior && slapi_sdn_issuffix(newsuperior, plugin_EntryScope)))) {
+ if (!(referint_sdn_in_entry_scope(sdn) ||
+ (newsuperior && referint_sdn_in_entry_scope(newsuperior)))) {
return;
}
/*
@@ -1586,8 +1642,8 @@ writeintegritylog(Slapi_PBlock *pb, char *logfilename, Slapi_DN *sdn,
len_to_write = slapi_sdn_get_ndn_len(sdn) + 5;
newsuperiordn = slapi_sdn_get_dn(newsuperior);
- if (plugin_EntryScope && newsuperiordn &&
- !slapi_sdn_issuffix(newsuperior, plugin_EntryScope)) {
+ if (newsuperiordn &&
+ !referint_sdn_in_entry_scope(newsuperior)) {
/* this is a modrdn which moves the entry out of scope, handle like a delete */
newsuperiordn = NULL;
newrdn = NULL;
10 years, 4 months
Branch '389-ds-base-1.3.1' - ldap/servers
by Noriko Hosoi
ldap/servers/plugins/replication/repl5_tot_protocol.c | 3 +
ldap/servers/slapd/back-ldbm/import-threads.c | 8 ++--
ldap/servers/slapd/connection.c | 36 +++++++++++++++---
ldap/servers/slapd/openldapber.h | 25 ++++++++++++
4 files changed, 62 insertions(+), 10 deletions(-)
New commits:
commit c9d0b6ccad84dd56a536da883f5a8e5acb01bc4e
Author: Noriko Hosoi <nhosoi(a)redhat.com>
Date: Mon Dec 16 13:03:19 2013 -0800
Ticket #47606 - replica init/bulk import errors should be more verbose
Description:
1. maxbersize: If the size of an entry is larger than the consumer's
maxbersize, the following error used to be logged:
Incoming BER Element was too long, max allowable is ### bytes.
Change the nsslapd-maxbersize attribute in cn=config to increase.
This message does not indicate how large the maxbersize needs to be.
This patch adds the code to retrieve the failed ber size.
Revised message:
Incoming BER Element was @@@ bytes, max allowable is ### bytes.
Change the nsslapd-maxbersize attribute in cn=config to increase.
Note: There is no lber API that returns the ber size if it fails to
handle the ber. This patch borrows the internal structure of ber
and get the size. This could be risky since the size or structure
of the ber could be updated in the openldap/mozldap lber.
2. cache size: The bulk import depends upon the nsslapd-cachememsize
value in the backend instance entry (e.g., cn=userRoot,cn=ldbm
database,cn=plugins,cn=config). If an entry size is larger than
the cachememsize, the bulk import used to fail with this message:
import userRoot: REASON: entry too large (@@@ bytes) for the
import buffer size (### bytes). Try increasing nsslapd-
cachememsize.
Also, the message follows the skipping entry message:
import userRoot: WARNING: skipping entry "<DN>"
but actually, it did NOT "skip" the entry and continue the bulk
import, but it failed there and completely wiped out the backend
database.
This patch modifies the message as follows:
import userRoot: REASON: entry too large (@@@ bytes) for the
effective import buffer size (### bytes). Try increasing nsslapd-
cachememsize for the backend instance "userRoot".
and as the message mentions, it just skips the failed entry and
continues the bulk import.
3. In repl5_tot_result_threadmain, when conn_read_result_ex returns
non zero (non SUCCESS), it sets abort, but does not set any error
code to rc (return code), which is not considered as "finished" in
repl5_tot_waitfor_async_results and it continues waiting until the
code reaches the max loop count (about 5 minutes). This patch sets
LDAP_CONNECT_ERROR to the return code along with setting abort, if
conn_read_result_ex returns CONN_NOT_CONNECTED. This makes the bulk
import finish quickly when it fails.
https://fedorahosted.org/389/ticket/47606
Reviewed by rmeggins(a)redhat.com (Thank you, Rich!!)
(cherry picked from commit 1119083d3d99993421609783efcb8962d78724fc)
(cherry picked from commit fde9ed5bf74b4ea1fff875bcb421137c78af1227)
diff --git a/ldap/servers/plugins/replication/repl5_tot_protocol.c b/ldap/servers/plugins/replication/repl5_tot_protocol.c
index a241128..3895ace 100644
--- a/ldap/servers/plugins/replication/repl5_tot_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_tot_protocol.c
@@ -203,6 +203,9 @@ static void repl5_tot_result_threadmain(void *param)
/* If so then we need to take steps to abort the update process */
PR_Lock(cb->lock);
cb->abort = 1;
+ if (conres == CONN_NOT_CONNECTED) {
+ cb->rc = LDAP_CONNECT_ERROR;
+ }
PR_Unlock(cb->lock);
}
/* Should we stop ? */
diff --git a/ldap/servers/slapd/back-ldbm/import-threads.c b/ldap/servers/slapd/back-ldbm/import-threads.c
index c0475c6..95433aa 100644
--- a/ldap/servers/slapd/back-ldbm/import-threads.c
+++ b/ldap/servers/slapd/back-ldbm/import-threads.c
@@ -3330,11 +3330,11 @@ static int bulk_import_queue(ImportJob *job, Slapi_Entry *entry)
newesize = (slapi_entry_size(ep->ep_entry) + sizeof(struct backentry));
if (newesize > job->fifo.bsize) { /* entry too big */
- import_log_notice(job, "WARNING: skipping entry \"%s\"",
- slapi_entry_get_dn(ep->ep_entry));
import_log_notice(job, "REASON: entry too large (%lu bytes) for "
- "the import buffer size (%lu bytes). Try increasing nsslapd-cachememsize.",
- (long unsigned int)newesize, (long unsigned int)job->fifo.bsize);
+ "the effective import buffer size (%lu bytes). "
+ "Try increasing nsslapd-cachememsize for the backend instance \"%s\".",
+ (long unsigned int)newesize, (long unsigned int)job->fifo.bsize,
+ job->inst->inst_name);
backentry_clear_entry(ep); /* entry is released in the frontend on failure*/
backentry_free( &ep ); /* release the backend wrapper, here */
PR_Unlock(job->wire_lock);
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
index fed3512..02c86c5 100644
--- a/ldap/servers/slapd/connection.c
+++ b/ldap/servers/slapd/connection.c
@@ -1749,6 +1749,32 @@ void connection_make_new_pb(Slapi_PBlock **ppb, Connection *conn)
}
+#ifdef USE_OPENLDAP
+#include "openldapber.h"
+#else
+#include "mozldap.h"
+#endif
+
+static ber_tag_t
+_ber_get_len(BerElement *ber, ber_len_t *lenp)
+{
+#ifdef USE_OPENLDAP
+ OLBerElement *lber = (OLBerElement *)ber;
+#else
+ MozElement *lber = (MozElement *)ber;
+#endif
+
+ if (NULL == lenp) {
+ return LBER_DEFAULT;
+ }
+ *lenp = 0;
+ if (NULL == lber) {
+ return LBER_DEFAULT;
+ }
+ *lenp = lber->ber_len;
+ return lber->ber_tag;
+}
+
/*
* Utility function called by connection_read_operation(). This is a
* small wrapper on top of libldap's ber_get_next_buffer_ext().
@@ -1787,18 +1813,16 @@ get_next_from_buffer( void *buffer, size_t buffer_size, ber_len_t *lenp,
if ((LBER_OVERFLOW == *tagp || LBER_DEFAULT == *tagp) && 0 == bytes_scanned &&
!SLAPD_SYSTEM_WOULD_BLOCK_ERROR(errno))
{
- if (LBER_OVERFLOW == *tagp)
- {
- err = SLAPD_DISCONNECT_BER_TOO_BIG;
- }
- else if (errno == ERANGE)
+ if ((LBER_OVERFLOW == *tagp) || (errno == ERANGE))
{
ber_len_t maxbersize = config_get_maxbersize();
+ ber_len_t tmplen = 0;
+ (void)_ber_get_len(ber, &tmplen);
/* openldap does not differentiate between length == 0
and length > max - all we know is that there was a
problem with the length - assume too big */
err = SLAPD_DISCONNECT_BER_TOO_BIG;
- log_ber_too_big_error(conn, 0, maxbersize);
+ log_ber_too_big_error(conn, tmplen, maxbersize);
}
else
{
diff --git a/ldap/servers/slapd/openldapber.h b/ldap/servers/slapd/openldapber.h
new file mode 100644
index 0000000..52644a5
--- /dev/null
+++ b/ldap/servers/slapd/openldapber.h
@@ -0,0 +1,25 @@
+/*
+ * openldap lber library does not provide an API which returns the ber size
+ * (ber->ber_len) when the ber tag is LBER_DEFAULT or LBER_OVERFLOW.
+ * The ber size is useful when issuing an error message to indicate how
+ * large the maxbersize needs to be set.
+ * Borrowed from liblber/lber-int.h
+ */
+struct lber_options {
+ short lbo_valid;
+ unsigned short lbo_options;
+ int lbo_debug;
+};
+struct berelement {
+ struct lber_options ber_opts;
+ ber_tag_t ber_tag;
+ ber_len_t ber_len;
+ ber_tag_t ber_usertag;
+ char *ber_buf;
+ char *ber_ptr;
+ char *ber_end;
+ char *ber_sos_ptr;
+ char *ber_rwptr;
+ void *ber_memctx;
+};
+typedef struct berelement OLBerElement;
10 years, 4 months
Branch '389-ds-base-1.3.2' - ldap/servers
by Noriko Hosoi
ldap/servers/plugins/replication/repl5_tot_protocol.c | 3 +
ldap/servers/slapd/back-ldbm/import-threads.c | 8 ++--
ldap/servers/slapd/connection.c | 36 +++++++++++++++---
ldap/servers/slapd/openldapber.h | 25 ++++++++++++
4 files changed, 62 insertions(+), 10 deletions(-)
New commits:
commit fde9ed5bf74b4ea1fff875bcb421137c78af1227
Author: Noriko Hosoi <nhosoi(a)redhat.com>
Date: Mon Dec 16 13:03:19 2013 -0800
Ticket #47606 - replica init/bulk import errors should be more verbose
Description:
1. maxbersize: If the size of an entry is larger than the consumer's
maxbersize, the following error used to be logged:
Incoming BER Element was too long, max allowable is ### bytes.
Change the nsslapd-maxbersize attribute in cn=config to increase.
This message does not indicate how large the maxbersize needs to be.
This patch adds the code to retrieve the failed ber size.
Revised message:
Incoming BER Element was @@@ bytes, max allowable is ### bytes.
Change the nsslapd-maxbersize attribute in cn=config to increase.
Note: There is no lber API that returns the ber size if it fails to
handle the ber. This patch borrows the internal structure of ber
and get the size. This could be risky since the size or structure
of the ber could be updated in the openldap/mozldap lber.
2. cache size: The bulk import depends upon the nsslapd-cachememsize
value in the backend instance entry (e.g., cn=userRoot,cn=ldbm
database,cn=plugins,cn=config). If an entry size is larger than
the cachememsize, the bulk import used to fail with this message:
import userRoot: REASON: entry too large (@@@ bytes) for the
import buffer size (### bytes). Try increasing nsslapd-
cachememsize.
Also, the message follows the skipping entry message:
import userRoot: WARNING: skipping entry "<DN>"
but actually, it did NOT "skip" the entry and continue the bulk
import, but it failed there and completely wiped out the backend
database.
This patch modifies the message as follows:
import userRoot: REASON: entry too large (@@@ bytes) for the
effective import buffer size (### bytes). Try increasing nsslapd-
cachememsize for the backend instance "userRoot".
and as the message mentions, it just skips the failed entry and
continues the bulk import.
3. In repl5_tot_result_threadmain, when conn_read_result_ex returns
non zero (non SUCCESS), it sets abort, but does not set any error
code to rc (return code), which is not considered as "finished" in
repl5_tot_waitfor_async_results and it continues waiting until the
code reaches the max loop count (about 5 minutes). This patch sets
LDAP_CONNECT_ERROR to the return code along with setting abort, if
conn_read_result_ex returns CONN_NOT_CONNECTED. This makes the bulk
import finish quickly when it fails.
https://fedorahosted.org/389/ticket/47606
Reviewed by rmeggins(a)redhat.com (Thank you, Rich!!)
(cherry picked from commit 1119083d3d99993421609783efcb8962d78724fc)
diff --git a/ldap/servers/plugins/replication/repl5_tot_protocol.c b/ldap/servers/plugins/replication/repl5_tot_protocol.c
index a241128..3895ace 100644
--- a/ldap/servers/plugins/replication/repl5_tot_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_tot_protocol.c
@@ -203,6 +203,9 @@ static void repl5_tot_result_threadmain(void *param)
/* If so then we need to take steps to abort the update process */
PR_Lock(cb->lock);
cb->abort = 1;
+ if (conres == CONN_NOT_CONNECTED) {
+ cb->rc = LDAP_CONNECT_ERROR;
+ }
PR_Unlock(cb->lock);
}
/* Should we stop ? */
diff --git a/ldap/servers/slapd/back-ldbm/import-threads.c b/ldap/servers/slapd/back-ldbm/import-threads.c
index 3a3aab8..9705d9e 100644
--- a/ldap/servers/slapd/back-ldbm/import-threads.c
+++ b/ldap/servers/slapd/back-ldbm/import-threads.c
@@ -3330,11 +3330,11 @@ static int bulk_import_queue(ImportJob *job, Slapi_Entry *entry)
newesize = (slapi_entry_size(ep->ep_entry) + sizeof(struct backentry));
if (newesize > job->fifo.bsize) { /* entry too big */
- import_log_notice(job, "WARNING: skipping entry \"%s\"",
- slapi_entry_get_dn(ep->ep_entry));
import_log_notice(job, "REASON: entry too large (%lu bytes) for "
- "the import buffer size (%lu bytes). Try increasing nsslapd-cachememsize.",
- (long unsigned int)newesize, (long unsigned int)job->fifo.bsize);
+ "the effective import buffer size (%lu bytes). "
+ "Try increasing nsslapd-cachememsize for the backend instance \"%s\".",
+ (long unsigned int)newesize, (long unsigned int)job->fifo.bsize,
+ job->inst->inst_name);
backentry_clear_entry(ep); /* entry is released in the frontend on failure*/
backentry_free( &ep ); /* release the backend wrapper, here */
PR_Unlock(job->wire_lock);
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
index 4397c2a..2d8c91a 100644
--- a/ldap/servers/slapd/connection.c
+++ b/ldap/servers/slapd/connection.c
@@ -1815,6 +1815,32 @@ int connection_wait_for_new_work(Slapi_PBlock *pb, PRIntervalTime interval)
return ret;
}
+#ifdef USE_OPENLDAP
+#include "openldapber.h"
+#else
+#include "mozldap.h"
+#endif
+
+static ber_tag_t
+_ber_get_len(BerElement *ber, ber_len_t *lenp)
+{
+#ifdef USE_OPENLDAP
+ OLBerElement *lber = (OLBerElement *)ber;
+#else
+ MozElement *lber = (MozElement *)ber;
+#endif
+
+ if (NULL == lenp) {
+ return LBER_DEFAULT;
+ }
+ *lenp = 0;
+ if (NULL == lber) {
+ return LBER_DEFAULT;
+ }
+ *lenp = lber->ber_len;
+ return lber->ber_tag;
+}
+
/*
* Utility function called by connection_read_operation(). This is a
* small wrapper on top of libldap's ber_get_next_buffer_ext().
@@ -1855,18 +1881,16 @@ get_next_from_buffer( void *buffer, size_t buffer_size, ber_len_t *lenp,
if ((LBER_OVERFLOW == *tagp || LBER_DEFAULT == *tagp) && 0 == bytes_scanned &&
!SLAPD_SYSTEM_WOULD_BLOCK_ERROR(errno))
{
- if (LBER_OVERFLOW == *tagp)
- {
- err = SLAPD_DISCONNECT_BER_TOO_BIG;
- }
- else if (errno == ERANGE)
+ if ((LBER_OVERFLOW == *tagp) || (errno == ERANGE))
{
ber_len_t maxbersize = config_get_maxbersize();
+ ber_len_t tmplen = 0;
+ (void)_ber_get_len(ber, &tmplen);
/* openldap does not differentiate between length == 0
and length > max - all we know is that there was a
problem with the length - assume too big */
err = SLAPD_DISCONNECT_BER_TOO_BIG;
- log_ber_too_big_error(conn, 0, maxbersize);
+ log_ber_too_big_error(conn, tmplen, maxbersize);
}
else
{
diff --git a/ldap/servers/slapd/openldapber.h b/ldap/servers/slapd/openldapber.h
new file mode 100644
index 0000000..52644a5
--- /dev/null
+++ b/ldap/servers/slapd/openldapber.h
@@ -0,0 +1,25 @@
+/*
+ * openldap lber library does not provide an API which returns the ber size
+ * (ber->ber_len) when the ber tag is LBER_DEFAULT or LBER_OVERFLOW.
+ * The ber size is useful when issuing an error message to indicate how
+ * large the maxbersize needs to be set.
+ * Borrowed from liblber/lber-int.h
+ */
+struct lber_options {
+ short lbo_valid;
+ unsigned short lbo_options;
+ int lbo_debug;
+};
+struct berelement {
+ struct lber_options ber_opts;
+ ber_tag_t ber_tag;
+ ber_len_t ber_len;
+ ber_tag_t ber_usertag;
+ char *ber_buf;
+ char *ber_ptr;
+ char *ber_end;
+ char *ber_sos_ptr;
+ char *ber_rwptr;
+ void *ber_memctx;
+};
+typedef struct berelement OLBerElement;
10 years, 4 months
ldap/servers
by Noriko Hosoi
ldap/servers/plugins/replication/repl5_tot_protocol.c | 3 +
ldap/servers/slapd/back-ldbm/import-threads.c | 8 ++--
ldap/servers/slapd/connection.c | 36 +++++++++++++++---
ldap/servers/slapd/openldapber.h | 25 ++++++++++++
4 files changed, 62 insertions(+), 10 deletions(-)
New commits:
commit 1119083d3d99993421609783efcb8962d78724fc
Author: Noriko Hosoi <nhosoi(a)redhat.com>
Date: Mon Dec 16 13:03:19 2013 -0800
Ticket #47606 - replica init/bulk import errors should be more verbose
Description:
1. maxbersize: If the size of an entry is larger than the consumer's
maxbersize, the following error used to be logged:
Incoming BER Element was too long, max allowable is ### bytes.
Change the nsslapd-maxbersize attribute in cn=config to increase.
This message does not indicate how large the maxbersize needs to be.
This patch adds the code to retrieve the failed ber size.
Revised message:
Incoming BER Element was @@@ bytes, max allowable is ### bytes.
Change the nsslapd-maxbersize attribute in cn=config to increase.
Note: There is no lber API that returns the ber size if it fails to
handle the ber. This patch borrows the internal structure of ber
and get the size. This could be risky since the size or structure
of the ber could be updated in the openldap/mozldap lber.
2. cache size: The bulk import depends upon the nsslapd-cachememsize
value in the backend instance entry (e.g., cn=userRoot,cn=ldbm
database,cn=plugins,cn=config). If an entry size is larger than
the cachememsize, the bulk import used to fail with this message:
import userRoot: REASON: entry too large (@@@ bytes) for the
import buffer size (### bytes). Try increasing nsslapd-
cachememsize.
Also, the message follows the skipping entry message:
import userRoot: WARNING: skipping entry "<DN>"
but actually, it did NOT "skip" the entry and continue the bulk
import, but it failed there and completely wiped out the backend
database.
This patch modifies the message as follows:
import userRoot: REASON: entry too large (@@@ bytes) for the
effective import buffer size (### bytes). Try increasing nsslapd-
cachememsize for the backend instance "userRoot".
and as the message mentions, it just skips the failed entry and
continues the bulk import.
3. In repl5_tot_result_threadmain, when conn_read_result_ex returns
non zero (non SUCCESS), it sets abort, but does not set any error
code to rc (return code), which is not considered as "finished" in
repl5_tot_waitfor_async_results and it continues waiting until the
code reaches the max loop count (about 5 minutes). This patch sets
LDAP_CONNECT_ERROR to the return code along with setting abort, if
conn_read_result_ex returns CONN_NOT_CONNECTED. This makes the bulk
import finish quickly when it fails.
https://fedorahosted.org/389/ticket/47606
Reviewed by rmeggins(a)redhat.com (Thank you, Rich!!)
diff --git a/ldap/servers/plugins/replication/repl5_tot_protocol.c b/ldap/servers/plugins/replication/repl5_tot_protocol.c
index a241128..3895ace 100644
--- a/ldap/servers/plugins/replication/repl5_tot_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_tot_protocol.c
@@ -203,6 +203,9 @@ static void repl5_tot_result_threadmain(void *param)
/* If so then we need to take steps to abort the update process */
PR_Lock(cb->lock);
cb->abort = 1;
+ if (conres == CONN_NOT_CONNECTED) {
+ cb->rc = LDAP_CONNECT_ERROR;
+ }
PR_Unlock(cb->lock);
}
/* Should we stop ? */
diff --git a/ldap/servers/slapd/back-ldbm/import-threads.c b/ldap/servers/slapd/back-ldbm/import-threads.c
index 9191ee3..6eae676 100644
--- a/ldap/servers/slapd/back-ldbm/import-threads.c
+++ b/ldap/servers/slapd/back-ldbm/import-threads.c
@@ -3331,11 +3331,11 @@ static int bulk_import_queue(ImportJob *job, Slapi_Entry *entry)
newesize = (slapi_entry_size(ep->ep_entry) + sizeof(struct backentry));
if (newesize > job->fifo.bsize) { /* entry too big */
- import_log_notice(job, "WARNING: skipping entry \"%s\"",
- slapi_entry_get_dn(ep->ep_entry));
import_log_notice(job, "REASON: entry too large (%lu bytes) for "
- "the import buffer size (%lu bytes). Try increasing nsslapd-cachememsize.",
- (long unsigned int)newesize, (long unsigned int)job->fifo.bsize);
+ "the effective import buffer size (%lu bytes). "
+ "Try increasing nsslapd-cachememsize for the backend instance \"%s\".",
+ (long unsigned int)newesize, (long unsigned int)job->fifo.bsize,
+ job->inst->inst_name);
backentry_clear_entry(ep); /* entry is released in the frontend on failure*/
backentry_free( &ep ); /* release the backend wrapper, here */
PR_Unlock(job->wire_lock);
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
index 4397c2a..2d8c91a 100644
--- a/ldap/servers/slapd/connection.c
+++ b/ldap/servers/slapd/connection.c
@@ -1815,6 +1815,32 @@ int connection_wait_for_new_work(Slapi_PBlock *pb, PRIntervalTime interval)
return ret;
}
+#ifdef USE_OPENLDAP
+#include "openldapber.h"
+#else
+#include "mozldap.h"
+#endif
+
+static ber_tag_t
+_ber_get_len(BerElement *ber, ber_len_t *lenp)
+{
+#ifdef USE_OPENLDAP
+ OLBerElement *lber = (OLBerElement *)ber;
+#else
+ MozElement *lber = (MozElement *)ber;
+#endif
+
+ if (NULL == lenp) {
+ return LBER_DEFAULT;
+ }
+ *lenp = 0;
+ if (NULL == lber) {
+ return LBER_DEFAULT;
+ }
+ *lenp = lber->ber_len;
+ return lber->ber_tag;
+}
+
/*
* Utility function called by connection_read_operation(). This is a
* small wrapper on top of libldap's ber_get_next_buffer_ext().
@@ -1855,18 +1881,16 @@ get_next_from_buffer( void *buffer, size_t buffer_size, ber_len_t *lenp,
if ((LBER_OVERFLOW == *tagp || LBER_DEFAULT == *tagp) && 0 == bytes_scanned &&
!SLAPD_SYSTEM_WOULD_BLOCK_ERROR(errno))
{
- if (LBER_OVERFLOW == *tagp)
- {
- err = SLAPD_DISCONNECT_BER_TOO_BIG;
- }
- else if (errno == ERANGE)
+ if ((LBER_OVERFLOW == *tagp) || (errno == ERANGE))
{
ber_len_t maxbersize = config_get_maxbersize();
+ ber_len_t tmplen = 0;
+ (void)_ber_get_len(ber, &tmplen);
/* openldap does not differentiate between length == 0
and length > max - all we know is that there was a
problem with the length - assume too big */
err = SLAPD_DISCONNECT_BER_TOO_BIG;
- log_ber_too_big_error(conn, 0, maxbersize);
+ log_ber_too_big_error(conn, tmplen, maxbersize);
}
else
{
diff --git a/ldap/servers/slapd/openldapber.h b/ldap/servers/slapd/openldapber.h
new file mode 100644
index 0000000..52644a5
--- /dev/null
+++ b/ldap/servers/slapd/openldapber.h
@@ -0,0 +1,25 @@
+/*
+ * openldap lber library does not provide an API which returns the ber size
+ * (ber->ber_len) when the ber tag is LBER_DEFAULT or LBER_OVERFLOW.
+ * The ber size is useful when issuing an error message to indicate how
+ * large the maxbersize needs to be set.
+ * Borrowed from liblber/lber-int.h
+ */
+struct lber_options {
+ short lbo_valid;
+ unsigned short lbo_options;
+ int lbo_debug;
+};
+struct berelement {
+ struct lber_options ber_opts;
+ ber_tag_t ber_tag;
+ ber_len_t ber_len;
+ ber_tag_t ber_usertag;
+ char *ber_buf;
+ char *ber_ptr;
+ char *ber_end;
+ char *ber_sos_ptr;
+ char *ber_rwptr;
+ void *ber_memctx;
+};
+typedef struct berelement OLBerElement;
10 years, 4 months
ldap/servers
by Richard Allen Megginson
ldap/servers/slapd/schema.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
New commits:
commit 97cf45d236a8a333af2895b2bf04cc5940a14e37
Author: Rich Megginson <rmeggins(a)redhat.com>
Date: Mon Dec 16 08:38:43 2013 -0700
Ticket #47631 objectclass may, must lists skip rest of objectclass once first is found in sup
https://fedorahosted.org/389/ticket/47631
Reviewed by: nkinder (Thanks!)
Branch: master
Fix Description: Once a match is found, reset the found flag to 0 so that we
will keep looking for the next match.
Platforms tested: RHEL6 x86_64
Flag Day: no
Doc impact: no
diff --git a/ldap/servers/slapd/schema.c b/ldap/servers/slapd/schema.c
index ef85be3..414e7f0 100644
--- a/ldap/servers/slapd/schema.c
+++ b/ldap/servers/slapd/schema.c
@@ -4333,7 +4333,8 @@ parse_objclass_str ( const char *input, struct objclass **oc, char *errorbuf,
OrigAllowedAttrsArray = (char **) slapi_ch_malloc (1 * sizeof(char *)) ;
OrigAllowedAttrsArray[0] = NULL;
if (psup_oc->oc_required && objClass->oc_at_oids_must) {
- for (i = 0, found_it = 0; objClass->oc_at_oids_must[i]; i++) {
+ for (i = 0; objClass->oc_at_oids_must[i]; i++) {
+ found_it = 0;
for (j = 0; psup_oc->oc_required[j]; j++) {
if (strcasecmp (psup_oc->oc_required[j], objClass->oc_at_oids_must[i]) == 0) {
found_it = 1;
@@ -4350,7 +4351,8 @@ parse_objclass_str ( const char *input, struct objclass **oc, char *errorbuf,
OrigRequiredAttrsArray = charray_dup(objClass->oc_at_oids_must);
}
if (psup_oc->oc_allowed && objClass->oc_at_oids_may) {
- for (i = 0, found_it = 0; objClass->oc_at_oids_may[i]; i++) {
+ for (i = 0; objClass->oc_at_oids_may[i]; i++) {
+ found_it = 0;
for (j = 0; psup_oc->oc_allowed[j]; j++) {
if (strcasecmp (psup_oc->oc_allowed[j], objClass->oc_at_oids_may[i]) == 0) {
found_it = 1;
10 years, 4 months
Branch '389-ds-base-1.3.2' - ldap/servers
by Richard Allen Megginson
ldap/servers/slapd/schema.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
New commits:
commit 4ea2fde8f9face3d090ea5335571da1c0400c5f1
Author: Rich Megginson <rmeggins(a)redhat.com>
Date: Mon Dec 16 08:38:43 2013 -0700
Ticket #47631 objectclass may, must lists skip rest of objectclass once first is found in sup
https://fedorahosted.org/389/ticket/47631
Reviewed by: nkinder (Thanks!)
Branch: 389-ds-base-1.3.2
Fix Description: Once a match is found, reset the found flag to 0 so that we
will keep looking for the next match.
Platforms tested: RHEL6 x86_64
Flag Day: no
Doc impact: no
(cherry picked from commit 97cf45d236a8a333af2895b2bf04cc5940a14e37)
diff --git a/ldap/servers/slapd/schema.c b/ldap/servers/slapd/schema.c
index 6fdb99f..6245680 100644
--- a/ldap/servers/slapd/schema.c
+++ b/ldap/servers/slapd/schema.c
@@ -4309,7 +4309,8 @@ parse_objclass_str ( const char *input, struct objclass **oc, char *errorbuf,
OrigAllowedAttrsArray = (char **) slapi_ch_malloc (1 * sizeof(char *)) ;
OrigAllowedAttrsArray[0] = NULL;
if (psup_oc->oc_required && objClass->oc_at_oids_must) {
- for (i = 0, found_it = 0; objClass->oc_at_oids_must[i]; i++) {
+ for (i = 0; objClass->oc_at_oids_must[i]; i++) {
+ found_it = 0;
for (j = 0; psup_oc->oc_required[j]; j++) {
if (strcasecmp (psup_oc->oc_required[j], objClass->oc_at_oids_must[i]) == 0) {
found_it = 1;
@@ -4326,7 +4327,8 @@ parse_objclass_str ( const char *input, struct objclass **oc, char *errorbuf,
OrigRequiredAttrsArray = charray_dup(objClass->oc_at_oids_must);
}
if (psup_oc->oc_allowed && objClass->oc_at_oids_may) {
- for (i = 0, found_it = 0; objClass->oc_at_oids_may[i]; i++) {
+ for (i = 0; objClass->oc_at_oids_may[i]; i++) {
+ found_it = 0;
for (j = 0; psup_oc->oc_allowed[j]; j++) {
if (strcasecmp (psup_oc->oc_allowed[j], objClass->oc_at_oids_may[i]) == 0) {
found_it = 1;
10 years, 4 months