dirsrvtests/tests ldap/servers
by thierry bordaz
dirsrvtests/tests/tickets/ticket49073_test.py | 258 ++++++++++++++++++++++++++
ldap/servers/plugins/replication/repl5_agmt.c | 19 +
2 files changed, 272 insertions(+), 5 deletions(-)
New commits:
commit 81c6e66514ab743d55a7ee4adb5a566f4d796fe0
Author: Thierry Bordaz <tbordaz(a)redhat.com>
Date: Tue Dec 20 11:59:35 2016 +0100
Ticket 49073: nsDS5ReplicatedAttributeListTotal fails when excluding no attribute
Bug Description:
When nsDS5ReplicatedAttributeListTotal defines an empty list of excluded attribute
this is to send all entry attributes during the total initialization.
When evaluating the attributes to send (total init), if the attribute list
is empty, the replica agreement assumes that it was not defined and falls back
to nsDS5ReplicatedAttributeList value.
Fix Description:
When nsDS5ReplicatedAttributeListTotal is defined, set a flag to consider
its value even when that value is empty
https://bugzilla.redhat.com/show_bug.cgi?id=1405257
Reviewed by: Mark Reynolds (Thanks Mark !)
Platforms tested: F23
Flag Day: no
Doc impact: no
diff --git a/dirsrvtests/tests/tickets/ticket49073_test.py b/dirsrvtests/tests/tickets/ticket49073_test.py
new file mode 100644
index 0000000..0c594a9
--- /dev/null
+++ b/dirsrvtests/tests/tickets/ticket49073_test.py
@@ -0,0 +1,258 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from lib389.utils import *
+
+DEBUGGING = False
+GROUP_DN = ("cn=group," + DEFAULT_SUFFIX)
+
+if DEBUGGING:
+ logging.getLogger(__name__).setLevel(logging.DEBUG)
+else:
+ logging.getLogger(__name__).setLevel(logging.INFO)
+log = logging.getLogger(__name__)
+
+
+class TopologyReplication(object):
+ def __init__(self, master1, master2):
+ master1.open()
+ self.master1 = master1
+ master2.open()
+ self.master2 = master2
+
+
+@pytest.fixture(scope="module")
+def topology(request):
+ """Create Replication Deployment"""
+
+ # Creating master 1...
+ if DEBUGGING:
+ master1 = DirSrv(verbose=True)
+ else:
+ master1 = DirSrv(verbose=False)
+ args_instance[SER_HOST] = HOST_MASTER_1
+ args_instance[SER_PORT] = PORT_MASTER_1
+ args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_master = args_instance.copy()
+ master1.allocate(args_master)
+ instance_master1 = master1.exists()
+ if instance_master1:
+ master1.delete()
+ master1.create()
+ master1.open()
+ master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
+
+ # Creating master 2...
+ if DEBUGGING:
+ master2 = DirSrv(verbose=True)
+ else:
+ master2 = DirSrv(verbose=False)
+ args_instance[SER_HOST] = HOST_MASTER_2
+ args_instance[SER_PORT] = PORT_MASTER_2
+ args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_master = args_instance.copy()
+ master2.allocate(args_master)
+ instance_master2 = master2.exists()
+ if instance_master2:
+ master2.delete()
+ master2.create()
+ master2.open()
+ master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
+
+ def fin():
+ """If we are debugging just stop the instances,
+ otherwise remove them
+ """
+
+ if DEBUGGING:
+ master1.stop()
+ master2.stop()
+ else:
+ #master1.delete()
+ #master2.delete()
+ pass
+
+ request.addfinalizer(fin)
+
+ # Create all the agreements
+
+ # Creating agreement from master 1 to master 2
+ properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
+ if not m1_m2_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m1_m2_agmt)
+
+ # Creating agreement from master 2 to master 1
+ properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
+ if not m2_m1_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m2_m1_agmt)
+
+ # Allow the replicas to get situated with the new agreements...
+ time.sleep(5)
+
+ # Initialize all the agreements
+ master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
+ master1.waitForReplInit(m1_m2_agmt)
+
+ # Check replication is working...
+ if master1.testReplication(DEFAULT_SUFFIX, master2):
+ log.info('Replication is working.')
+ else:
+ log.fatal('Replication is not working.')
+ assert False
+
+ # Clear out the tmp dir
+ master1.clearTmpDir(__file__)
+
+ return TopologyReplication(master1, master2)
+
+def _add_group_with_members(topology):
+ # Create group
+ try:
+ topology.master1.add_s(Entry((GROUP_DN,
+ {'objectclass': 'top groupofnames'.split(),
+ 'cn': 'group'})))
+ except ldap.LDAPError as e:
+ log.fatal('Failed to add group: error ' + e.message['desc'])
+ assert False
+
+ # Add members to the group - set timeout
+ log.info('Adding members to the group...')
+ for idx in range(1, 5):
+ try:
+ MEMBER_VAL = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
+ topology.master1.modify_s(GROUP_DN,
+ [(ldap.MOD_ADD,
+ 'member',
+ MEMBER_VAL)])
+ except ldap.LDAPError as e:
+ log.fatal('Failed to update group: member (%s) - error: %s' %
+ (MEMBER_VAL, e.message['desc']))
+ assert False
+
+def _check_memberof(master, presence_flag):
+ # Check that members have memberof attribute on M1
+ for idx in range(1, 5):
+ try:
+ USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
+ ent = master.getEntry(USER_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ if presence_flag:
+ assert ent.hasAttr('memberof') and ent.getValue('memberof') == GROUP_DN
+ else:
+ assert not ent.hasAttr('memberof')
+ except ldap.LDAPError as e:
+ log.fatal('Failed to retrieve user (%s): error %s' % (USER_DN, e.message['desc']))
+ assert False
+
+def _check_entry_exist(master, dn):
+ attempt = 0
+ while attempt <= 10:
+ try:
+ dn
+ ent = master.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
+ break
+ except ldap.NO_SUCH_OBJECT:
+ attempt = attempt + 1
+ time.sleep(1)
+ except ldap.LDAPError as e:
+ log.fatal('Failed to retrieve user (%s): error %s' % (dn, e.message['desc']))
+ assert False
+ assert attempt != 10
+
+def test_ticket49073(topology):
+ """Write your replication test here.
+
+ To access each DirSrv instance use: topology.master1, topology.master2,
+ ..., topology.hub1, ..., topology.consumer1,...
+
+ Also, if you need any testcase initialization,
+ please, write additional fixture for that(include finalizer).
+ """
+ topology.master1.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology.master1.restart(timeout=10)
+ topology.master2.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology.master2.restart(timeout=10)
+
+ # Configure fractional to prevent total init to send memberof
+ ents = topology.master1.agreement.list(suffix=SUFFIX)
+ assert len(ents) == 1
+ log.info('update %s to add nsDS5ReplicatedAttributeListTotal' % ents[0].dn)
+ topology.master1.modify_s(ents[0].dn,
+ [(ldap.MOD_REPLACE,
+ 'nsDS5ReplicatedAttributeListTotal',
+ '(objectclass=*) $ EXCLUDE '),
+ (ldap.MOD_REPLACE,
+ 'nsDS5ReplicatedAttributeList',
+ '(objectclass=*) $ EXCLUDE memberOf')])
+ topology.master1.restart(timeout=10)
+
+ #
+ # create some users and a group
+ #
+ log.info('create users and group...')
+ for idx in range(1, 5):
+ try:
+ USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
+ topology.master1.add_s(Entry((USER_DN,
+ {'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'member%d' % (idx)})))
+ except ldap.LDAPError as e:
+ log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.message['desc']))
+ assert False
+
+ _check_entry_exist(topology.master2, "uid=member4,%s" % (DEFAULT_SUFFIX))
+ _add_group_with_members(topology)
+ _check_entry_exist(topology.master2, GROUP_DN)
+
+ # Check that for regular update memberof was on both side (because plugin is enabled both)
+ time.sleep(5)
+ _check_memberof(topology.master1, True)
+ _check_memberof(topology.master2, True)
+
+
+ # reinit with fractional definition
+ ents = topology.master1.agreement.list(suffix=SUFFIX)
+ assert len(ents) == 1
+ topology.master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
+ topology.master1.waitForReplInit(ents[0].dn)
+
+ # Check that for total update memberof was on both side
+ # because memberof is NOT excluded from total init
+ time.sleep(5)
+ _check_memberof(topology.master1, True)
+ _check_memberof(topology.master2, True)
+
+ if DEBUGGING:
+ # Add debugging steps(if any)...
+ pass
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
+
diff --git a/ldap/servers/plugins/replication/repl5_agmt.c b/ldap/servers/plugins/replication/repl5_agmt.c
index 067bf39..6aee261 100644
--- a/ldap/servers/plugins/replication/repl5_agmt.c
+++ b/ldap/servers/plugins/replication/repl5_agmt.c
@@ -78,6 +78,7 @@ typedef struct repl5agmt {
Slapi_DN *replarea; /* DN of replicated area */
char **frac_attrs; /* list of fractional attributes to be replicated */
char **frac_attrs_total; /* list of fractional attributes to be replicated for total update protocol */
+ PRBool frac_attr_total_defined; /* TRUE if frac_attrs_total is defined */
Schedule *schedule; /* Scheduling information */
int auto_initialize; /* 1 = automatically re-initialize replica */
const Slapi_DN *dn; /* DN of replication agreement entry */
@@ -621,6 +622,7 @@ agmt_delete(void **rap)
slapi_ch_free_string(&ra->binddn);
slapi_ch_array_free(ra->frac_attrs);
slapi_ch_array_free(ra->frac_attrs_total);
+ ra->frac_attr_total_defined = PR_FALSE;
if (NULL != ra->creds)
{
@@ -1044,8 +1046,7 @@ agmt_get_fractional_attrs_total(const Repl_Agmt *ra)
{
char ** return_value = NULL;
PR_ASSERT(NULL != ra);
- if (NULL == ra->frac_attrs_total)
- {
+ if (!ra->frac_attr_total_defined) {
return agmt_get_fractional_attrs(ra);
}
PR_Lock(ra->lock);
@@ -1074,7 +1075,7 @@ int agmt_is_fractional_attr_total(const Repl_Agmt *ra, const char *attrname)
{
int return_value;
PR_ASSERT(NULL != ra);
- if (NULL == ra->frac_attrs_total)
+ if (!ra->frac_attr_total_defined)
{
return agmt_is_fractional_attr(ra, attrname);
}
@@ -1611,6 +1612,7 @@ agmt_set_replicated_attributes_total_from_entry(Repl_Agmt *ra, const Slapi_Entry
{
slapi_ch_array_free(ra->frac_attrs_total);
ra->frac_attrs_total = NULL;
+ ra->frac_attr_total_defined = PR_FALSE;
}
if (NULL != sattr)
{
@@ -1620,6 +1622,9 @@ agmt_set_replicated_attributes_total_from_entry(Repl_Agmt *ra, const Slapi_Entry
{
const char *val = slapi_value_get_string(sval);
return_value = agmt_parse_excluded_attrs_config_attr(val,&(ra->frac_attrs_total));
+ if (return_value == 0) {
+ ra->frac_attr_total_defined = PR_TRUE;
+ }
}
}
PR_Unlock(ra->lock);
@@ -1675,6 +1680,7 @@ agmt_set_replicated_attributes_total_from_attr(Repl_Agmt *ra, Slapi_Attr *sattr)
{
slapi_ch_array_free(ra->frac_attrs_total);
ra->frac_attrs_total = NULL;
+ ra->frac_attr_total_defined = PR_FALSE;
}
if (NULL != sattr)
{
@@ -1684,6 +1690,9 @@ agmt_set_replicated_attributes_total_from_attr(Repl_Agmt *ra, Slapi_Attr *sattr)
{
const char *val = slapi_value_get_string(sval);
return_value = agmt_parse_excluded_attrs_config_attr(val,&(ra->frac_attrs_total));
+ if (return_value == 0) {
+ ra->frac_attr_total_defined = PR_TRUE;
+ }
}
}
PR_Unlock(ra->lock);
@@ -1706,9 +1715,9 @@ agmt_validate_replicated_attributes(Repl_Agmt *ra, int total)
char **frac_attrs = NULL;
/* If checking for total update, use the total attr list
- * if it exists. If oen is not set, use the incremental
+ * if it exists. If total attr list is not set, use the incremental
* attr list. */
- if (total && ra->frac_attrs_total)
+ if (total && ra->frac_attr_total_defined)
{
frac_attrs = ra->frac_attrs_total;
}
7 years, 4 months
ldap/schema
by thierry bordaz
ldap/schema/01core389.ldif | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
New commits:
commit 64a425e4ea868bc1f08145490a7c8c9cf5c91581
Author: Thierry Bordaz <tbordaz(a)redhat.com>
Date: Wed Dec 21 16:31:48 2016 +0100
Ticket 49074 - incompatible nsEncryptionConfig object definition prevents RHEL 7->6 schema replication
Bug Description:
The nsEncryptionConfig schema definitions diverged between 1.3.x and 1.2.11.15-83.
Schema learning mechanism does not merge definition so the schema can not be pushed RHEL7->6.
This triggers schema violation errors
Fix Description:
Defines nsTLS10, nsTLS11 and nsTLS12 attributetypes and add them to the allowed
attributes list of nsEncryptionConfig
https://fedorahosted.org/389/ticket/49074
Reviewed by: Noriko Hosoi (thanks!!)
Platforms tested: RHEL7.3 vs RHEL6.8 and RHEL6.9
Flag Day: no
Doc impact: no
diff --git a/ldap/schema/01core389.ldif b/ldap/schema/01core389.ldif
index dfa4729..5e5f69f 100644
--- a/ldap/schema/01core389.ldif
+++ b/ldap/schema/01core389.ldif
@@ -91,6 +91,9 @@ attributeTypes: ( nsKeyfile-oid NAME 'nsKeyfile' DESC 'Netscape defined attribut
attributeTypes: ( nsSSL2-oid NAME 'nsSSL2' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape' )
attributeTypes: ( nsSSL3-oid NAME 'nsSSL3' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape' )
attributeTypes: ( nsTLS1-oid NAME 'nsTLS1' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape' )
+attributeTypes: ( nsTLS10-oid NAME 'nsTLS10' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape' )
+attributeTypes: ( nsTLS11-oid NAME 'nsTLS11' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape' )
+attributeTypes: ( nsTLS12-oid NAME 'nsTLS12' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape' )
attributeTypes: ( sslVersionMin-oid NAME 'sslVersionMin' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape' )
attributeTypes: ( sslVersionMax-oid NAME 'sslVersionMax' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape' )
attributeTypes: ( nsSSLClientAuth-oid NAME 'nsSSLClientAuth' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape' )
@@ -314,7 +317,7 @@ objectClasses: ( 2.16.840.1.113730.3.2.103 NAME 'nsDS5ReplicationAgreement' DESC
objectClasses: ( 2.16.840.1.113730.3.2.39 NAME 'nsslapdConfig' DESC 'Netscape defined objectclass' SUP top MAY ( cn ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.317 NAME 'nsSaslMapping' DESC 'Netscape defined objectclass' SUP top MUST ( cn $ nsSaslMapRegexString $ nsSaslMapBaseDNTemplate $ nsSaslMapFilterTemplate ) MAY ( nsSaslMapPriority ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.43 NAME 'nsSNMP' DESC 'Netscape defined objectclass' SUP top MUST ( cn $ nsSNMPEnabled ) MAY ( nsSNMPOrganization $ nsSNMPLocation $ nsSNMPContact $ nsSNMPDescription $ nsSNMPName $ nsSNMPMasterHost $ nsSNMPMasterPort ) X-ORIGIN 'Netscape Directory Server' )
-objectClasses: ( nsEncryptionConfig-oid NAME 'nsEncryptionConfig' DESC 'Netscape defined objectclass' SUP top MUST ( cn ) MAY ( nsCertfile $ nsKeyfile $ nsSSL2 $ nsSSL3 $ nsTLS1 $ sslVersionMin $ sslVersionMax $ nsSSLSessionTimeout $ nsSSL3SessionTimeout $ nsSSLClientAuth $ nsSSL2Ciphers $ nsSSL3Ciphers $ nsSSLSupportedCiphers $ allowWeakCipher $ CACertExtractFile $ allowWeakDHParam ) X-ORIGIN 'Netscape' )
+objectClasses: ( nsEncryptionConfig-oid NAME 'nsEncryptionConfig' DESC 'Netscape defined objectclass' SUP top MUST ( cn ) MAY ( nsCertfile $ nsKeyfile $ nsSSL2 $ nsSSL3 $ nsTLS1 $ nsTLS10 $ nsTLS11 $ nsTLS12 $ sslVersionMin $ sslVersionMax $ nsSSLSessionTimeout $ nsSSL3SessionTimeout $ nsSSLClientAuth $ nsSSL2Ciphers $ nsSSL3Ciphers $ nsSSLSupportedCiphers $ allowWeakCipher $ CACertExtractFile $ allowWeakDHParam ) X-ORIGIN 'Netscape' )
objectClasses: ( nsEncryptionModule-oid NAME 'nsEncryptionModule' DESC 'Netscape defined objectclass' SUP top MUST ( cn ) MAY ( nsSSLToken $ nsSSLPersonalityssl $ nsSSLActivation $ ServerKeyExtractFile $ ServerCertExtractFile ) X-ORIGIN 'Netscape' )
objectClasses: ( 2.16.840.1.113730.3.2.327 NAME 'rootDNPluginConfig' DESC 'Netscape defined objectclass' SUP top MUST ( cn ) MAY ( rootdn-open-time $ rootdn-close-time $ rootdn-days-allowed $ rootdn-allow-host $ rootdn-deny-host $ rootdn-allow-ip $ rootdn-deny-ip ) X-ORIGIN 'Netscape' )
objectClasses: ( 2.16.840.1.113730.3.2.328 NAME 'nsSchemaPolicy' DESC 'Netscape defined objectclass' SUP top MAY ( cn $ schemaUpdateObjectclassAccept $ schemaUpdateObjectclassReject $ schemaUpdateAttributeAccept $ schemaUpdateAttributeReject) X-ORIGIN 'Netscape Directory Server' )
7 years, 4 months
Makefile.am rpm/389-ds-base.spec.in
by William Brown
Makefile.am | 2 ++
rpm/389-ds-base.spec.in | 25 +++++++++++++++++++------
2 files changed, 21 insertions(+), 6 deletions(-)
New commits:
commit 896c6db9644d016c7cde212d003376fd46c2cd89
Author: William Brown <firstyear(a)redhat.com>
Date: Tue Dec 20 16:27:57 2016 +1000
Ticket 48835 - package tests into python site packages - fix rpm
Bug Description: While building this, I only tested on fedora. Due to changes
in python packaging, on RHEL this failed to build the rpms.
Fix Description: fix the python build macros to work on RHEL7 (without epel)
and fix the makefile to correctly create the dist file.
https://fedorahosted.org/389/ticket/48835
Author: wibrown
Review by: mreynolds (Thanks!)
diff --git a/Makefile.am b/Makefile.am
index 1fa117c..7d49e88 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -405,6 +405,7 @@ dist_noinst_HEADERS = \
ldap/servers/slapd/protect_db.h \
ldap/servers/slapd/proto-slap.h \
ldap/servers/slapd/pw.h \
+ ldap/servers/slapd/pw_verify.h \
ldap/servers/slapd/secerrstrs.h \
ldap/servers/slapd/slap.h \
ldap/servers/slapd/slapi-plugin-compat4.h \
@@ -494,6 +495,7 @@ dist_noinst_DATA = \
$(srcdir)/LICENSE \
$(srcdir)/LICENSE.* \
$(srcdir)/VERSION.sh \
+ $(srcdir)/setup.py.in \
$(srcdir)/wrappers/*.in \
$(srcdir)/wrappers/systemd.template.sysconfig \
$(srcdir)/dirsrvtests \
diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
index 2445767..77c77fb 100644
--- a/rpm/389-ds-base.spec.in
+++ b/rpm/389-ds-base.spec.in
@@ -233,13 +233,18 @@ Obsoletes: %{name} <= 1.3.5.4
%description snmp
SNMP Agent for the 389 Directory Server base package.
-
-
%package -n python2-%{srcname}-tests
Summary: The lib389 Continuous Integration Tests
Group: Development/Libraries
-Requires: python2-lib389
BuildArch: noarch
+Requires: python2-lib389
+%if 0%{?rhel}
+BuildRequires: python-devel
+BuildRequires: python-setuptools
+%else
+BuildRequires: python2-devel
+BuildRequires: python2-setuptools
+%endif
%description -n python2-%{srcname}-tests
The lib389 CI tests that can be run against the Directory Server.
@@ -250,8 +255,10 @@ The lib389 CI tests that can be run against the Directory Server.
%package -n python%{python3_pkgversion}-%{srcname}-tests
Summary: The lib389 Continuous Integration Tests
Group: Development/Libraries
-Requires: python%{python3_pkgversion}-lib389
BuildArch: noarch
+Requires: python%{python3_pkgversion}-lib389
+BuildRequires: python%{python3_pkgversion}-devel
+BuildRequires: python%{python3_pkgversion}-setuptools
%description -n python%{python3_pkgversion}-%{srcname}-tests
The lib389 CI tests that can be run against the Directory Server.
@@ -318,9 +325,11 @@ make %{?_smp_mflags}
make setup.py
-%py2_build
%if 0%{?rhel} >= 8 || 0%{?fedora}
+%py2_build
%py3_build
+%else
+%py_build
%endif
%install
@@ -335,9 +344,11 @@ popd
make DESTDIR="$RPM_BUILD_ROOT" install
-%py2_install
%if 0%{?rhel} >= 8 || 0%{?fedora}
+%py2_install
%py3_install
+%else
+%py_install
%endif
mkdir -p $RPM_BUILD_ROOT/var/log/%{pkgname}
@@ -535,10 +546,12 @@ fi
%doc LICENSE LICENSE.GPLv3+
%{python2_sitelib}/*
+%if 0%{?rhel} >= 8 || 0%{?fedora}
%files -n python%{python3_pkgversion}-%{srcname}-tests
%defattr(-,root,root,-)
%doc LICENSE LICENSE.GPLv3+
%{python3_sitelib}/*
+%endif
%changelog
* Mon Dec 21 2015 Mark Reynolds <mreynolds(a)redhat.com> - 1.3.4.1-3
7 years, 4 months
ldap/servers
by William Brown
ldap/servers/plugins/replication/cl5_api.c | 5 +++
ldap/servers/plugins/replication/repl5_mtnode_ext.c | 28 +++++++++++++++++---
ldap/servers/plugins/replication/repl5_replica.c | 28 +++-----------------
ldap/servers/slapd/back-ldbm/ldbm_config.c | 6 ++++
ldap/servers/slapd/backend_manager.c | 12 ++++++++
ldap/servers/slapd/connection.c | 15 ++++++++++
ldap/servers/slapd/mapping_tree.c | 13 ++++++---
ldap/servers/slapd/pblock.c | 25 +++++++++++++++++
ldap/servers/slapd/plugin.c | 12 +++++---
9 files changed, 109 insertions(+), 35 deletions(-)
New commits:
commit c87c57e024646c1f51bc2f04aa8af559557c0f4c
Author: William Brown <firstyear(a)redhat.com>
Date: Tue Dec 20 16:26:58 2016 +1000
Ticket 49066 - Memory leaks in server - part 2
Bug Description: This resolves a number of memory leaks and code cleanups in
the server.
Fix Description: This fixes leaks from server shutdown especially in ldbm
which was not freeing a number of mutexes correctly, and replication which
was not freeing configurations and certain changelog items correctly.
This also resolves a schema issue in plugin configuration creation, and a
segfault in server shutdown when replication has been disabled.
https://fedorahosted.org/389/ticket/49066
Author: wibrown
Review by: mreynolds, tbordaz (Thanks!)
diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c
index 5cf95a0..1fe4500 100644
--- a/ldap/servers/plugins/replication/cl5_api.c
+++ b/ldap/servers/plugins/replication/cl5_api.c
@@ -433,6 +433,11 @@ void cl5Cleanup ()
s_cl5Desc.clLock = NULL;
}
+ if (s_cl5Desc.clCvar != NULL) {
+ PR_DestroyCondVar(s_cl5Desc.clCvar);
+ s_cl5Desc.clCvar = NULL;
+ }
+
memset (&s_cl5Desc, 0, sizeof (s_cl5Desc));
}
diff --git a/ldap/servers/plugins/replication/repl5_mtnode_ext.c b/ldap/servers/plugins/replication/repl5_mtnode_ext.c
index 8d0c514..70fe2cc 100644
--- a/ldap/servers/plugins/replication/repl5_mtnode_ext.c
+++ b/ldap/servers/plugins/replication/repl5_mtnode_ext.c
@@ -35,10 +35,30 @@ multimaster_mtnode_extension_init ()
}
void
+multimaster_mtnode_free_replica_object(const Slapi_DN *root) {
+ mapping_tree_node *mtnode;
+ multimaster_mtnode_extension *ext;
+
+ /* In some cases, root can be an empty SDN */
+ /* Othertimes, a bug is setting root to 0x8, and I can't see where... */
+ if (root != NULL) {
+ mtnode = slapi_get_mapping_tree_node_by_dn(root);
+ if (mtnode != NULL) {
+ ext = (multimaster_mtnode_extension *)repl_con_get_ext (REPL_CON_EXT_MTNODE, mtnode);
+ if (ext != NULL && ext->replica != NULL) {
+ object_release(ext->replica);
+ }
+ }
+ }
+}
+
+void
multimaster_mtnode_extension_destroy ()
{
- dl_cleanup (root_list, (FREEFN)slapi_sdn_free);
- dl_free (&root_list);
+ /* First iterate over the list to free the replica infos */
+ /* dl_cleanup (root_list, (FREEFN)multimaster_mtnode_free_replica_object); */
+ dl_cleanup (root_list, (FREEFN)slapi_sdn_free);
+ dl_free (&root_list);
}
/* This function loops over the list of node roots, constructing replica objects
@@ -59,7 +79,7 @@ multimaster_mtnode_construct_replicas ()
if (r)
{
- mtnode = slapi_get_mapping_tree_node_by_dn(root);
+ mtnode = slapi_get_mapping_tree_node_by_dn(root);
if (mtnode == NULL)
{
slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name,
@@ -110,7 +130,7 @@ multimaster_mtnode_extension_constructor (void *object, void *parent)
we can't fully initialize replica here since backends
are not yet started. Instead, replica objects are created
during replication plugin startup */
- if (root)
+ if (root != NULL && !slapi_sdn_isempty(root))
{
/* for now just store node root in the root list */
dl_add (root_list, slapi_sdn_dup (root));
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
index b66eb8b..04e7e8f 100644
--- a/ldap/servers/plugins/replication/repl5_replica.c
+++ b/ldap/servers/plugins/replication/repl5_replica.c
@@ -156,7 +156,6 @@ replica_new_from_entry (Slapi_Entry *e, char *errortext, PRBool is_add_operation
{
int rc = 0;
Replica *r;
- char *repl_name = NULL;
if (e == NULL)
{
@@ -245,8 +244,7 @@ replica_new_from_entry (Slapi_Entry *e, char *errortext, PRBool is_add_operation
/* ONREPL - the state update can occur before the entry is added to the DIT.
In that case the updated would fail but nothing bad would happen. The next
scheduled update would save the state */
- repl_name = slapi_ch_strdup (r->repl_name);
- r->repl_eqcxt_rs = slapi_eq_repeat(replica_update_state, repl_name,
+ r->repl_eqcxt_rs = slapi_eq_repeat(replica_update_state, r->repl_name,
current_time () + START_UPDATE_DELAY, RUV_SAVE_INTERVAL);
if (r->tombstone_reap_interval > 0)
@@ -255,8 +253,7 @@ replica_new_from_entry (Slapi_Entry *e, char *errortext, PRBool is_add_operation
* Reap Tombstone should be started some time after the plugin started.
* This will allow the server to fully start before consuming resources.
*/
- repl_name = slapi_ch_strdup (r->repl_name);
- r->repl_eqcxt_tr = slapi_eq_repeat(eq_cb_reap_tombstones, repl_name,
+ r->repl_eqcxt_tr = slapi_eq_repeat(eq_cb_reap_tombstones, r->repl_name,
current_time() + r->tombstone_reap_interval,
1000 * r->tombstone_reap_interval);
}
@@ -313,7 +310,6 @@ void
replica_destroy(void **arg)
{
Replica *r;
- void *repl_name;
if (arg == NULL)
return;
@@ -333,16 +329,12 @@ replica_destroy(void **arg)
if (r->repl_eqcxt_rs)
{
- repl_name = slapi_eq_get_arg (r->repl_eqcxt_rs);
- slapi_ch_free (&repl_name);
slapi_eq_cancel(r->repl_eqcxt_rs);
r->repl_eqcxt_rs = NULL;
}
if (r->repl_eqcxt_tr)
{
- repl_name = slapi_eq_get_arg (r->repl_eqcxt_tr);
- slapi_ch_free (&repl_name);
slapi_eq_cancel(r->repl_eqcxt_tr);
r->repl_eqcxt_tr = NULL;
}
@@ -1623,8 +1615,6 @@ consumer5_set_mapping_tree_state_for_replica(const Replica *r, RUV *supplierRuv)
void
replica_set_enabled (Replica *r, PRBool enable)
{
- char *repl_name = NULL;
-
PR_ASSERT (r);
replica_lock(r->repl_lock);
@@ -1633,8 +1623,7 @@ replica_set_enabled (Replica *r, PRBool enable)
{
if (r->repl_eqcxt_rs == NULL) /* event is not already registered */
{
- repl_name = slapi_ch_strdup (r->repl_name);
- r->repl_eqcxt_rs = slapi_eq_repeat(replica_update_state, repl_name,
+ r->repl_eqcxt_rs = slapi_eq_repeat(replica_update_state, r->repl_name,
current_time() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL);
}
}
@@ -1642,8 +1631,6 @@ replica_set_enabled (Replica *r, PRBool enable)
{
if (r->repl_eqcxt_rs) /* event is still registerd */
{
- repl_name = slapi_eq_get_arg (r->repl_eqcxt_rs);
- slapi_ch_free ((void**)&repl_name);
slapi_eq_cancel(r->repl_eqcxt_rs);
r->repl_eqcxt_rs = NULL;
}
@@ -1898,7 +1885,7 @@ int replica_check_for_data_reload (Replica *r, void *arg)
slapi_sdn_get_dn(r->repl_root));
rc = 0;
}
- } // slapi_disordely_shutdown
+ } /* slapi_disordely_shutdown */
object_release (ruv_obj);
}
@@ -3868,8 +3855,6 @@ replica_set_purge_delay(Replica *r, PRUint32 purge_delay)
void
replica_set_tombstone_reap_interval (Replica *r, long interval)
{
- char *repl_name;
-
replica_lock(r->repl_lock);
/*
@@ -3880,8 +3865,6 @@ replica_set_tombstone_reap_interval (Replica *r, long interval)
{
int found;
- repl_name = slapi_eq_get_arg (r->repl_eqcxt_tr);
- slapi_ch_free ((void**)&repl_name);
found = slapi_eq_cancel (r->repl_eqcxt_tr);
slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name,
"replica_set_tombstone_reap_interval - tombstone_reap event (interval=%ld) was %s\n",
@@ -3891,8 +3874,7 @@ replica_set_tombstone_reap_interval (Replica *r, long interval)
r->tombstone_reap_interval = interval;
if ( interval > 0 && r->repl_eqcxt_tr == NULL )
{
- repl_name = slapi_ch_strdup (r->repl_name);
- r->repl_eqcxt_tr = slapi_eq_repeat (eq_cb_reap_tombstones, repl_name,
+ r->repl_eqcxt_tr = slapi_eq_repeat (eq_cb_reap_tombstones, r->repl_name,
current_time() + r->tombstone_reap_interval,
1000 * r->tombstone_reap_interval);
slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name,
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c
index 686b232..bf1b9c7 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_config.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c
@@ -2251,6 +2251,12 @@ ldbm_config_destroy(struct ldbminfo *li) {
}
slapi_ch_free((void **) &(li->li_new_directory));
slapi_ch_free((void **) &(li->li_directory));
+ /* Destroy the mutexes and cond var */
+ PR_DestroyLock(li->li_dbcache_mutex);
+ PR_DestroyLock(li->li_shutdown_mutex);
+ PR_DestroyLock(li->li_config_mutex);
+ PR_DestroyCondVar(li->li_dbcache_cv);
+
/* Finally free the ldbminfo */
slapi_ch_free((void **)&li);
}
diff --git a/ldap/servers/slapd/backend_manager.c b/ldap/servers/slapd/backend_manager.c
index e2b524c..c9b98e2 100644
--- a/ldap/servers/slapd/backend_manager.c
+++ b/ldap/servers/slapd/backend_manager.c
@@ -303,8 +303,20 @@ be_cleanupall()
slapi_ch_free((void**)&backends);
}
+/*
+ * This ifdef is needed to resolve a gcc 6 issue which throws a false positive
+ * here. See also: https://bugzilla.redhat.com/show_bug.cgi?id=1386445
+ *
+ * It's a good idea to run this in EL7 to check the overflows etc, but with
+ * GCC 6 and lsan to find memory leaks ....
+ */
void
be_flushall()
+#if defined(__has_feature)
+# if __has_feature(address_sanitizer) && __GNUC__ == 6
+__attribute__((no_sanitize("address")))
+# endif
+#endif
{
int i;
Slapi_PBlock pb = {0};
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
index 9e68a59..7c83c66 100644
--- a/ldap/servers/slapd/connection.c
+++ b/ldap/servers/slapd/connection.c
@@ -155,6 +155,21 @@ connection_done(Connection *conn)
}
/* PAGED_RESULTS */
pagedresults_cleanup_all(conn, 0);
+
+ /*
+ * WARNING: There is a memory leak here! During a shutdown, connections
+ * can still have events in ns add io timeout job because of post connection
+ * or closing. The issue is that we can track the *jobs*, we only have the
+ * connection, and we can have 1:N connection:jobs. So we can lose IO jobs
+ * here. Thankfully, it's only for existing connections, and they are closed
+ * anyway, so it's just a mem / mutex leak.
+ *
+ * To fix it, involves the rewrite of connection handling, which will happen
+ * soon anyway, so please be patient while I undertake this!
+ *
+ * - wibrown December 2016.
+ */
+
}
/*
diff --git a/ldap/servers/slapd/mapping_tree.c b/ldap/servers/slapd/mapping_tree.c
index 4e91757..1b8d2d9 100644
--- a/ldap/servers/slapd/mapping_tree.c
+++ b/ldap/servers/slapd/mapping_tree.c
@@ -2064,7 +2064,11 @@ done:
static int sdn_is_nulldn(const Slapi_DN *sdn){
if(sdn){
- const char *dn= slapi_sdn_get_ndn(sdn);
+ /*
+ * Use get_dn rather than get_ndn, because an issue in get_ndn exists
+ * where ndn can be set to 0x8
+ */
+ const char *dn= slapi_sdn_get_dn(sdn);
if(dn && ( '\0' == *dn)){
return 1;
}
@@ -2900,7 +2904,7 @@ slapi_get_mapping_tree_node_by_dn(const Slapi_DN *dn)
* it has been assigned to a different backend.
* e.g: a container backend
*/
- if ( sdn_is_nulldn(dn) && mapping_tree_root && mapping_tree_root->mtn_be[0] &&
+ if (sdn_is_nulldn(dn) && mapping_tree_root && mapping_tree_root->mtn_be[0] &&
mapping_tree_root->mtn_be[0] != slapi_be_select_by_instance_name(DSE_BACKEND)) {
return( mapping_tree_root );
}
@@ -2911,10 +2915,11 @@ slapi_get_mapping_tree_node_by_dn(const Slapi_DN *dn)
next_best_match = best_matching_child(current_best_match, dn);
}
- if (current_best_match == mapping_tree_root)
+ if (current_best_match == mapping_tree_root) {
return NULL;
- else
+ } else {
return current_best_match;
+ }
}
diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
index 1c35148..f074394 100644
--- a/ldap/servers/slapd/pblock.c
+++ b/ldap/servers/slapd/pblock.c
@@ -113,9 +113,21 @@ if ( PBLOCK ->pb_plugin->plg_type != TYPE) return( -1 )
#define SLAPI_PBLOCK_GET_PLUGIN_RELATED_POINTER( pb, element ) \
((pb)->pb_plugin == NULL ? NULL : (pb)->pb_plugin->element)
+/*
+ * This ifdef is needed to resolve a gcc 6 issue which throws a false positive
+ * here. See also: https://bugzilla.redhat.com/show_bug.cgi?id=1386445
+ *
+ * It's a good idea to run this in EL7 to check the overflows etc, but with
+ * GCC 6 and lsan to find memory leaks ....
+ */
int
slapi_pblock_get( Slapi_PBlock *pblock, int arg, void *value )
+#if defined(__has_feature)
+# if __has_feature(address_sanitizer) && __GNUC__ == 6
+__attribute__((no_sanitize("address")))
+# endif
+#endif
{
char *authtype;
Slapi_Backend *be;
@@ -1985,8 +1997,21 @@ slapi_pblock_get( Slapi_PBlock *pblock, int arg, void *value )
return( 0 );
}
+/*
+ * This ifdef is needed to resolve a gcc 6 issue which throws a false positive
+ * here. See also: https://bugzilla.redhat.com/show_bug.cgi?id=1386445
+ *
+ * It's a good idea to run this in EL7 to check the overflows etc, but with
+ * GCC 6 and lsan to find memory leaks ....
+ */
+
int
slapi_pblock_set( Slapi_PBlock *pblock, int arg, void *value )
+#if defined(__has_feature)
+# if __has_feature(address_sanitizer) && __GNUC__ == 6
+__attribute__((no_sanitize("address")))
+# endif
+#endif
{
char *authtype;
diff --git a/ldap/servers/slapd/plugin.c b/ldap/servers/slapd/plugin.c
index 0416ff8..fea1a2a 100644
--- a/ldap/servers/slapd/plugin.c
+++ b/ldap/servers/slapd/plugin.c
@@ -301,10 +301,14 @@ slapi_register_plugin_ext(
slapi_entry_init_ext(e, sdn, NULL);
slapi_sdn_free(&sdn);
- slapi_entry_attr_set_charptr(e, "cn", name);
- slapi_entry_attr_set_charptr(e, ATTR_PLUGIN_TYPE, plugintype);
- if (!enabled)
- slapi_entry_attr_set_charptr(e, ATTR_PLUGIN_ENABLED, "off");
+ slapi_entry_attr_set_charptr(e, "cn", name);
+ /* Need a valid objectClass! No plugin OC so just use extensible :( */
+ slapi_entry_add_string(e, "objectclass", "top");
+ slapi_entry_add_string(e, "objectclass", "extensibleObject");
+ slapi_entry_attr_set_charptr(e, ATTR_PLUGIN_TYPE, plugintype);
+ if (!enabled) {
+ slapi_entry_attr_set_charptr(e, ATTR_PLUGIN_ENABLED, "off");
+ }
slapi_entry_attr_set_charptr(e, ATTR_PLUGIN_INITFN, initsymbol);
7 years, 4 months
Branch '389-ds-base-1.2.11' - ldap/servers
by Mark Reynolds
ldap/servers/plugins/memberof/memberof.c | 48 +++++++++++++++++++++----------
ldap/servers/slapd/plugin_internal_op.c | 27 +++++++----------
2 files changed, 46 insertions(+), 29 deletions(-)
New commits:
commit 8efcc704c42fb11e0950b12d7abaf65e77050070
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Tue Dec 20 14:59:02 2016 -0500
Ticket 49072 - validate memberof fixup task args
Bug Description: If an invalid base dn, or invalid filter was provided
in the task there was no way to tell that the task
actually failed.
Fix Description: Log an error, and properly update the task status/exit
code when an error occurs.
Added CI test (also fixed some issues in the dynamic
plugins test suite).
https://fedorahosted.org/389/ticket/49072
Reviewed by: nhosoi(Thanks!)
(cherry picked from commit a79ae70df6b20cd288fca511f784c414e8c52df4)
(cherry picked from commit b0020b73d34bdd630fb5b1a3e4fcebbb4b81f9c9)
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
index 7cb0e27..fd080da 100644
--- a/ldap/servers/plugins/memberof/memberof.c
+++ b/ldap/servers/plugins/memberof/memberof.c
@@ -92,6 +92,13 @@ typedef struct _memberof_get_groups_data
Slapi_ValueSet **groupvals;
} memberof_get_groups_data;
+typedef struct _task_data
+{
+ char *dn;
+ char *bind_dn;
+ char *filter_str;
+} task_data;
+
/*** function prototypes ***/
/* exported functions */
@@ -169,7 +176,7 @@ static void memberof_task_destructor(Slapi_Task *task);
static const char *fetch_attr(Slapi_Entry *e, const char *attrname,
const char *default_val);
static void memberof_fixup_task_thread(void *arg);
-static int memberof_fix_memberof(MemberOfConfig *config, char *dn, char *filter_str);
+static int memberof_fix_memberof(MemberOfConfig *config, Slapi_Task *task, task_data *td);
static int memberof_fix_memberof_callback(Slapi_Entry *e, void *callback_data);
@@ -2271,13 +2278,6 @@ void memberof_unlock()
PR_ExitMonitor(memberof_operation_lock);
}
-typedef struct _task_data
-{
- char *dn;
- char *bind_dn;
- char *filter_str;
-} task_data;
-
void memberof_fixup_task_thread(void *arg)
{
MemberOfConfig configCopy = {0, 0, 0, 0};
@@ -2285,6 +2285,11 @@ void memberof_fixup_task_thread(void *arg)
task_data *td = NULL;
int rc = 0;
+
+ if (!task) {
+ return; /* no task */
+ }
+
/* Fetch our task data from the task */
td = (task_data *)slapi_task_get_data(task);
@@ -2292,8 +2297,10 @@ void memberof_fixup_task_thread(void *arg)
slapi_td_set_dn(slapi_ch_strdup(td->bind_dn));
slapi_task_begin(task, 1);
- slapi_task_log_notice(task, "Memberof task starts (arg: %s) ...\n",
- td->filter_str);
+ slapi_task_log_notice(task, "Memberof task starts (filter: %s) ...\n",
+ td->filter_str);
+ slapi_log_error(SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM,
+ "Memberof task starts (filter: \"%s\") ...\n", td->filter_str);
/* We need to get the config lock first. Trying to get the
* config lock after we already hold the op lock can cause
@@ -2310,7 +2317,7 @@ void memberof_fixup_task_thread(void *arg)
memberof_lock();
/* do real work */
- rc = memberof_fix_memberof(&configCopy, td->dn, td->filter_str);
+ rc = memberof_fix_memberof(&configCopy, task, td);
/* release the memberOf operation lock */
memberof_unlock();
@@ -2320,6 +2327,9 @@ void memberof_fixup_task_thread(void *arg)
slapi_task_log_notice(task, "Memberof task finished.");
slapi_task_log_status(task, "Memberof task finished.");
slapi_task_inc_progress(task);
+ slapi_log_error(SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM,
+ "Memberof task finished (filter: %s) result: %d\n",
+ td->filter_str, rc);
/* this will queue the destruction of the task */
slapi_task_finish(task, rc);
@@ -2434,13 +2444,13 @@ memberof_task_destructor(Slapi_Task *task)
}
}
-int memberof_fix_memberof(MemberOfConfig *config, char *dn, char *filter_str)
+int memberof_fix_memberof(MemberOfConfig *config, Slapi_Task *task, task_data *td)
{
int rc = 0;
Slapi_PBlock *search_pb = slapi_pblock_new();
- slapi_search_internal_set_pb(search_pb, dn,
- LDAP_SCOPE_SUBTREE, filter_str, 0, 0,
+ slapi_search_internal_set_pb(search_pb, td->dn,
+ LDAP_SCOPE_SUBTREE, td->filter_str, 0, 0,
0, 0,
memberof_get_plugin_id(),
0);
@@ -2449,6 +2459,16 @@ int memberof_fix_memberof(MemberOfConfig *config, char *dn, char *filter_str)
config,
0, memberof_fix_memberof_callback,
0);
+ if (rc){
+ char *errmsg;
+ int result;
+
+ slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_RESULT, &result);
+ errmsg = ldap_err2string(result);
+ slapi_log_err(SLAPI_LOG_ERR, MEMBEROF_PLUGIN_SUBSYSTEM,
+ "memberof_fix_memberof - Failed (%s)\n", errmsg );
+ slapi_task_log_notice(task, "Memberof task failed (%s)\n", errmsg );
+ }
slapi_pblock_destroy(search_pb);
diff --git a/ldap/servers/slapd/plugin_internal_op.c b/ldap/servers/slapd/plugin_internal_op.c
index 4c7462d..f916caf 100644
--- a/ldap/servers/slapd/plugin_internal_op.c
+++ b/ldap/servers/slapd/plugin_internal_op.c
@@ -757,7 +757,7 @@ search_internal_callback_pb (Slapi_PBlock *pb, void *callback_data,
if (ifstr == NULL || (scope != LDAP_SCOPE_BASE && scope != LDAP_SCOPE_ONELEVEL
&& scope != LDAP_SCOPE_SUBTREE))
{
- opresult = LDAP_PARAM_ERROR;
+ opresult = LDAP_PARAM_ERROR;
slapi_pblock_set(pb, SLAPI_PLUGIN_INTOP_RESULT, &opresult);
return -1;
}
@@ -774,19 +774,19 @@ search_internal_callback_pb (Slapi_PBlock *pb, void *callback_data,
op->o_search_referral_handler = internal_ref_entry_callback;
filter = slapi_str2filter((fstr = slapi_ch_strdup(ifstr)));
- if(scope == LDAP_SCOPE_BASE) {
- filter->f_flags |= (SLAPI_FILTER_LDAPSUBENTRY |
- SLAPI_FILTER_TOMBSTONE | SLAPI_FILTER_RUV);
+ if (NULL == filter) {
+ int result = LDAP_FILTER_ERROR;
+ send_ldap_result(pb, result, NULL, NULL, 0, NULL);
+ slapi_pblock_set(pb, SLAPI_PLUGIN_INTOP_RESULT, &result);
+ rc = -1;
+ goto done;
}
- if (NULL == filter)
- {
- send_ldap_result(pb, LDAP_FILTER_ERROR, NULL, NULL, 0, NULL);
- rc = -1;
- goto done;
+ if (scope == LDAP_SCOPE_BASE) {
+ filter->f_flags |= (SLAPI_FILTER_LDAPSUBENTRY |
+ SLAPI_FILTER_TOMBSTONE | SLAPI_FILTER_RUV);
}
filter_normalize(filter);
-
slapi_pblock_set(pb, SLAPI_SEARCH_FILTER, filter);
slapi_pblock_set(pb, SLAPI_REQCONTROLS, controls);
@@ -814,11 +814,8 @@ search_internal_callback_pb (Slapi_PBlock *pb, void *callback_data,
slapi_pblock_get(pb, SLAPI_SEARCH_FILTER, &filter);
done:
- slapi_ch_free((void **) & fstr);
- if (filter != NULL)
- {
- slapi_filter_free(filter, 1 /* recurse */);
- }
+ slapi_ch_free_string(&fstr);
+ slapi_filter_free(filter, 1 /* recurse */);
slapi_pblock_get(pb, SLAPI_SEARCH_ATTRS, &tmp_attrs);
slapi_ch_array_free(tmp_attrs);
slapi_pblock_set(pb, SLAPI_SEARCH_ATTRS, NULL);
7 years, 4 months
Branch '389-ds-base-1.3.4' - dirsrvtests/tests ldap/servers
by Mark Reynolds
dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py | 79 +++++++++-
dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py | 22 +-
dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py | 3
ldap/servers/plugins/memberof/memberof.c | 59 ++++---
ldap/servers/slapd/plugin_internal_op.c | 27 +--
5 files changed, 143 insertions(+), 47 deletions(-)
New commits:
commit f708c7f5e01c6fb62a2125aa039b2d13f5f66697
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Tue Dec 20 14:59:02 2016 -0500
Ticket 49072 - validate memberof fixup task args
Bug Description: If an invalid base dn, or invalid filter was provided
in the task there was no way to tell that the task
actually failed.
Fix Description: Log an error, and properly update the task status/exit
code when an error occurs.
Added CI test (also fixed some issues in the dynamic
plugins test suite).
https://fedorahosted.org/389/ticket/49072
Reviewed by: nhosoi(Thanks!)
(cherry picked from commit a79ae70df6b20cd288fca511f784c414e8c52df4)
(cherry picked from commit b0020b73d34bdd630fb5b1a3e4fcebbb4b81f9c9)
diff --git a/dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py b/dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py
index 30dfa88..0a74eef 100644
--- a/dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py
+++ b/dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py
@@ -96,6 +96,7 @@ def test_dependency(inst, plugin):
################################################################################
def wait_for_task(conn, task_dn):
finished = False
+ exitcode = 0
count = 0
while count < 60:
try:
@@ -105,6 +106,7 @@ def wait_for_task(conn, task_dn):
assert False
if task_entry[0].hasAttr('nstaskexitcode'):
# task is done
+ exitcode = task_entry[0].nsTaskExitCode
finished = True
break
except ldap.LDAPError as e:
@@ -117,6 +119,8 @@ def wait_for_task(conn, task_dn):
log.fatal('wait_for_task: Task (%s) did not complete!' % task_dn)
assert False
+ return exitcode
+
################################################################################
#
@@ -1416,9 +1420,82 @@ def test_memberof(inst, args=None):
log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc'])
assert False
- # Enable the plugin, and run the task
+ # Enable memberof plugin
inst.plugins.enable(name=PLUGIN_MEMBER_OF)
+ #############################################################
+ # Test memberOf fixup arg validation: Test the DN and filter
+ #############################################################
+
+ #
+ # Test bad/nonexistant DN
+ #
+ TASK_DN = 'cn=task-' + str(int(time.time())) + ',' + DN_MBO_TASK
+ try:
+ inst.add_s(Entry((TASK_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'basedn': DEFAULT_SUFFIX + "bad",
+ 'filter': 'objectclass=top'})))
+ except ldap.LDAPError as e:
+ log.fatal('test_memberof: Failed to add task(bad dn): error ' +
+ e.message['desc'])
+ assert False
+
+ exitcode = wait_for_task(inst, TASK_DN)
+ if exitcode == "0":
+ # We should an error
+ log.fatal('test_memberof: Task with invalid DN still reported success')
+ assert False
+
+ #
+ # Test invalid DN syntax
+ #
+ TASK_DN = 'cn=task-' + str(int(time.time())) + ',' + DN_MBO_TASK
+ try:
+ inst.add_s(Entry((TASK_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'basedn': "bad",
+ 'filter': 'objectclass=top'})))
+ except ldap.LDAPError as e:
+ log.fatal('test_memberof: Failed to add task(invalid dn syntax): ' +
+ e.message['desc'])
+ assert False
+
+ exitcode = wait_for_task(inst, TASK_DN)
+ if exitcode == "0":
+ # We should an error
+ log.fatal('test_memberof: Task with invalid DN syntax still reported' +
+ ' success')
+ assert False
+
+ #
+ # Test bad filter (missing closing parenthesis)
+ #
+ TASK_DN = 'cn=task-' + str(int(time.time())) + ',' + DN_MBO_TASK
+ try:
+ inst.add_s(Entry((TASK_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'basedn': DEFAULT_SUFFIX,
+ 'filter': '(objectclass=top'})))
+ except ldap.LDAPError as e:
+ log.fatal('test_memberof: Failed to add task(bad filter: error ' +
+ e.message['desc'])
+ assert False
+
+ exitcode = wait_for_task(inst, TASK_DN)
+ if exitcode == "0":
+ # We should an error
+ log.fatal('test_memberof: Task with invalid filter still reported ' +
+ 'success')
+ assert False
+
+ ####################################################
+ # Test fixup works
+ ####################################################
+
+ #
+ # Run the task and validate that it worked
+ #
TASK_DN = 'cn=task-' + str(int(time.time())) + ',' + DN_MBO_TASK
try:
inst.add_s(Entry((TASK_DN, {
diff --git a/dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py b/dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py
index 920d3f6..79a8086 100644
--- a/dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py
+++ b/dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py
@@ -88,8 +88,9 @@ class DelUsers(threading.Thread):
try:
conn.delete_s(USER_DN)
except ldap.LDAPError as e:
- log.fatal('DeleteUsers: failed to delete (' + USER_DN + ') error: ' + e.message['desc'])
- assert False
+ if e == ldap.UNAVAILABLE or e == ldap.SERVER_DOWN:
+ log.fatal('DeleteUsers: failed to delete (' + USER_DN + ') error: ' + e.message['desc'])
+ assert False
idx += 1
@@ -115,11 +116,10 @@ class AddUsers(threading.Thread):
conn.add_s(Entry((GROUP_DN,
{'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
'uid': 'user' + str(idx)})))
- except ldap.ALREADY_EXISTS:
- pass
except ldap.LDAPError as e:
- log.fatal('AddUsers: failed to add group (' + USER_DN + ') error: ' + e.message['desc'])
- assert False
+ if e == ldap.UNAVAILABLE or e == ldap.SERVER_DOWN:
+ log.fatal('AddUsers: failed to add group (' + USER_DN + ') error: ' + e.message['desc'])
+ assert False
log.info('AddUsers - Adding ' + str(NUM_USERS) + ' entries (' + self.rdnval + ')...')
@@ -129,16 +129,18 @@ class AddUsers(threading.Thread):
conn.add_s(Entry((USER_DN, {'objectclass': 'top extensibleObject'.split(),
'uid': 'user' + str(idx)})))
except ldap.LDAPError as e:
- log.fatal('AddUsers: failed to add (' + USER_DN + ') error: ' + e.message['desc'])
- assert False
+ if e == ldap.UNAVAILABLE or e == ldap.SERVER_DOWN:
+ log.fatal('AddUsers: failed to add (' + USER_DN + ') error: ' + e.message['desc'])
+ assert False
if self.addToGroup:
# Add the user to the group
try:
conn.modify_s(GROUP_DN, [(ldap.MOD_ADD, 'uniquemember', USER_DN)])
except ldap.LDAPError as e:
- log.fatal('AddUsers: Failed to add user' + USER_DN + ' to group: error ' + e.message['desc'])
- assert False
+ if e == ldap.UNAVAILABLE or e == ldap.SERVER_DOWN:
+ log.fatal('AddUsers: Failed to add user' + USER_DN + ' to group: error ' + e.message['desc'])
+ assert False
idx += 1
diff --git a/dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py b/dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py
index c05c402..f67e5b2 100644
--- a/dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py
+++ b/dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py
@@ -293,7 +293,8 @@ def test_dynamic_plugins(topology):
except:
log.info('Stress test failed!')
- repl_fail(replica_inst)
+ if replication_run:
+ repl_fail(replica_inst)
stress_count += 1
log.info('####################################################################')
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
index aad300f..6ff9c9a 100644
--- a/ldap/servers/plugins/memberof/memberof.c
+++ b/ldap/servers/plugins/memberof/memberof.c
@@ -65,6 +65,13 @@ typedef struct _memberof_get_groups_data
Slapi_ValueSet **group_norm_vals;
} memberof_get_groups_data;
+typedef struct _task_data
+{
+ char *dn;
+ char *bind_dn;
+ char *filter_str;
+} task_data;
+
/*** function prototypes ***/
/* exported functions */
@@ -142,7 +149,7 @@ static void memberof_task_destructor(Slapi_Task *task);
static const char *fetch_attr(Slapi_Entry *e, const char *attrname,
const char *default_val);
static void memberof_fixup_task_thread(void *arg);
-static int memberof_fix_memberof(MemberOfConfig *config, char *dn, char *filter_str);
+static int memberof_fix_memberof(MemberOfConfig *config, Slapi_Task *task, task_data *td);
static int memberof_fix_memberof_callback(Slapi_Entry *e, void *callback_data);
static int memberof_entry_in_scope(MemberOfConfig *config, Slapi_DN *sdn);
static int memberof_add_objectclass(char *auto_add_oc, const char *dn);
@@ -2625,13 +2632,6 @@ void memberof_unlock()
}
}
-typedef struct _task_data
-{
- char *dn;
- char *bind_dn;
- char *filter_str;
-} task_data;
-
void memberof_fixup_task_thread(void *arg)
{
MemberOfConfig configCopy = {0, 0, 0, 0};
@@ -2646,6 +2646,7 @@ void memberof_fixup_task_thread(void *arg)
slapi_task_inc_refcount(task);
slapi_log_error(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
"memberof_fixup_task_thread --> refcount incremented.\n" );
+
/* Fetch our task data from the task */
td = (task_data *)slapi_task_get_data(task);
@@ -2653,10 +2654,10 @@ void memberof_fixup_task_thread(void *arg)
slapi_td_set_dn(slapi_ch_strdup(td->bind_dn));
slapi_task_begin(task, 1);
- slapi_task_log_notice(task, "Memberof task starts (arg: %s) ...\n",
+ slapi_task_log_notice(task, "Memberof task starts (filter: %s) ...\n",
td->filter_str);
slapi_log_error(SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM,
- "Memberof task starts (arg: %s) ...\n", td->filter_str);
+ "Memberof task starts (filter: \"%s\") ...\n", td->filter_str);
/* We need to get the config lock first. Trying to get the
* config lock after we already hold the op lock can cause
@@ -2671,7 +2672,7 @@ void memberof_fixup_task_thread(void *arg)
if (usetxn) {
Slapi_DN *sdn = slapi_sdn_new_dn_byref(td->dn);
- Slapi_Backend *be = slapi_be_select(sdn);
+ Slapi_Backend *be = slapi_be_select_exact(sdn);
slapi_sdn_free(&sdn);
if (be) {
fixup_pb = slapi_pblock_new();
@@ -2679,12 +2680,18 @@ void memberof_fixup_task_thread(void *arg)
rc = slapi_back_transaction_begin(fixup_pb);
if (rc) {
slapi_log_error(SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM,
- "memberof_fixup_task_thread: failed to start transaction\n");
+ "memberof_fixup_task_thread: failed to start transaction\n");
+ goto done;
}
} else {
slapi_log_error(SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM,
- "memberof_fixup_task_thread: failed to get be backend from %s\n",
- td->dn);
+ "memberof_fixup_task_thread: failed to get be backend from (%s)\n",
+ td->dn);
+ slapi_task_log_notice(task, "Memberof task - Failed to get be backend from (%s)\n",
+ td->dn);
+ rc = -1;
+ goto done;
+
}
}
@@ -2692,13 +2699,14 @@ void memberof_fixup_task_thread(void *arg)
memberof_lock();
/* do real work */
- rc = memberof_fix_memberof(&configCopy, td->dn, td->filter_str);
+ rc = memberof_fix_memberof(&configCopy, task, td);
/* release the memberOf operation lock */
memberof_unlock();
+done:
if (usetxn && fixup_pb) {
- if (rc) { /* failes */
+ if (rc) { /* failed */
slapi_back_transaction_abort(fixup_pb);
} else {
slapi_back_transaction_commit(fixup_pb);
@@ -2711,7 +2719,8 @@ void memberof_fixup_task_thread(void *arg)
slapi_task_log_status(task, "Memberof task finished.");
slapi_task_inc_progress(task);
slapi_log_error(SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM,
- "Memberof task finished (arg: %s) ...\n", td->filter_str);
+ "Memberof task finished (filter: %s) result: %d\n",
+ td->filter_str, rc);
/* this will queue the destruction of the task */
slapi_task_finish(task, rc);
@@ -2829,13 +2838,13 @@ memberof_task_destructor(Slapi_Task *task)
"memberof_task_destructor <--\n" );
}
-int memberof_fix_memberof(MemberOfConfig *config, char *dn, char *filter_str)
+int memberof_fix_memberof(MemberOfConfig *config, Slapi_Task *task, task_data *td)
{
int rc = 0;
Slapi_PBlock *search_pb = slapi_pblock_new();
- slapi_search_internal_set_pb(search_pb, dn,
- LDAP_SCOPE_SUBTREE, filter_str, 0, 0,
+ slapi_search_internal_set_pb(search_pb, td->dn,
+ LDAP_SCOPE_SUBTREE, td->filter_str, 0, 0,
0, 0,
memberof_get_plugin_id(),
0);
@@ -2844,6 +2853,16 @@ int memberof_fix_memberof(MemberOfConfig *config, char *dn, char *filter_str)
config,
0, memberof_fix_memberof_callback,
0);
+ if (rc){
+ char *errmsg;
+ int result;
+
+ slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_RESULT, &result);
+ errmsg = ldap_err2string(result);
+ slapi_log_err(SLAPI_LOG_ERR, MEMBEROF_PLUGIN_SUBSYSTEM,
+ "memberof_fix_memberof - Failed (%s)\n", errmsg );
+ slapi_task_log_notice(task, "Memberof task failed (%s)\n", errmsg );
+ }
slapi_pblock_destroy(search_pb);
diff --git a/ldap/servers/slapd/plugin_internal_op.c b/ldap/servers/slapd/plugin_internal_op.c
index 0f03c76..e203cf5 100644
--- a/ldap/servers/slapd/plugin_internal_op.c
+++ b/ldap/servers/slapd/plugin_internal_op.c
@@ -728,7 +728,7 @@ search_internal_callback_pb (Slapi_PBlock *pb, void *callback_data,
if (ifstr == NULL || (scope != LDAP_SCOPE_BASE && scope != LDAP_SCOPE_ONELEVEL
&& scope != LDAP_SCOPE_SUBTREE))
{
- opresult = LDAP_PARAM_ERROR;
+ opresult = LDAP_PARAM_ERROR;
slapi_pblock_set(pb, SLAPI_PLUGIN_INTOP_RESULT, &opresult);
return -1;
}
@@ -745,19 +745,19 @@ search_internal_callback_pb (Slapi_PBlock *pb, void *callback_data,
op->o_search_referral_handler = internal_ref_entry_callback;
filter = slapi_str2filter((fstr = slapi_ch_strdup(ifstr)));
- if(scope == LDAP_SCOPE_BASE) {
- filter->f_flags |= (SLAPI_FILTER_LDAPSUBENTRY |
- SLAPI_FILTER_TOMBSTONE | SLAPI_FILTER_RUV);
+ if (NULL == filter) {
+ int result = LDAP_FILTER_ERROR;
+ send_ldap_result(pb, result, NULL, NULL, 0, NULL);
+ slapi_pblock_set(pb, SLAPI_PLUGIN_INTOP_RESULT, &result);
+ rc = -1;
+ goto done;
}
- if (NULL == filter)
- {
- send_ldap_result(pb, LDAP_FILTER_ERROR, NULL, NULL, 0, NULL);
- rc = -1;
- goto done;
+ if (scope == LDAP_SCOPE_BASE) {
+ filter->f_flags |= (SLAPI_FILTER_LDAPSUBENTRY |
+ SLAPI_FILTER_TOMBSTONE | SLAPI_FILTER_RUV);
}
filter_normalize(filter);
-
slapi_pblock_set(pb, SLAPI_SEARCH_FILTER, filter);
slapi_pblock_set(pb, SLAPI_REQCONTROLS, controls);
@@ -785,11 +785,8 @@ search_internal_callback_pb (Slapi_PBlock *pb, void *callback_data,
slapi_pblock_get(pb, SLAPI_SEARCH_FILTER, &filter);
done:
- slapi_ch_free((void **) & fstr);
- if (filter != NULL)
- {
- slapi_filter_free(filter, 1 /* recurse */);
- }
+ slapi_ch_free_string(&fstr);
+ slapi_filter_free(filter, 1 /* recurse */);
slapi_pblock_get(pb, SLAPI_SEARCH_ATTRS, &tmp_attrs);
slapi_ch_array_free(tmp_attrs);
slapi_pblock_set(pb, SLAPI_SEARCH_ATTRS, NULL);
7 years, 4 months
Branch '389-ds-base-1.3.5' - dirsrvtests/tests ldap/servers
by Mark Reynolds
dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py | 79 +++++++++-
dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py | 22 +-
dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py | 3
ldap/servers/plugins/memberof/memberof.c | 59 ++++---
ldap/servers/slapd/plugin_internal_op.c | 27 +--
5 files changed, 143 insertions(+), 47 deletions(-)
New commits:
commit b0020b73d34bdd630fb5b1a3e4fcebbb4b81f9c9
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Tue Dec 20 14:59:02 2016 -0500
Ticket 49072 - validate memberof fixup task args
Bug Description: If an invalid base dn, or invalid filter was provided
in the task there was no way to tell that the task
actually failed.
Fix Description: Log an error, and properly update the task status/exit
code when an error occurs.
Added CI test (also fixed some issues in the dynamic
plugins test suite).
https://fedorahosted.org/389/ticket/49072
Reviewed by: nhosoi(Thanks!)
(cherry picked from commit a79ae70df6b20cd288fca511f784c414e8c52df4)
diff --git a/dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py b/dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py
index 30dfa88..0a74eef 100644
--- a/dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py
+++ b/dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py
@@ -96,6 +96,7 @@ def test_dependency(inst, plugin):
################################################################################
def wait_for_task(conn, task_dn):
finished = False
+ exitcode = 0
count = 0
while count < 60:
try:
@@ -105,6 +106,7 @@ def wait_for_task(conn, task_dn):
assert False
if task_entry[0].hasAttr('nstaskexitcode'):
# task is done
+ exitcode = task_entry[0].nsTaskExitCode
finished = True
break
except ldap.LDAPError as e:
@@ -117,6 +119,8 @@ def wait_for_task(conn, task_dn):
log.fatal('wait_for_task: Task (%s) did not complete!' % task_dn)
assert False
+ return exitcode
+
################################################################################
#
@@ -1416,9 +1420,82 @@ def test_memberof(inst, args=None):
log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc'])
assert False
- # Enable the plugin, and run the task
+ # Enable memberof plugin
inst.plugins.enable(name=PLUGIN_MEMBER_OF)
+ #############################################################
+ # Test memberOf fixup arg validation: Test the DN and filter
+ #############################################################
+
+ #
+ # Test bad/nonexistant DN
+ #
+ TASK_DN = 'cn=task-' + str(int(time.time())) + ',' + DN_MBO_TASK
+ try:
+ inst.add_s(Entry((TASK_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'basedn': DEFAULT_SUFFIX + "bad",
+ 'filter': 'objectclass=top'})))
+ except ldap.LDAPError as e:
+ log.fatal('test_memberof: Failed to add task(bad dn): error ' +
+ e.message['desc'])
+ assert False
+
+ exitcode = wait_for_task(inst, TASK_DN)
+ if exitcode == "0":
+ # We should an error
+ log.fatal('test_memberof: Task with invalid DN still reported success')
+ assert False
+
+ #
+ # Test invalid DN syntax
+ #
+ TASK_DN = 'cn=task-' + str(int(time.time())) + ',' + DN_MBO_TASK
+ try:
+ inst.add_s(Entry((TASK_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'basedn': "bad",
+ 'filter': 'objectclass=top'})))
+ except ldap.LDAPError as e:
+ log.fatal('test_memberof: Failed to add task(invalid dn syntax): ' +
+ e.message['desc'])
+ assert False
+
+ exitcode = wait_for_task(inst, TASK_DN)
+ if exitcode == "0":
+ # We should an error
+ log.fatal('test_memberof: Task with invalid DN syntax still reported' +
+ ' success')
+ assert False
+
+ #
+ # Test bad filter (missing closing parenthesis)
+ #
+ TASK_DN = 'cn=task-' + str(int(time.time())) + ',' + DN_MBO_TASK
+ try:
+ inst.add_s(Entry((TASK_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'basedn': DEFAULT_SUFFIX,
+ 'filter': '(objectclass=top'})))
+ except ldap.LDAPError as e:
+ log.fatal('test_memberof: Failed to add task(bad filter: error ' +
+ e.message['desc'])
+ assert False
+
+ exitcode = wait_for_task(inst, TASK_DN)
+ if exitcode == "0":
+ # We should an error
+ log.fatal('test_memberof: Task with invalid filter still reported ' +
+ 'success')
+ assert False
+
+ ####################################################
+ # Test fixup works
+ ####################################################
+
+ #
+ # Run the task and validate that it worked
+ #
TASK_DN = 'cn=task-' + str(int(time.time())) + ',' + DN_MBO_TASK
try:
inst.add_s(Entry((TASK_DN, {
diff --git a/dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py b/dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py
index 920d3f6..79a8086 100644
--- a/dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py
+++ b/dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py
@@ -88,8 +88,9 @@ class DelUsers(threading.Thread):
try:
conn.delete_s(USER_DN)
except ldap.LDAPError as e:
- log.fatal('DeleteUsers: failed to delete (' + USER_DN + ') error: ' + e.message['desc'])
- assert False
+ if e == ldap.UNAVAILABLE or e == ldap.SERVER_DOWN:
+ log.fatal('DeleteUsers: failed to delete (' + USER_DN + ') error: ' + e.message['desc'])
+ assert False
idx += 1
@@ -115,11 +116,10 @@ class AddUsers(threading.Thread):
conn.add_s(Entry((GROUP_DN,
{'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
'uid': 'user' + str(idx)})))
- except ldap.ALREADY_EXISTS:
- pass
except ldap.LDAPError as e:
- log.fatal('AddUsers: failed to add group (' + USER_DN + ') error: ' + e.message['desc'])
- assert False
+ if e == ldap.UNAVAILABLE or e == ldap.SERVER_DOWN:
+ log.fatal('AddUsers: failed to add group (' + USER_DN + ') error: ' + e.message['desc'])
+ assert False
log.info('AddUsers - Adding ' + str(NUM_USERS) + ' entries (' + self.rdnval + ')...')
@@ -129,16 +129,18 @@ class AddUsers(threading.Thread):
conn.add_s(Entry((USER_DN, {'objectclass': 'top extensibleObject'.split(),
'uid': 'user' + str(idx)})))
except ldap.LDAPError as e:
- log.fatal('AddUsers: failed to add (' + USER_DN + ') error: ' + e.message['desc'])
- assert False
+ if e == ldap.UNAVAILABLE or e == ldap.SERVER_DOWN:
+ log.fatal('AddUsers: failed to add (' + USER_DN + ') error: ' + e.message['desc'])
+ assert False
if self.addToGroup:
# Add the user to the group
try:
conn.modify_s(GROUP_DN, [(ldap.MOD_ADD, 'uniquemember', USER_DN)])
except ldap.LDAPError as e:
- log.fatal('AddUsers: Failed to add user' + USER_DN + ' to group: error ' + e.message['desc'])
- assert False
+ if e == ldap.UNAVAILABLE or e == ldap.SERVER_DOWN:
+ log.fatal('AddUsers: Failed to add user' + USER_DN + ' to group: error ' + e.message['desc'])
+ assert False
idx += 1
diff --git a/dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py b/dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py
index fa980ec..0945cde 100644
--- a/dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py
+++ b/dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py
@@ -297,7 +297,8 @@ def test_dynamic_plugins(topology):
except:
log.info('Stress test failed!')
- repl_fail(replica_inst)
+ if replication_run:
+ repl_fail(replica_inst)
stress_count += 1
log.info('####################################################################')
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
index aad300f..6ff9c9a 100644
--- a/ldap/servers/plugins/memberof/memberof.c
+++ b/ldap/servers/plugins/memberof/memberof.c
@@ -65,6 +65,13 @@ typedef struct _memberof_get_groups_data
Slapi_ValueSet **group_norm_vals;
} memberof_get_groups_data;
+typedef struct _task_data
+{
+ char *dn;
+ char *bind_dn;
+ char *filter_str;
+} task_data;
+
/*** function prototypes ***/
/* exported functions */
@@ -142,7 +149,7 @@ static void memberof_task_destructor(Slapi_Task *task);
static const char *fetch_attr(Slapi_Entry *e, const char *attrname,
const char *default_val);
static void memberof_fixup_task_thread(void *arg);
-static int memberof_fix_memberof(MemberOfConfig *config, char *dn, char *filter_str);
+static int memberof_fix_memberof(MemberOfConfig *config, Slapi_Task *task, task_data *td);
static int memberof_fix_memberof_callback(Slapi_Entry *e, void *callback_data);
static int memberof_entry_in_scope(MemberOfConfig *config, Slapi_DN *sdn);
static int memberof_add_objectclass(char *auto_add_oc, const char *dn);
@@ -2625,13 +2632,6 @@ void memberof_unlock()
}
}
-typedef struct _task_data
-{
- char *dn;
- char *bind_dn;
- char *filter_str;
-} task_data;
-
void memberof_fixup_task_thread(void *arg)
{
MemberOfConfig configCopy = {0, 0, 0, 0};
@@ -2646,6 +2646,7 @@ void memberof_fixup_task_thread(void *arg)
slapi_task_inc_refcount(task);
slapi_log_error(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
"memberof_fixup_task_thread --> refcount incremented.\n" );
+
/* Fetch our task data from the task */
td = (task_data *)slapi_task_get_data(task);
@@ -2653,10 +2654,10 @@ void memberof_fixup_task_thread(void *arg)
slapi_td_set_dn(slapi_ch_strdup(td->bind_dn));
slapi_task_begin(task, 1);
- slapi_task_log_notice(task, "Memberof task starts (arg: %s) ...\n",
+ slapi_task_log_notice(task, "Memberof task starts (filter: %s) ...\n",
td->filter_str);
slapi_log_error(SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM,
- "Memberof task starts (arg: %s) ...\n", td->filter_str);
+ "Memberof task starts (filter: \"%s\") ...\n", td->filter_str);
/* We need to get the config lock first. Trying to get the
* config lock after we already hold the op lock can cause
@@ -2671,7 +2672,7 @@ void memberof_fixup_task_thread(void *arg)
if (usetxn) {
Slapi_DN *sdn = slapi_sdn_new_dn_byref(td->dn);
- Slapi_Backend *be = slapi_be_select(sdn);
+ Slapi_Backend *be = slapi_be_select_exact(sdn);
slapi_sdn_free(&sdn);
if (be) {
fixup_pb = slapi_pblock_new();
@@ -2679,12 +2680,18 @@ void memberof_fixup_task_thread(void *arg)
rc = slapi_back_transaction_begin(fixup_pb);
if (rc) {
slapi_log_error(SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM,
- "memberof_fixup_task_thread: failed to start transaction\n");
+ "memberof_fixup_task_thread: failed to start transaction\n");
+ goto done;
}
} else {
slapi_log_error(SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM,
- "memberof_fixup_task_thread: failed to get be backend from %s\n",
- td->dn);
+ "memberof_fixup_task_thread: failed to get be backend from (%s)\n",
+ td->dn);
+ slapi_task_log_notice(task, "Memberof task - Failed to get be backend from (%s)\n",
+ td->dn);
+ rc = -1;
+ goto done;
+
}
}
@@ -2692,13 +2699,14 @@ void memberof_fixup_task_thread(void *arg)
memberof_lock();
/* do real work */
- rc = memberof_fix_memberof(&configCopy, td->dn, td->filter_str);
+ rc = memberof_fix_memberof(&configCopy, task, td);
/* release the memberOf operation lock */
memberof_unlock();
+done:
if (usetxn && fixup_pb) {
- if (rc) { /* failes */
+ if (rc) { /* failed */
slapi_back_transaction_abort(fixup_pb);
} else {
slapi_back_transaction_commit(fixup_pb);
@@ -2711,7 +2719,8 @@ void memberof_fixup_task_thread(void *arg)
slapi_task_log_status(task, "Memberof task finished.");
slapi_task_inc_progress(task);
slapi_log_error(SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM,
- "Memberof task finished (arg: %s) ...\n", td->filter_str);
+ "Memberof task finished (filter: %s) result: %d\n",
+ td->filter_str, rc);
/* this will queue the destruction of the task */
slapi_task_finish(task, rc);
@@ -2829,13 +2838,13 @@ memberof_task_destructor(Slapi_Task *task)
"memberof_task_destructor <--\n" );
}
-int memberof_fix_memberof(MemberOfConfig *config, char *dn, char *filter_str)
+int memberof_fix_memberof(MemberOfConfig *config, Slapi_Task *task, task_data *td)
{
int rc = 0;
Slapi_PBlock *search_pb = slapi_pblock_new();
- slapi_search_internal_set_pb(search_pb, dn,
- LDAP_SCOPE_SUBTREE, filter_str, 0, 0,
+ slapi_search_internal_set_pb(search_pb, td->dn,
+ LDAP_SCOPE_SUBTREE, td->filter_str, 0, 0,
0, 0,
memberof_get_plugin_id(),
0);
@@ -2844,6 +2853,16 @@ int memberof_fix_memberof(MemberOfConfig *config, char *dn, char *filter_str)
config,
0, memberof_fix_memberof_callback,
0);
+ if (rc){
+ char *errmsg;
+ int result;
+
+ slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_RESULT, &result);
+ errmsg = ldap_err2string(result);
+ slapi_log_err(SLAPI_LOG_ERR, MEMBEROF_PLUGIN_SUBSYSTEM,
+ "memberof_fix_memberof - Failed (%s)\n", errmsg );
+ slapi_task_log_notice(task, "Memberof task failed (%s)\n", errmsg );
+ }
slapi_pblock_destroy(search_pb);
diff --git a/ldap/servers/slapd/plugin_internal_op.c b/ldap/servers/slapd/plugin_internal_op.c
index 0f03c76..e203cf5 100644
--- a/ldap/servers/slapd/plugin_internal_op.c
+++ b/ldap/servers/slapd/plugin_internal_op.c
@@ -728,7 +728,7 @@ search_internal_callback_pb (Slapi_PBlock *pb, void *callback_data,
if (ifstr == NULL || (scope != LDAP_SCOPE_BASE && scope != LDAP_SCOPE_ONELEVEL
&& scope != LDAP_SCOPE_SUBTREE))
{
- opresult = LDAP_PARAM_ERROR;
+ opresult = LDAP_PARAM_ERROR;
slapi_pblock_set(pb, SLAPI_PLUGIN_INTOP_RESULT, &opresult);
return -1;
}
@@ -745,19 +745,19 @@ search_internal_callback_pb (Slapi_PBlock *pb, void *callback_data,
op->o_search_referral_handler = internal_ref_entry_callback;
filter = slapi_str2filter((fstr = slapi_ch_strdup(ifstr)));
- if(scope == LDAP_SCOPE_BASE) {
- filter->f_flags |= (SLAPI_FILTER_LDAPSUBENTRY |
- SLAPI_FILTER_TOMBSTONE | SLAPI_FILTER_RUV);
+ if (NULL == filter) {
+ int result = LDAP_FILTER_ERROR;
+ send_ldap_result(pb, result, NULL, NULL, 0, NULL);
+ slapi_pblock_set(pb, SLAPI_PLUGIN_INTOP_RESULT, &result);
+ rc = -1;
+ goto done;
}
- if (NULL == filter)
- {
- send_ldap_result(pb, LDAP_FILTER_ERROR, NULL, NULL, 0, NULL);
- rc = -1;
- goto done;
+ if (scope == LDAP_SCOPE_BASE) {
+ filter->f_flags |= (SLAPI_FILTER_LDAPSUBENTRY |
+ SLAPI_FILTER_TOMBSTONE | SLAPI_FILTER_RUV);
}
filter_normalize(filter);
-
slapi_pblock_set(pb, SLAPI_SEARCH_FILTER, filter);
slapi_pblock_set(pb, SLAPI_REQCONTROLS, controls);
@@ -785,11 +785,8 @@ search_internal_callback_pb (Slapi_PBlock *pb, void *callback_data,
slapi_pblock_get(pb, SLAPI_SEARCH_FILTER, &filter);
done:
- slapi_ch_free((void **) & fstr);
- if (filter != NULL)
- {
- slapi_filter_free(filter, 1 /* recurse */);
- }
+ slapi_ch_free_string(&fstr);
+ slapi_filter_free(filter, 1 /* recurse */);
slapi_pblock_get(pb, SLAPI_SEARCH_ATTRS, &tmp_attrs);
slapi_ch_array_free(tmp_attrs);
slapi_pblock_set(pb, SLAPI_SEARCH_ATTRS, NULL);
7 years, 4 months
dirsrvtests/tests ldap/servers
by Mark Reynolds
dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py | 79 +++++++++-
dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py | 22 +-
dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py | 3
ldap/servers/plugins/memberof/memberof.c | 57 ++++---
ldap/servers/slapd/plugin_internal_op.c | 27 +--
5 files changed, 140 insertions(+), 48 deletions(-)
New commits:
commit a79ae70df6b20cd288fca511f784c414e8c52df4
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Tue Dec 20 14:59:02 2016 -0500
Ticket 49072 - validate memberof fixup task args
Bug Description: If an invalid base dn, or invalid filter was provided
in the task there was no way to tell that the task
actually failed.
Fix Description: Log an error, and properly update the task status/exit
code when an error occurs.
Added CI test (also fixed some issues in the dynamic
plugins test suite).
https://fedorahosted.org/389/ticket/49072
Reviewed by: nhosoi(Thanks!)
diff --git a/dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py b/dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py
index b8bf477..99559cc 100644
--- a/dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py
+++ b/dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py
@@ -96,6 +96,7 @@ def test_dependency(inst, plugin):
################################################################################
def wait_for_task(conn, task_dn):
finished = False
+ exitcode = 0
count = 0
while count < 60:
try:
@@ -105,6 +106,7 @@ def wait_for_task(conn, task_dn):
assert False
if task_entry[0].hasAttr('nstaskexitcode'):
# task is done
+ exitcode = task_entry[0].nsTaskExitCode
finished = True
break
except ldap.LDAPError as e:
@@ -117,6 +119,8 @@ def wait_for_task(conn, task_dn):
log.fatal('wait_for_task: Task (%s) did not complete!' % task_dn)
assert False
+ return exitcode
+
################################################################################
#
@@ -1416,9 +1420,82 @@ def test_memberof(inst, args=None):
log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc'])
assert False
- # Enable the plugin, and run the task
+ # Enable memberof plugin
inst.plugins.enable(name=PLUGIN_MEMBER_OF)
+ #############################################################
+ # Test memberOf fixup arg validation: Test the DN and filter
+ #############################################################
+
+ #
+ # Test bad/nonexistant DN
+ #
+ TASK_DN = 'cn=task-' + str(int(time.time())) + ',' + DN_MBO_TASK
+ try:
+ inst.add_s(Entry((TASK_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'basedn': DEFAULT_SUFFIX + "bad",
+ 'filter': 'objectclass=top'})))
+ except ldap.LDAPError as e:
+ log.fatal('test_memberof: Failed to add task(bad dn): error ' +
+ e.message['desc'])
+ assert False
+
+ exitcode = wait_for_task(inst, TASK_DN)
+ if exitcode == "0":
+ # We should an error
+ log.fatal('test_memberof: Task with invalid DN still reported success')
+ assert False
+
+ #
+ # Test invalid DN syntax
+ #
+ TASK_DN = 'cn=task-' + str(int(time.time())) + ',' + DN_MBO_TASK
+ try:
+ inst.add_s(Entry((TASK_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'basedn': "bad",
+ 'filter': 'objectclass=top'})))
+ except ldap.LDAPError as e:
+ log.fatal('test_memberof: Failed to add task(invalid dn syntax): ' +
+ e.message['desc'])
+ assert False
+
+ exitcode = wait_for_task(inst, TASK_DN)
+ if exitcode == "0":
+ # We should an error
+ log.fatal('test_memberof: Task with invalid DN syntax still reported' +
+ ' success')
+ assert False
+
+ #
+ # Test bad filter (missing closing parenthesis)
+ #
+ TASK_DN = 'cn=task-' + str(int(time.time())) + ',' + DN_MBO_TASK
+ try:
+ inst.add_s(Entry((TASK_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'basedn': DEFAULT_SUFFIX,
+ 'filter': '(objectclass=top'})))
+ except ldap.LDAPError as e:
+ log.fatal('test_memberof: Failed to add task(bad filter: error ' +
+ e.message['desc'])
+ assert False
+
+ exitcode = wait_for_task(inst, TASK_DN)
+ if exitcode == "0":
+ # We should an error
+ log.fatal('test_memberof: Task with invalid filter still reported ' +
+ 'success')
+ assert False
+
+ ####################################################
+ # Test fixup works
+ ####################################################
+
+ #
+ # Run the task and validate that it worked
+ #
TASK_DN = 'cn=task-' + str(int(time.time())) + ',' + DN_MBO_TASK
try:
inst.add_s(Entry((TASK_DN, {
diff --git a/dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py b/dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py
index f98812c..a869e98 100644
--- a/dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py
+++ b/dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py
@@ -88,8 +88,9 @@ class DelUsers(threading.Thread):
try:
conn.delete_s(USER_DN)
except ldap.LDAPError as e:
- log.fatal('DeleteUsers: failed to delete (' + USER_DN + ') error: ' + e.message['desc'])
- assert False
+ if e == ldap.UNAVAILABLE or e == ldap.SERVER_DOWN:
+ log.fatal('DeleteUsers: failed to delete (' + USER_DN + ') error: ' + e.message['desc'])
+ assert False
idx += 1
@@ -115,11 +116,10 @@ class AddUsers(threading.Thread):
conn.add_s(Entry((GROUP_DN,
{'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
'uid': 'user' + str(idx)})))
- except ldap.ALREADY_EXISTS:
- pass
except ldap.LDAPError as e:
- log.fatal('AddUsers: failed to add group (' + USER_DN + ') error: ' + e.message['desc'])
- assert False
+ if e == ldap.UNAVAILABLE or e == ldap.SERVER_DOWN:
+ log.fatal('AddUsers: failed to add group (' + USER_DN + ') error: ' + e.message['desc'])
+ assert False
log.info('AddUsers - Adding ' + str(NUM_USERS) + ' entries (' + self.rdnval + ')...')
@@ -129,16 +129,18 @@ class AddUsers(threading.Thread):
conn.add_s(Entry((USER_DN, {'objectclass': 'top extensibleObject'.split(),
'uid': 'user' + str(idx)})))
except ldap.LDAPError as e:
- log.fatal('AddUsers: failed to add (' + USER_DN + ') error: ' + e.message['desc'])
- assert False
+ if e == ldap.UNAVAILABLE or e == ldap.SERVER_DOWN:
+ log.fatal('AddUsers: failed to add (' + USER_DN + ') error: ' + e.message['desc'])
+ assert False
if self.addToGroup:
# Add the user to the group
try:
conn.modify_s(GROUP_DN, [(ldap.MOD_ADD, 'uniquemember', USER_DN)])
except ldap.LDAPError as e:
- log.fatal('AddUsers: Failed to add user' + USER_DN + ' to group: error ' + e.message['desc'])
- assert False
+ if e == ldap.UNAVAILABLE or e == ldap.SERVER_DOWN:
+ log.fatal('AddUsers: Failed to add user' + USER_DN + ' to group: error ' + e.message['desc'])
+ assert False
idx += 1
diff --git a/dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py b/dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py
index 2411424..e55bc85 100644
--- a/dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py
+++ b/dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py
@@ -250,7 +250,8 @@ def test_dynamic_plugins(topology_st):
except:
log.info('Stress test failed!')
- repl_fail(replica_inst)
+ if replication_run:
+ repl_fail(replica_inst)
stress_count += 1
log.info('####################################################################')
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
index e0573a7..8028163 100644
--- a/ldap/servers/plugins/memberof/memberof.c
+++ b/ldap/servers/plugins/memberof/memberof.c
@@ -68,6 +68,13 @@ typedef struct _memberof_get_groups_data
Slapi_ValueSet **group_norm_vals;
} memberof_get_groups_data;
+typedef struct _task_data
+{
+ char *dn;
+ char *bind_dn;
+ char *filter_str;
+} task_data;
+
/*** function prototypes ***/
/* exported functions */
@@ -145,7 +152,7 @@ static void memberof_task_destructor(Slapi_Task *task);
static const char *fetch_attr(Slapi_Entry *e, const char *attrname,
const char *default_val);
static void memberof_fixup_task_thread(void *arg);
-static int memberof_fix_memberof(MemberOfConfig *config, char *dn, char *filter_str);
+static int memberof_fix_memberof(MemberOfConfig *config, Slapi_Task *task, task_data *td);
static int memberof_fix_memberof_callback(Slapi_Entry *e, void *callback_data);
static int memberof_entry_in_scope(MemberOfConfig *config, Slapi_DN *sdn);
static int memberof_add_objectclass(char *auto_add_oc, const char *dn);
@@ -2641,13 +2648,6 @@ void memberof_unlock()
}
}
-typedef struct _task_data
-{
- char *dn;
- char *bind_dn;
- char *filter_str;
-} task_data;
-
void memberof_fixup_task_thread(void *arg)
{
MemberOfConfig configCopy = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
@@ -2661,7 +2661,7 @@ void memberof_fixup_task_thread(void *arg)
}
slapi_task_inc_refcount(task);
slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
- "memberof_fixup_task_thread - refcount incremented.\n" );
+ "memberof_fixup_task_thread - refcount incremented.\n" );
/* Fetch our task data from the task */
td = (task_data *)slapi_task_get_data(task);
@@ -2672,7 +2672,8 @@ void memberof_fixup_task_thread(void *arg)
slapi_task_log_notice(task, "Memberof task starts (arg: %s) ...\n",
td->filter_str);
slapi_log_err(SLAPI_LOG_INFO, MEMBEROF_PLUGIN_SUBSYSTEM,
- "memberof_fixup_task_thread - Memberof task starts (arg: %s) ...\n", td->filter_str);
+ "memberof_fixup_task_thread - Memberof task starts (filter: \"%s\") ...\n",
+ td->filter_str);
/* We need to get the config lock first. Trying to get the
* config lock after we already hold the op lock can cause
@@ -2687,7 +2688,7 @@ void memberof_fixup_task_thread(void *arg)
if (usetxn) {
Slapi_DN *sdn = slapi_sdn_new_dn_byref(td->dn);
- Slapi_Backend *be = slapi_be_select(sdn);
+ Slapi_Backend *be = slapi_be_select_exact(sdn);
slapi_sdn_free(&sdn);
if (be) {
fixup_pb = slapi_pblock_new();
@@ -2695,12 +2696,17 @@ void memberof_fixup_task_thread(void *arg)
rc = slapi_back_transaction_begin(fixup_pb);
if (rc) {
slapi_log_err(SLAPI_LOG_ERR, MEMBEROF_PLUGIN_SUBSYSTEM,
- "memberof_fixup_task_thread - Failed to start transaction\n");
+ "memberof_fixup_task_thread - Failed to start transaction\n");
+ goto done;
}
} else {
slapi_log_err(SLAPI_LOG_ERR, MEMBEROF_PLUGIN_SUBSYSTEM,
- "memberof_fixup_task_thread - Failed to get be backend from %s\n",
- td->dn);
+ "memberof_fixup_task_thread - Failed to get be backend from (%s)\n",
+ td->dn);
+ slapi_task_log_notice(task, "Memberof task - Failed to get be backend from (%s)\n",
+ td->dn);
+ rc = -1;
+ goto done;
}
}
@@ -2708,13 +2714,14 @@ void memberof_fixup_task_thread(void *arg)
memberof_lock();
/* do real work */
- rc = memberof_fix_memberof(&configCopy, td->dn, td->filter_str);
+ rc = memberof_fix_memberof(&configCopy, task, td);
/* release the memberOf operation lock */
memberof_unlock();
+done:
if (usetxn && fixup_pb) {
- if (rc) { /* failes */
+ if (rc) { /* failed */
slapi_back_transaction_abort(fixup_pb);
} else {
slapi_back_transaction_commit(fixup_pb);
@@ -2726,8 +2733,6 @@ void memberof_fixup_task_thread(void *arg)
slapi_task_log_notice(task, "Memberof task finished.");
slapi_task_log_status(task, "Memberof task finished.");
slapi_task_inc_progress(task);
- slapi_log_err(SLAPI_LOG_INFO, MEMBEROF_PLUGIN_SUBSYSTEM,
- "memberof_fixup_task_thread - Memberof task finished (arg: %s) ...\n", td->filter_str);
/* this will queue the destruction of the task */
slapi_task_finish(task, rc);
@@ -2845,13 +2850,13 @@ memberof_task_destructor(Slapi_Task *task)
"memberof_task_destructor <--\n" );
}
-int memberof_fix_memberof(MemberOfConfig *config, char *dn, char *filter_str)
+int memberof_fix_memberof(MemberOfConfig *config, Slapi_Task *task, task_data *td)
{
int rc = 0;
Slapi_PBlock *search_pb = slapi_pblock_new();
- slapi_search_internal_set_pb(search_pb, dn,
- LDAP_SCOPE_SUBTREE, filter_str, 0, 0,
+ slapi_search_internal_set_pb(search_pb, td->dn,
+ LDAP_SCOPE_SUBTREE, td->filter_str, 0, 0,
0, 0,
memberof_get_plugin_id(),
0);
@@ -2860,6 +2865,16 @@ int memberof_fix_memberof(MemberOfConfig *config, char *dn, char *filter_str)
config,
0, memberof_fix_memberof_callback,
0);
+ if (rc){
+ char *errmsg;
+ int result;
+
+ slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_RESULT, &result);
+ errmsg = ldap_err2string(result);
+ slapi_log_err(SLAPI_LOG_ERR, MEMBEROF_PLUGIN_SUBSYSTEM,
+ "memberof_fix_memberof - Failed (%s)\n", errmsg );
+ slapi_task_log_notice(task, "Memberof task failed (%s)\n", errmsg );
+ }
slapi_pblock_destroy(search_pb);
diff --git a/ldap/servers/slapd/plugin_internal_op.c b/ldap/servers/slapd/plugin_internal_op.c
index 8cbdf06..966a71f 100644
--- a/ldap/servers/slapd/plugin_internal_op.c
+++ b/ldap/servers/slapd/plugin_internal_op.c
@@ -726,7 +726,7 @@ search_internal_callback_pb (Slapi_PBlock *pb, void *callback_data,
if (ifstr == NULL || (scope != LDAP_SCOPE_BASE && scope != LDAP_SCOPE_ONELEVEL
&& scope != LDAP_SCOPE_SUBTREE))
{
- opresult = LDAP_PARAM_ERROR;
+ opresult = LDAP_PARAM_ERROR;
slapi_pblock_set(pb, SLAPI_PLUGIN_INTOP_RESULT, &opresult);
return -1;
}
@@ -743,19 +743,19 @@ search_internal_callback_pb (Slapi_PBlock *pb, void *callback_data,
op->o_search_referral_handler = internal_ref_entry_callback;
filter = slapi_str2filter((fstr = slapi_ch_strdup(ifstr)));
- if(scope == LDAP_SCOPE_BASE) {
- filter->f_flags |= (SLAPI_FILTER_LDAPSUBENTRY |
- SLAPI_FILTER_TOMBSTONE | SLAPI_FILTER_RUV);
+ if (NULL == filter) {
+ int result = LDAP_FILTER_ERROR;
+ send_ldap_result(pb, result, NULL, NULL, 0, NULL);
+ slapi_pblock_set(pb, SLAPI_PLUGIN_INTOP_RESULT, &result);
+ rc = -1;
+ goto done;
}
- if (NULL == filter)
- {
- send_ldap_result(pb, LDAP_FILTER_ERROR, NULL, NULL, 0, NULL);
- rc = -1;
- goto done;
+ if (scope == LDAP_SCOPE_BASE) {
+ filter->f_flags |= (SLAPI_FILTER_LDAPSUBENTRY |
+ SLAPI_FILTER_TOMBSTONE | SLAPI_FILTER_RUV);
}
filter_normalize(filter);
-
slapi_pblock_set(pb, SLAPI_SEARCH_FILTER, filter);
slapi_pblock_set(pb, SLAPI_REQCONTROLS, controls);
@@ -783,11 +783,8 @@ search_internal_callback_pb (Slapi_PBlock *pb, void *callback_data,
slapi_pblock_get(pb, SLAPI_SEARCH_FILTER, &filter);
done:
- slapi_ch_free((void **) & fstr);
- if (filter != NULL)
- {
- slapi_filter_free(filter, 1 /* recurse */);
- }
+ slapi_ch_free_string(&fstr);
+ slapi_filter_free(filter, 1 /* recurse */);
slapi_pblock_get(pb, SLAPI_SEARCH_ATTRS, &tmp_attrs);
slapi_ch_array_free(tmp_attrs);
slapi_pblock_set(pb, SLAPI_SEARCH_ATTRS, NULL);
7 years, 4 months
Branch '389-ds-base-1.2.11' - ldap/admin
by Mark Reynolds
ldap/admin/src/scripts/template-db2index.pl.in | 4 ++--
ldap/admin/src/scripts/template-db2ldif.pl.in | 4 ++--
ldap/admin/src/scripts/template-ldif2db.pl.in | 4 ++--
3 files changed, 6 insertions(+), 6 deletions(-)
New commits:
commit 7211e65825ae59c79ad5c7cf2d00c18314e900fd
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Mon Dec 19 10:30:40 2016 -0500
Ticket 49070 - ldif2db.pl/db2ldif.pl/db2index.pl displays the wrong usage
Bug Description: The usage displays "-n instance", but this is misleading as
it's supposed to be the backend name.
Fix Description: Change the usage to use "backend".
https://fedorahosted.org/389/ticket/49070
Reviewed by: nhosoi(Thanks!)
diff --git a/ldap/admin/src/scripts/template-db2index.pl.in b/ldap/admin/src/scripts/template-db2index.pl.in
index 2423d36..741726e 100644
--- a/ldap/admin/src/scripts/template-db2index.pl.in
+++ b/ldap/admin/src/scripts/template-db2index.pl.in
@@ -44,12 +44,12 @@ use DSUtil qw(shellEscape);
sub usage {
print(STDERR "Usage: $0 [-v] -D rootdn { -w password | -w - | -j filename } \n");
- print(STDERR " -n instance [-t attributeName[:indextypes[:matchingrules]]]\n");
+ print(STDERR " -n backend [-t attributeName[:indextypes[:matchingrules]]]\n");
print(STDERR " Opts: -D rootdn - Directory Manager\n");
print(STDERR " : -w password - Directory Manager's password\n");
print(STDERR " : -w - - Prompt for Directory Manager's password\n");
print(STDERR " : -j filename - Read Directory Manager's password from file\n");
- print(STDERR " : -n instance - instance to be indexed\n");
+ print(STDERR " : -n backend - Backend database name to be indexed\n");
print(STDERR " : -t attributeName[:indextypes[:matchingrules]]\n");
print(STDERR " - attributeName: name of the attribute to be indexed\n");
print(STDERR " If omitted, all the indexes defined \n");
diff --git a/ldap/admin/src/scripts/template-db2ldif.pl.in b/ldap/admin/src/scripts/template-db2ldif.pl.in
index d1b1f39..ee9a07f 100644
--- a/ldap/admin/src/scripts/template-db2ldif.pl.in
+++ b/ldap/admin/src/scripts/template-db2ldif.pl.in
@@ -44,13 +44,13 @@ use DSUtil qw(shellEscape);
sub usage {
print(STDERR "Usage: $0 [-v] -D rootdn { -w password | -w - | -j filename } \n");
- print(STDERR " {-n instance}* | {-s include}* [{-x exclude}*] \n");
+ print(STDERR " {-n backend}* | {-s include}* [{-x exclude}*] \n");
print(STDERR " [-m] [-M] [-u] [-C] [-N] [-U] [-a filename]\n");
print(STDERR " Opts: -D rootdn - Directory Manager\n");
print(STDERR " : -w password - Directory Manager's password\n");
print(STDERR " : -w - - Prompt for Directory Manager's password\n");
print(STDERR " : -j filename - Read Directory Manager's password from file\n");
- print(STDERR " : -n instance - instance to be exported\n");
+ print(STDERR " : -n backend - Backend database name to be exported\n");
print(STDERR " : -a filename - output ldif file\n");
print(STDERR " : -s include - included suffix(es)\n");
print(STDERR " : -x exclude - excluded suffix(es)\n");
diff --git a/ldap/admin/src/scripts/template-ldif2db.pl.in b/ldap/admin/src/scripts/template-ldif2db.pl.in
index 5fff029..0338acf 100644
--- a/ldap/admin/src/scripts/template-ldif2db.pl.in
+++ b/ldap/admin/src/scripts/template-ldif2db.pl.in
@@ -44,13 +44,13 @@ use DSUtil qw(shellEscape);
sub usage {
print(STDERR "Usage: $0 [-v] -D rootdn { -w password | -w - | -j filename } \n");
- print(STDERR " -n instance | {-s include}* [{-x exclude}*] [-O] [-c]\n");
+ print(STDERR " -n backend | {-s include}* [{-x exclude}*] [-O] [-c]\n");
print(STDERR " [-g [string]] [-G namespace_id] {-i filename}*\n");
print(STDERR " Opts: -D rootdn - Directory Manager\n");
print(STDERR " : -w password - Directory Manager's password\n");
print(STDERR " : -w - - Prompt for Directory Manager's password\n");
print(STDERR " : -j filename - Read Directory Manager's password from file\n");
- print(STDERR " : -n instance - instance to be imported to\n");
+ print(STDERR " : -n backend - Backend database name to be imported to\n");
print(STDERR " : -i filename - input ldif file(s)\n");
print(STDERR " : -s include - included suffix\n");
print(STDERR " : -x exclude - excluded suffix(es)\n");
7 years, 4 months
Branch '389-ds-base-1.2.11' - ldap/servers
by Mark Reynolds
ldap/servers/slapd/back-ldbm/import.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
New commits:
commit 934c5608472788825289219f19380ed218f8c5a6
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Mon Dec 19 12:26:59 2016 -0500
Ticket 49071 - Import with duplicate DNs throws unexpected errors
Bug Description: When an import fails there are "unable to flush" error
messages.
Fix Description: When an import fails close the database files before
deleting them.
Also fixed a small issue in DSUtil where we did not properly
check if an entry was valid.
https://fedorahosted.org/389/ticket/49071
Reviewed by: mreynolds(one line commit rule)
(cherry picked from commit 64b1ebffe5af118965bcdf3a84d62c0fc3efd196)
(cherry picked from commit d2f46f5af86faa9f50b85eea8dea2df563501f28)
diff --git a/ldap/servers/slapd/back-ldbm/import.c b/ldap/servers/slapd/back-ldbm/import.c
index 81c3c15..bf70cf3 100644
--- a/ldap/servers/slapd/back-ldbm/import.c
+++ b/ldap/servers/slapd/back-ldbm/import.c
@@ -1427,11 +1427,11 @@ error:
}
}
if (0 != ret) {
+ dblayer_instance_close(job->inst->inst_be);
if (!(job->flags & FLAG_DRYRUN)) { /* If not dryrun */
/* if running in the dry run mode, don't touch the db */
dblayer_delete_instance_dir(be);
}
- dblayer_instance_close(job->inst->inst_be);
} else {
if (0 != (ret = dblayer_instance_close(job->inst->inst_be)) ) {
import_log_notice(job, "Failed to close database");
7 years, 4 months