ldap/servers
by William Brown
ldap/servers/slapd/tools/pwenc.c | 3 +++
1 file changed, 3 insertions(+)
New commits:
commit 6d50ba5712a04d49b44dce125dd245af154f86a0
Author: William Brown <firstyear(a)redhat.com>
Date: Wed Jul 13 15:59:26 2016 +1000
Ticket 48920 - Memory leak in pwdhash-bin
Bug Description: We have a memory leak in pwdhash-bin
==388==ERROR: LeakSanitizer: detected memory leaks
Direct leak of 72 byte(s) in 1 object(s) allocated from:
#0 0x7f5f5f94dfd0 in calloc (/lib64/libasan.so.3+0xc6fd0)
#1 0x7f5f5d7f72ae (/lib64/libnss3.so+0x752ae)
SUMMARY: AddressSanitizer: 72 byte(s) leaked in 1 allocation(s).
Fix Description: Properly shutdown NSS at the end of usage
https://fedorahosted.org/389/ticket/48920
Author: wibrown
Review by: nhosoi
diff --git a/ldap/servers/slapd/tools/pwenc.c b/ldap/servers/slapd/tools/pwenc.c
index 525cd15..f92136c 100644
--- a/ldap/servers/slapd/tools/pwenc.c
+++ b/ldap/servers/slapd/tools/pwenc.c
@@ -263,6 +263,9 @@ out:
plugin_closeall( 1 /* Close Backends */, 1 /* Close Globals */);
+ /* Shutdown NSS to free values */
+ (void)NSS_Shutdown();
+
return( rc == 0 ? 0 : 1 );
}
7 years, 9 months
ldap/servers
by Mark Reynolds
ldap/servers/slapd/back-ldbm/ldbm_index_config.c | 6 ++++++
1 file changed, 6 insertions(+)
New commits:
commit 6c77c37ae5ce847ffa2bd75287dbd157c2f2d6af
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Wed Jul 13 16:24:19 2016 -0400
Ticket 48922 - Fix crash when deleting backend while import is running
Bug Description: If you delete a backend from the config while an
import is running the server can crash.
Fix Description: When deleting a backend from the config wait for the
backend instance to not be busy before removing the
indexes. Otherwise the dbenv is not stable and this
can cause the crash.
https://fedorahosted.org/389/ticket/48922
Reviewed by: nhosoi(Thanks!)
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
index 3e59e72..c5ceacf 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
@@ -151,6 +151,12 @@ ldbm_instance_index_config_delete_callback(Slapi_PBlock *pb, Slapi_Entry* e, Sla
rc = SLAPI_DSE_CALLBACK_ERROR;
goto bail;
}
+
+ while(is_instance_busy(inst)){
+ /* Wait for import/indexing job to complete */
+ DS_Sleep(PR_SecondsToInterval(1));
+ }
+
*returncode = LDAP_SUCCESS;
slapi_entry_attr_find(e, "cn", &attr);
7 years, 9 months
Branch '389-ds-base-1.3.4' - ldap/servers
by Mark Reynolds
ldap/servers/slapd/back-ldbm/ldbm_modify.c | 18 ++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)
New commits:
commit 96da572b21f81601d3a6ec73ab64b4370d6caa20
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Wed Jul 13 15:51:56 2016 -0400
Ticket 48924 - Fixup tombstone task needs to set proper flag when updating tombstones
Bug Description: The fixup tombstone task is not updating tombstones because
TOMBSTONE_INCLUDED is not being set when looking up the entry to
modify.
Fix Description: If fixing up tombstones, call find_entry2modify_only_ext with
the TOMBSTONE_INCLUDED flag.
https://fedorahosted.org/389/ticket/48924
Reviewed by: nhosoi(Thanks!)
(cherry picked from commit 8cfb650170bbb4f6ce328b827dc294437ee38c4b)
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
index fecd3b8..27f5c74 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
@@ -465,9 +465,14 @@ ldbm_back_modify( Slapi_PBlock *pb )
*/
if ( MANAGE_ENTRY_BEFORE_DBLOCK(li)) {
/* find and lock the entry we are about to modify */
- if ( (e = find_entry2modify( pb, be, addr, &txn )) == NULL ) {
+ if (fixup_tombstone) {
+ e = find_entry2modify_only_ext( pb, be, addr, TOMBSTONE_INCLUDED, &txn );
+ } else {
+ e = find_entry2modify( pb, be, addr, &txn );
+ }
+ if (e == NULL) {
ldap_result_code= -1;
- goto error_return; /* error result sent by find_entry2modify() */
+ goto error_return; /* error result sent by find_entry2modify() */
}
}
@@ -545,9 +550,14 @@ ldbm_back_modify( Slapi_PBlock *pb )
if (0 == retry_count) { /* just once */
if ( !MANAGE_ENTRY_BEFORE_DBLOCK(li)) {
/* find and lock the entry we are about to modify */
- if ( (e = find_entry2modify( pb, be, addr, &txn )) == NULL ) {
+ if (fixup_tombstone) {
+ e = find_entry2modify_only_ext( pb, be, addr, TOMBSTONE_INCLUDED, &txn );
+ } else {
+ e = find_entry2modify( pb, be, addr, &txn );
+ }
+ if (e == NULL) {
ldap_result_code= -1;
- goto error_return; /* error result sent by find_entry2modify() */
+ goto error_return; /* error result sent by find_entry2modify() */
}
}
7 years, 9 months
ldap/servers
by Mark Reynolds
ldap/servers/slapd/back-ldbm/ldbm_modify.c | 18 ++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)
New commits:
commit 8cfb650170bbb4f6ce328b827dc294437ee38c4b
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Wed Jul 13 15:51:56 2016 -0400
Ticket 48924 - Fixup tombstone task needs to set proper flag when updating tombstones
Bug Description: The fixup tombstone task is not updating tombstones because
TOMBSTONE_INCLUDED is not being set when looking up the entry to
modify.
Fix Description: If fixing up tombstones, call find_entry2modify_only_ext with
the TOMBSTONE_INCLUDED flag.
https://fedorahosted.org/389/ticket/48924
Reviewed by: nhosoi(Thanks!)
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
index 37225cd..9b3062c 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
@@ -465,9 +465,14 @@ ldbm_back_modify( Slapi_PBlock *pb )
*/
if ( MANAGE_ENTRY_BEFORE_DBLOCK(li)) {
/* find and lock the entry we are about to modify */
- if ( (e = find_entry2modify( pb, be, addr, &txn )) == NULL ) {
+ if (fixup_tombstone) {
+ e = find_entry2modify_only_ext( pb, be, addr, TOMBSTONE_INCLUDED, &txn );
+ } else {
+ e = find_entry2modify( pb, be, addr, &txn );
+ }
+ if (e == NULL) {
ldap_result_code= -1;
- goto error_return; /* error result sent by find_entry2modify() */
+ goto error_return; /* error result sent by find_entry2modify() */
}
}
@@ -545,9 +550,14 @@ ldbm_back_modify( Slapi_PBlock *pb )
if (0 == retry_count) { /* just once */
if ( !MANAGE_ENTRY_BEFORE_DBLOCK(li)) {
/* find and lock the entry we are about to modify */
- if ( (e = find_entry2modify( pb, be, addr, &txn )) == NULL ) {
+ if (fixup_tombstone) {
+ e = find_entry2modify_only_ext( pb, be, addr, TOMBSTONE_INCLUDED, &txn );
+ } else {
+ e = find_entry2modify( pb, be, addr, &txn );
+ }
+ if (e == NULL) {
ldap_result_code= -1;
- goto error_return; /* error result sent by find_entry2modify() */
+ goto error_return; /* error result sent by find_entry2modify() */
}
}
7 years, 9 months
ldap/admin
by Noriko Hosoi
ldap/admin/src/scripts/status-dirsrv.in | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
New commits:
commit a8b07cd2671c82421830ae94584b370436ef3434
Author: Noriko Hosoi <nhosoi(a)redhat.com>
Date: Mon Jul 11 19:09:24 2016 -0700
Ticket #48144 - Add /usr/sbin/status-dirsrv script to get the status of the directory server instance.
Description:
Analysis by vashirov(a)redhat.com:
https://bugzilla.redhat.com/show_bug.cgi?id=1209128#c11
The error comes from bash built-in `return`. bash version prior to
4.3 didn't support negative numbers as argument for `return`.
See for reference: http://wiki.bash-hackers.org/scripting/bashchanges
As suggested in the comment, instead of -1, 255 should be returned in
this error case:
> 255 is returned if the instance does not exist.
https://fedorahosted.org/389/ticket/48144
Viktor's proposal is reviewed by nhosoi(a)redhat.com.
diff --git a/ldap/admin/src/scripts/status-dirsrv.in b/ldap/admin/src/scripts/status-dirsrv.in
index 0f01eaa..9042899 100755
--- a/ldap/admin/src/scripts/status-dirsrv.in
+++ b/ldap/admin/src/scripts/status-dirsrv.in
@@ -11,14 +11,14 @@ status_instance() {
SERV_ID=$1
shift
- initfile=`get_init_file $initconfig_dir $SERV_ID` || { echo Instance $SERV_ID not found. ; return -1 ; }
+ initfile=`get_init_file $initconfig_dir $SERV_ID` || { echo Instance $SERV_ID not found. ; return 255 ; }
# source env. for this instance
if [ -f $initfile ] ; then
. $initfile
else
echo Instance $SERV_ID not found.
- return -1
+ return 255
fi
prefix="$DS_ROOT"
7 years, 9 months
ldap/schema
by William Brown
ldap/schema/50ns-directory.ldif | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
New commits:
commit f983a594b4a76f1e61af55d13c3ed7bef793078e
Author: William Brown <firstyear(a)redhat.com>
Date: Mon Jul 11 10:37:33 2016 +1000
Ticket 48912 - ntUserNtPassword schema
Bug Description: FreeRADIUS needs access to an NT password hash to work
with pure LDAP. We should support this.
Fix Description: Add ntUserNtPassword to the schema so that applications are
able to set it.
https://fedorahosted.org/389/ticket/48912
Author: wibrown
Review by: mreynolds (Thanks)
diff --git a/ldap/schema/50ns-directory.ldif b/ldap/schema/50ns-directory.ldif
index 062ac97..1f85398 100644
--- a/ldap/schema/50ns-directory.ldif
+++ b/ldap/schema/50ns-directory.ldif
@@ -77,6 +77,7 @@ attributeTypes: ( 2.16.840.1.113730.3.1.533 NAME 'ntUserCodePage' DESC 'Netscape
attributeTypes: ( 2.16.840.1.113730.3.1.534 NAME 'ntUserPrimaryGroupId' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-ORIGIN 'Netscape NT Synchronization' )
attributeTypes: ( 2.16.840.1.113730.3.1.535 NAME 'ntUserHomeDirDrive' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape NT Synchronization' )
attributeTypes: ( 2.16.840.1.113730.3.1.536 NAME 'ntGroupAttributes' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-ORIGIN 'Netscape NT Synchronization' )
+attributeTypes: ( 2.16.840.1.113730.3.1.2334 NAME 'ntUserNtPassword' DESC 'Netscape defined attribute type, synced or generated NT Password hash' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-ORIGIN 'Netscape NT Synchronization' )
attributeTypes: ( 2.16.840.1.113730.3.1.54 NAME 'replicaUseSSL' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.57 NAME 'replicaRoot' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-ORIGIN 'Netscape Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.58 NAME 'replicaBindDn' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-ORIGIN 'Netscape Directory Server' )
@@ -84,7 +85,7 @@ attributeTypes: ( 2.16.840.1.113730.3.1.69 NAME 'subtreeACI' DESC 'Netscape defi
attributeTypes: ( 2.16.840.1.113730.3.1.2084 NAME 'nsSymmetricKey' DESC 'A symmetric key - currently used by attribute encryption' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 SINGLE-VALUE X-ORIGIN 'attribute encryption' )
objectClasses: ( 2.16.840.1.113730.3.2.23 NAME 'netscapeDirectoryServer' DESC 'Netscape defined objectclass' SUP top MUST ( objectclass ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( nsDirectoryServer-oid NAME 'nsDirectoryServer' DESC 'Netscape defined objectclass' SUP top MUST ( objectclass $ nsServerID ) MAY ( serverHostName $ nsServerPort $ nsSecureServerPort $ nsBindPassword $ nsBindDN $ nsBaseDN ) X-ORIGIN 'Netscape Directory Server' )
-objectClasses: ( 2.16.840.1.113730.3.2.8 NAME 'ntUser' DESC 'Netscape defined objectclass' SUP top MUST ( ntUserDomainId ) MAY ( description $ l $ ou $ seeAlso $ ntUserPriv $ ntUserHomeDir $ ntUserComment $ ntUserFlags $ ntUserScriptPath $ ntUserAuthFlags $ ntUserUsrComment $ ntUserParms $ ntUserWorkstations $ ntUserLastLogon $ ntUserLastLogoff $ ntUserAcctExpires $ ntUserMaxStorage $ ntUserUnitsPerWeek $ ntUserLogonHours $ ntUserBadPwCount $ ntUserNumLogons $ ntUserLogonServer $ ntUserCountryCode $ ntUserCodePage $ ntUserUniqueId $ ntUserPrimaryGroupId $ ntUserProfile $ ntUserHomeDirDrive $ ntUserPasswordExpired $ ntUserCreateNewAccount $ ntUserDeleteAccount $ ntUniqueId) X-ORIGIN 'Netscape NT Synchronization' )
+objectClasses: ( 2.16.840.1.113730.3.2.8 NAME 'ntUser' DESC 'Netscape defined objectclass' SUP top MUST ( ntUserDomainId ) MAY ( description $ l $ ou $ seeAlso $ ntUserPriv $ ntUserHomeDir $ ntUserComment $ ntUserFlags $ ntUserScriptPath $ ntUserAuthFlags $ ntUserUsrComment $ ntUserParms $ ntUserWorkstations $ ntUserLastLogon $ ntUserLastLogoff $ ntUserAcctExpires $ ntUserMaxStorage $ ntUserUnitsPerWeek $ ntUserLogonHours $ ntUserBadPwCount $ ntUserNumLogons $ ntUserLogonServer $ ntUserCountryCode $ ntUserCodePage $ ntUserUniqueId $ ntUserPrimaryGroupId $ ntUserProfile $ ntUserHomeDirDrive $ ntUserPasswordExpired $ ntUserCreateNewAccount $ ntUserDeleteAccount $ ntUniqueId $ ntUserNtPassword ) X-ORIGIN 'Netscape NT Synchronization' )
objectClasses: ( 2.16.840.1.113730.3.2.9 NAME 'ntGroup' DESC 'Netscape defined objectclass' SUP top MUST ( ntUserDomainId ) MAY ( description $ l $ ou $ seeAlso $ ntGroupId $ ntGroupAttributes $ ntGroupCreateNewGroup $ ntGroupDeleteGroup $ ntGroupType $ ntUniqueId $ mail ) X-ORIGIN 'Netscape NT Synchronization' )
objectClasses: ( 2.16.840.1.113730.3.2.82 NAME 'nsChangelog4Config' DESC 'Netscape defined objectclass' SUP top MAY ( cn ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.114 NAME 'nsConsumer4Config' DESC 'Netscape defined objectclass' SUP top MAY ( cn ) X-ORIGIN 'Netscape Directory Server' )
7 years, 9 months
dirsrvtests/tests .gitignore ldap/servers
by William Brown
.gitignore | 1
dirsrvtests/tests/tickets/ticket48916_test.py | 253 ++++++++++++++++++++++++++
ldap/servers/plugins/dna/dna.c | 40 +++-
3 files changed, 289 insertions(+), 5 deletions(-)
New commits:
commit 05ebb6d10cf0ec8e03c59bade7f819ddb1fdcf78
Author: William Brown <firstyear(a)redhat.com>
Date: Sat Jul 9 19:02:37 2016 +1000
Ticket 48916 - DNA Threshold set to 0 causes SIGFPE
Bug Description: If the DNA threshold was set to 0, a divide by zero would
occur when requesting ranges.
Fix Description: Prevent the config from setting a value of 0 for dna threshold.
If an existing site has a threshold of 0, we guard the divide operation, and
return an operations error instead.
https://fedorahosted.org/389/ticket/48916
Author: wibrown
Review by: nhosoi, mreynolds (Thank you!)
diff --git a/.gitignore b/.gitignore
index f6583c2..f92bcd8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,6 +5,7 @@ autom4te.cache
.cproject
.project
.settings
+.cache
*.a
*.dirstamp
*.la
diff --git a/dirsrvtests/tests/tickets/ticket48916_test.py b/dirsrvtests/tests/tickets/ticket48916_test.py
new file mode 100644
index 0000000..44c96da
--- /dev/null
+++ b/dirsrvtests/tests/tickets/ticket48916_test.py
@@ -0,0 +1,253 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from lib389.utils import *
+
+DEBUGGING = False
+
+if DEBUGGING:
+ logging.getLogger(__name__).setLevel(logging.DEBUG)
+else:
+ logging.getLogger(__name__).setLevel(logging.INFO)
+
+
+log = logging.getLogger(__name__)
+
+
+class TopologyReplication(object):
+ """The Replication Topology Class"""
+ def __init__(self, master1, master2):
+ """Init"""
+ master1.open()
+ self.master1 = master1
+ master2.open()
+ self.master2 = master2
+
+
+(a)pytest.fixture(scope="module")
+def topology(request):
+ """Create Replication Deployment"""
+
+ # Creating master 1...
+ if DEBUGGING:
+ master1 = DirSrv(verbose=True)
+ else:
+ master1 = DirSrv(verbose=False)
+ args_instance[SER_HOST] = HOST_MASTER_1
+ args_instance[SER_PORT] = PORT_MASTER_1
+ args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_master = args_instance.copy()
+ master1.allocate(args_master)
+ instance_master1 = master1.exists()
+ if instance_master1:
+ master1.delete()
+ master1.create()
+ master1.open()
+ master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
+
+ # Creating master 2...
+ if DEBUGGING:
+ master2 = DirSrv(verbose=True)
+ else:
+ master2 = DirSrv(verbose=False)
+ args_instance[SER_HOST] = HOST_MASTER_2
+ args_instance[SER_PORT] = PORT_MASTER_2
+ args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_master = args_instance.copy()
+ master2.allocate(args_master)
+ instance_master2 = master2.exists()
+ if instance_master2:
+ master2.delete()
+ master2.create()
+ master2.open()
+ master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
+
+ #
+ # Create all the agreements
+ #
+ # Creating agreement from master 1 to master 2
+ properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
+ if not m1_m2_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m1_m2_agmt)
+
+ # Creating agreement from master 2 to master 1
+ properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
+ if not m2_m1_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m2_m1_agmt)
+
+ # Allow the replicas to get situated with the new agreements...
+ time.sleep(5)
+
+ #
+ # Initialize all the agreements
+ #
+ master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
+ master1.waitForReplInit(m1_m2_agmt)
+
+ # Check replication is working...
+ if master1.testReplication(DEFAULT_SUFFIX, master2):
+ log.info('Replication is working.')
+ else:
+ log.fatal('Replication is not working.')
+ assert False
+
+ def fin():
+ """If we are debugging just stop the instances, otherwise remove
+ them
+ """
+ if DEBUGGING:
+ master1.stop()
+ master2.stop()
+ else:
+ master1.delete()
+ master2.delete()
+
+ request.addfinalizer(fin)
+
+ # Clear out the tmp dir
+ master1.clearTmpDir(__file__)
+
+ return TopologyReplication(master1, master2)
+
+
+def _create_user(inst, idnum):
+ inst.add_s(Entry(
+ ('uid=user%s,ou=People,%s' % (idnum, DEFAULT_SUFFIX), {
+ 'objectClass' : 'top account posixAccount'.split(' '),
+ 'cn' : 'user',
+ 'uid' : 'user%s' % idnum,
+ 'homeDirectory' : '/home/user%s' % idnum,
+ 'loginShell' : '/bin/nologin',
+ 'gidNumber' : '-1',
+ 'uidNumber' : '-1',
+ })
+ ))
+
+def test_ticket48916(topology):
+ """
+ https://bugzilla.redhat.com/show_bug.cgi?id=1353629
+
+ This is an issue with ID exhaustion in DNA causing a crash.
+
+ To access each DirSrv instance use: topology.master1, topology.master2,
+ ..., topology.hub1, ..., topology.consumer1,...
+
+
+ """
+
+ if DEBUGGING:
+ # Add debugging steps(if any)...
+ pass
+
+ # Enable the plugin on both servers
+
+ dna_m1 = topology.master1.plugins.get('Distributed Numeric Assignment Plugin')
+ dna_m2 = topology.master2.plugins.get('Distributed Numeric Assignment Plugin')
+
+ # Configure it
+ # Create the container for the ranges to go into.
+
+ topology.master1.add_s(Entry(
+ ('ou=Ranges,%s' % DEFAULT_SUFFIX, {
+ 'objectClass' : 'top organizationalUnit'.split(' '),
+ 'ou' : 'Ranges',
+ })
+ ))
+
+ # Create the dnaAdmin?
+
+ # For now we just pinch the dn from the dna_m* types, and add the relevant child config
+ # but in the future, this could be a better plugin template type from lib389
+
+ config_dn = dna_m1.dn
+
+ topology.master1.add_s(Entry(
+ ('cn=uids,%s' % config_dn, {
+ 'objectClass' : 'top dnaPluginConfig'.split(' '),
+ 'cn': 'uids',
+ 'dnatype': 'uidNumber gidNumber'.split(' '),
+ 'dnafilter': '(objectclass=posixAccount)',
+ 'dnascope': '%s' % DEFAULT_SUFFIX,
+ 'dnaNextValue': '1',
+ 'dnaMaxValue': '50',
+ 'dnasharedcfgdn': 'ou=Ranges,%s' % DEFAULT_SUFFIX,
+ 'dnaThreshold': '0',
+ 'dnaRangeRequestTimeout': '60',
+ 'dnaMagicRegen': '-1',
+ 'dnaRemoteBindDN': 'uid=dnaAdmin,ou=People,%s' % DEFAULT_SUFFIX,
+ 'dnaRemoteBindCred': 'secret123',
+ 'dnaNextRange': '80-90'
+ })
+ ))
+
+ topology.master2.add_s(Entry(
+ ('cn=uids,%s' % config_dn, {
+ 'objectClass' : 'top dnaPluginConfig'.split(' '),
+ 'cn': 'uids',
+ 'dnatype': 'uidNumber gidNumber'.split(' '),
+ 'dnafilter': '(objectclass=posixAccount)',
+ 'dnascope': '%s' % DEFAULT_SUFFIX,
+ 'dnaNextValue': '61',
+ 'dnaMaxValue': '70',
+ 'dnasharedcfgdn': 'ou=Ranges,%s' % DEFAULT_SUFFIX,
+ 'dnaThreshold': '2',
+ 'dnaRangeRequestTimeout': '60',
+ 'dnaMagicRegen': '-1',
+ 'dnaRemoteBindDN': 'uid=dnaAdmin,ou=People,%s' % DEFAULT_SUFFIX,
+ 'dnaRemoteBindCred': 'secret123',
+ })
+ ))
+
+
+ # Enable the plugins
+ dna_m1.enable()
+ dna_m2.enable()
+
+ # Restart the instances
+ topology.master1.restart(60)
+ topology.master2.restart(60)
+
+ # Wait for a replication .....
+ time.sleep(40)
+
+ # Allocate the 10 members to exhaust
+
+ for i in range(1,11):
+ _create_user(topology.master2, i)
+
+ # Allocate the 11th
+ _create_user(topology.master2, 11)
+
+ log.info('Test PASSED')
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
+
diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c
index 2908443..cf640d8 100644
--- a/ldap/servers/plugins/dna/dna.c
+++ b/ldap/servers/plugins/dna/dna.c
@@ -1244,6 +1244,12 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry * e, int apply)
slapi_log_error(SLAPI_LOG_CONFIG, DNA_PLUGIN_SUBSYSTEM,
"----------> %s [%s]\n", DNA_THRESHOLD, value);
+ if (entry->threshold <= 0) {
+ entry->threshold = 1;
+ slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM,
+ "----------> %s too low, setting to [%s]\n", DNA_THRESHOLD, value);
+ }
+
slapi_ch_free_string(&value);
} else {
entry->threshold = 1;
@@ -2171,7 +2177,7 @@ static int dna_dn_is_config(char *dn)
int ret = 0;
slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM,
- "--> dna_is_config\n");
+ "--> dna_is_config %s\n", dn);
if (slapi_dn_issuffix(dn, getPluginDN())) {
ret = 1;
@@ -3404,18 +3410,21 @@ _dna_pre_op_add(Slapi_PBlock *pb, Slapi_Entry *e, char **errstr)
/* Did we already service all of these configured types? */
if (dna_list_contains_types(generated_types, config_entry->types)) {
+ slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, " no types to act upon.\n");
goto next;
}
/* is the entry in scope? */
if (config_entry->scope &&
!slapi_dn_issuffix(dn, config_entry->scope)) {
+ slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, " dn not in scope\n");
goto next;
}
/* is this entry in an excluded scope? */
for (i = 0; config_entry->excludescope && config_entry->excludescope[i]; i++) {
if (slapi_dn_issuffix(dn, slapi_sdn_get_dn(config_entry->excludescope[i]))) {
+ slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, " dn in excluded scope\n");
goto next;
}
}
@@ -3424,7 +3433,8 @@ _dna_pre_op_add(Slapi_PBlock *pb, Slapi_Entry *e, char **errstr)
if (config_entry->slapi_filter) {
ret = slapi_vattr_filter_test(pb, e, config_entry->slapi_filter, 0);
if (LDAP_SUCCESS != ret) {
- goto next;
+ slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, " dn does not match filter\n");
+ goto next;
}
}
@@ -3454,6 +3464,8 @@ _dna_pre_op_add(Slapi_PBlock *pb, Slapi_Entry *e, char **errstr)
}
if (types_to_generate && types_to_generate[0]) {
+
+ slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, " adding %s to %s as -2\n", types_to_generate[0], dn);
/* add - add to entry */
for (i = 0; types_to_generate && types_to_generate[i]; i++) {
slapi_entry_attr_set_charptr(e, types_to_generate[i],
@@ -3492,6 +3504,7 @@ _dna_pre_op_add(Slapi_PBlock *pb, Slapi_Entry *e, char **errstr)
slapi_lock_mutex(config_entry->lock);
ret = dna_first_free_value(config_entry, &setval);
+ slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, " retrieved value %" PRIu64 " ret %d\n", setval, ret);
if (LDAP_SUCCESS != ret) {
/* check if we overflowed the configured range */
if (setval > config_entry->maxval) {
@@ -4022,18 +4035,22 @@ static int dna_be_txn_pre_op(Slapi_PBlock *pb, int modtype)
"--> dna_be_txn_pre_op\n");
if (!slapi_plugin_running(pb)) {
+ slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, " --x bailing, plugin not running\n");
goto bail;
}
if (0 == (dn = dna_get_dn(pb))) {
+ slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, " --x bailing, is dna dn\n");
goto bail;
}
if (dna_dn_is_config(dn)) {
+ slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, " --x bailing is dna config dn\n");
goto bail;
}
if (dna_isrepl(pb)) {
+ slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, " --x bailing replicated operation\n");
/* if repl, the dna values should be already in the entry. */
goto bail;
}
@@ -4045,6 +4062,7 @@ static int dna_be_txn_pre_op(Slapi_PBlock *pb, int modtype)
}
if (e == NULL) {
+ slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, " --x bailing entry is NULL\n");
goto bail;
} else if (LDAP_CHANGETYPE_MODIFY == modtype) {
slapi_pblock_get(pb, SLAPI_MODIFY_MODS, &mods);
@@ -4056,32 +4074,39 @@ static int dna_be_txn_pre_op(Slapi_PBlock *pb, int modtype)
if (!PR_CLIST_IS_EMPTY(dna_global_config)) {
list = PR_LIST_HEAD(dna_global_config);
+ slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, " using global config...\n");
while (list != dna_global_config && LDAP_SUCCESS == ret) {
config_entry = (struct configEntry *) list;
/* Did we already service all of these configured types? */
if (dna_list_contains_types(generated_types, config_entry->types)) {
+ slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, " All types already serviced\n");
goto next;
}
/* is the entry in scope? */
if (config_entry->scope) {
- if (!slapi_dn_issuffix(dn, config_entry->scope))
+ if (!slapi_dn_issuffix(dn, config_entry->scope)) {
+ slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, " Entry not in scope of dnaScope!\n");
goto next;
+ }
}
/* is this entry in an excluded scope? */
for (i = 0; config_entry->excludescope && config_entry->excludescope[i]; i++) {
if (slapi_dn_issuffix(dn, slapi_sdn_get_dn(config_entry->excludescope[i]))) {
+ slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, " Entry in excluded scope, next\n");
goto next;
}
}
-
+
/* does the entry match the filter? */
if (config_entry->slapi_filter) {
- if(LDAP_SUCCESS != slapi_vattr_filter_test(pb,e,config_entry->slapi_filter, 0))
+ if(LDAP_SUCCESS != slapi_vattr_filter_test(pb,e,config_entry->slapi_filter, 0)) {
+ slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, " Entry does not match filter\n");
goto next;
+ }
}
if (LDAP_CHANGETYPE_ADD == modtype) {
@@ -4526,6 +4551,11 @@ dna_release_range(char *range_dn, PRUint64 *lower, PRUint64 *upper)
* it instead of from the active range */
if (config_entry->next_range_lower != 0) {
/* Release up to half of our values from the next range. */
+ if (config_entry->threshold == 0) {
+ ret = LDAP_UNWILLING_TO_PERFORM;
+ goto bail;
+ }
+
release = (((config_entry->next_range_upper - config_entry->next_range_lower + 1) /
2) / config_entry->threshold) * config_entry->threshold;
7 years, 9 months
rpm/389-ds-base.spec.in
by Noriko Hosoi
rpm/389-ds-base.spec.in | 10 ++++------
1 file changed, 4 insertions(+), 6 deletions(-)
New commits:
commit f593ae7790e3372c6812bfe59e58e6d709ec171f
Author: Viktor Ashirov <vashirov(a)redhat.com>
Date: Mon Jul 11 10:10:42 2016 +0200
Ticket #48918 - Upgrade to 389-ds-base >= 1.3.5.5 doesn't install 389-ds-base-snmp
Bug description:
During upgrade from 389-ds-base version <1.3.5.5 additional
package 389-ds-base-snmp is not installed.
Fix description:
Move "Obsoletes:" section from %description to %package.
https://fedorahosted.org/389/ticket/48918
Reviewed by: nhosoi(a)redhat.com.
diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
index d08d379..0924cb5 100644
--- a/rpm/389-ds-base.spec.in
+++ b/rpm/389-ds-base.spec.in
@@ -47,6 +47,8 @@ Group: System Environment/Daemons
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
Obsoletes: %{name}-selinux
Conflicts: selinux-policy-base < 3.9.8
+# upgrade path from monolithic %{name} (including -libs & -devel) to %{name} + %{name}-snmp
+Obsoletes: %{name} <= 1.3.5.4
Requires: %{name}-libs = %{version}-%{release}
Provides: ldif2ldbm
@@ -152,9 +154,6 @@ isn't what you want. Please contact support immediately.
Please see http://seclists.org/oss-sec/2016/q1/363 for more information.
%endif
-# upgrade path from monolithic %{name} (including -libs & -devel) to %{name} + %{name}-snmp
-Obsoletes: %{name} <= 1.3.5.4
-
%package libs
Summary: Core libraries for 389 Directory Server (%{variant})
Group: System Environment/Daemons
@@ -213,13 +212,12 @@ Development Libraries and headers for the 389 Directory Server base package.
Summary: SNMP Agent for 389 Directory Server
Group: System Environment/Daemons
Requires: %{name} = %{version}-%{release}
+# upgrade path from monolithic %{name} (including -libs & -devel) to %{name} + %{name}-snmp
+Obsoletes: %{name} <= 1.3.5.4
%description snmp
SNMP Agent for the 389 Directory Server base package.
-# upgrade path from monolithic %{name} (including -libs & -devel) to %{name} + %{name}-snmp
-Obsoletes: %{name} <= 1.3.5.4
-
%package tests
Summary: The lib389 Continuous Integration Tests
Group: Development/Libraries
7 years, 9 months
ldap/servers
by Noriko Hosoi
ldap/servers/slapd/back-ldbm/dblayer.c | 95 +++++++++++++++++++++------------
1 file changed, 61 insertions(+), 34 deletions(-)
New commits:
commit ff997cd6fa5f2a0678721ba0b6a56fdce327feb0
Author: Noriko Hosoi <nhosoi(a)redhat.com>
Date: Sat Jul 9 18:36:17 2016 -0700
Ticket #48914 - db2bak.pl task enters infinitive loop when bak fs is almost full
Description: A backend helper function dblayer_copyfile returns an error
when any of the copy operation fails. But one of the caller functions
dblayer_backup ignored the error.
This patch checks the error returned from dblayer_copyfile and abort the
back-up.
Also, more error info is added to the log messages.
https://fedorahosted.org/389/ticket/48914
Reviewed by mreynolds(a)redhat.com (Thank you, Mark!!)
diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c
index 93d42be..783d104 100644
--- a/ldap/servers/slapd/back-ldbm/dblayer.c
+++ b/ldap/servers/slapd/back-ldbm/dblayer.c
@@ -5643,18 +5643,16 @@ dblayer_copyfile(char *source, char *destination, int overwrite, int mode)
source_fd = OPEN_FUNCTION(source,O_RDONLY,0);
if (-1 == source_fd)
{
- LDAPDebug1Arg(LDAP_DEBUG_ANY,
- "dblayer_copyfile: failed to open source file: %s\n",
- source);
+ LDAPDebug2Args(LDAP_DEBUG_ANY, "dblayer_copyfile: failed to open source file %s by \"%s\"\n",
+ source, strerror(errno));
goto error;
}
/* Open destination file */
dest_fd = OPEN_FUNCTION(destination,O_CREAT | O_WRONLY, mode);
if (-1 == dest_fd)
{
- LDAPDebug1Arg(LDAP_DEBUG_ANY,
- "dblayer_copyfile: failed to open dest file: %s\n",
- destination);
+ LDAPDebug2Args(LDAP_DEBUG_ANY, "dblayer_copyfile: failed to open dest file %s by \"%s\"\n",
+ destination, strerror(errno));
goto error;
}
LDAPDebug2Args(LDAP_DEBUG_BACKLDBM,
@@ -5662,24 +5660,38 @@ dblayer_copyfile(char *source, char *destination, int overwrite, int mode)
/* Loop round reading data and writing it */
while (1)
{
+ int i;
+ char *ptr = NULL;
return_value = read(source_fd,buffer,64*1024);
- if (return_value <= 0)
- {
+ if (return_value <= 0) {
/* means error or EOF */
- if (return_value < 0)
- {
- LDAPDebug1Arg(LDAP_DEBUG_ANY,
- "dblayer_copyfile: failed to read: %d\n", errno);
+ if (return_value < 0) {
+ LDAPDebug2Args(LDAP_DEBUG_ANY, "dblayer_copyfile: failed to read by \"%s\": rval = %d\n",
+ strerror(errno), return_value);
}
break;
}
bytes_to_write = return_value;
- return_value = write(dest_fd,buffer,bytes_to_write);
- if (return_value != bytes_to_write)
- {
- /* means error */
- LDAPDebug1Arg(LDAP_DEBUG_ANY,
- "dblayer_copyfile: failed to write: %d\n", errno);
+ ptr = buffer;
+#define CPRETRY 4
+ for (i = 0; i < CPRETRY; i++) { /* retry twice */
+ return_value = write(dest_fd, ptr, bytes_to_write);
+ if (return_value == bytes_to_write) {
+ break;
+ } else {
+ /* means error */
+ LDAPDebug(LDAP_DEBUG_ANY, "dblayer_copyfile: failed to write by \"%s\"; real: %d bytes, exp: %d bytes\n",
+ strerror(errno), return_value, bytes_to_write);
+ if (return_value > 0) {
+ bytes_to_write -= return_value;
+ ptr += return_value;
+ LDAPDebug1Arg(LDAP_DEBUG_ANY, "dblayer_copyfile: retrying to write %d bytes\n", bytes_to_write);
+ } else {
+ break;
+ }
+ }
+ }
+ if ((CPRETRY == i) || (return_value < 0)) {
return_value = -1;
break;
}
@@ -5906,10 +5918,15 @@ dblayer_copy_directory(struct ldbminfo *li,
return_value = dblayer_copyfile(filename1, filename2,
0, priv->dblayer_file_mode);
}
+ if (return_value < 0) {
+ LDAPDebug2Args(LDAP_DEBUG_ANY, "dblayer_copy_directory: Failed to copy file %s to %s\n",
+ filename1, filename2);
+ slapi_ch_free((void**)&filename1);
+ slapi_ch_free((void**)&filename2);
+ break;
+ }
slapi_ch_free((void**)&filename1);
slapi_ch_free((void**)&filename2);
- if (0 > return_value)
- break;
(*cnt)++;
}
@@ -6165,9 +6182,14 @@ dblayer_backup(struct ldbminfo *li, char *dest_dir, Slapi_Task *task)
changelog_destdir, DBVERSION_FILENAME);
return_value = dblayer_copyfile(pathname1, pathname2,
0, priv->dblayer_file_mode);
- slapi_ch_free_string(&pathname1);
slapi_ch_free_string(&pathname2);
slapi_ch_free_string(&changelog_destdir);
+ if (0 > return_value) {
+ LDAPDebug1Arg(LDAP_DEBUG_ANY, "Backup: Failed to copy file %s\n", pathname1);
+ slapi_ch_free_string(&pathname1);
+ goto bail;
+ }
+ slapi_ch_free_string(&pathname1);
}
if (priv->dblayer_enable_transactions) {
/* now, get the list of logfiles that still exist */
@@ -6240,15 +6262,15 @@ dblayer_backup(struct ldbminfo *li, char *dest_dir, Slapi_Task *task)
return_value = dblayer_copyfile(pathname1, pathname2,
0, priv->dblayer_file_mode);
if (0 > return_value) {
- LDAPDebug2Args(LDAP_DEBUG_ANY, "Backup: error in "
- "copying file '%s' (err=%d) -- Starting over...\n",
- pathname1, return_value);
+ LDAPDebug2Args(LDAP_DEBUG_ANY, "Backup: error in copying file '%s' (err=%d)\n",
+ pathname1, return_value);
if (task) {
- slapi_task_log_notice(task,
- "Error copying file '%s' (err=%d) -- Starting "
- "over...", pathname1, return_value);
+ slapi_task_log_notice(task, "Error copying file '%s' (err=%d)",
+ pathname1, return_value);
}
- ok = 0;
+ slapi_ch_free((void **)&pathname1);
+ slapi_ch_free((void **)&pathname2);
+ goto bail;
}
if ( g_get_shutdown() || c_get_shutdown() ) {
LDAPDebug0Args(LDAP_DEBUG_ANY, "Backup aborted\n");
@@ -6276,9 +6298,8 @@ dblayer_backup(struct ldbminfo *li, char *dest_dir, Slapi_Task *task)
slapi_task_log_notice(task, "Backing up file %d (%s)", cnt, pathname2);
slapi_task_log_status(task, "Backing up file %d (%s)", cnt, pathname2);
}
- return_value =
- dblayer_copyfile(pathname1, pathname2, 0, priv->dblayer_file_mode);
- if (return_value) {
+ return_value = dblayer_copyfile(pathname1, pathname2, 0, priv->dblayer_file_mode);
+ if (0 > return_value) {
LDAPDebug(LDAP_DEBUG_ANY,
"Backup: error in copying version file "
"(%s -> %s): err=%d\n",
@@ -6458,11 +6479,12 @@ static int dblayer_copy_dirand_contents(char* src_dir, char* dst_dir, int mode,
slapi_task_log_status(task, "Moving file %s",
filename2);
}
- return_value = dblayer_copyfile(filename1, filename2, 0,
- mode);
+ return_value = dblayer_copyfile(filename1, filename2, 0, mode);
}
- if (0 > return_value)
+ if (0 > return_value) {
+ LDAPDebug1Arg(LDAP_DEBUG_ANY, "dblayer_copy_dirand_contents: failed to copy file %s\n", filename1);
break;
+ }
}
PR_CloseDir(dirhandle);
}
@@ -6838,6 +6860,10 @@ int dblayer_restore(struct ldbminfo *li, char *src_dir, Slapi_Task *task, char *
changelogdir, DBVERSION_FILENAME);
return_value = dblayer_copyfile(filename1, filename2,
0, priv->dblayer_file_mode);
+ if (0 > return_value) {
+ LDAPDebug1Arg(LDAP_DEBUG_ANY, "Restore: failed to copy file %s\n", filename1);
+ goto error_out;
+ }
}
continue;
}
@@ -6897,6 +6923,7 @@ int dblayer_restore(struct ldbminfo *li, char *src_dir, Slapi_Task *task, char *
return_value = dblayer_copyfile(filename1, filename2, 0,
priv->dblayer_file_mode);
if (0 > return_value) {
+ LDAPDebug1Arg(LDAP_DEBUG_ANY, "Restore: failed to copy file %s\n", filename1);
goto error_out;
}
cnt++;
7 years, 9 months
Branch '389-ds-base-1.3.1' - ldap/servers
by Mark Reynolds
ldap/servers/plugins/replication/repl5_inc_protocol.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
New commits:
commit 6dec417242238791cad12e2420ebb2a1602b8e8f
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Mon Jul 11 10:30:04 2016 -0400
Ticket 48767 - flow control in replication also blocks receiving results
Bug Description: In ticket 47942 a flow control was introduced to reduce
the load of a replication consumer. It adds some pauses
in the asynch sending of updates. Unfortunately while it
pauses it holds the reader lock, so that the result reader
thread is also paused.
Fix Description: If we need to pause the sending of updates then also release
the Result Data lock so the reader thread is not blocked.
https://fedorahosted.org/389/ticket/48767
Reviewed by: nhosi(Thanks!)
(cherry picked from commit ba636587e77423c7773df60894344dea0377c36f)
diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c
index e02e883..fd0e3e5 100644
--- a/ldap/servers/plugins/replication/repl5_inc_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c
@@ -472,9 +472,11 @@ repl5_inc_flow_control_results(Repl_Agmt *agmt, result_data *rd)
if ((rd->last_message_id_received <= rd->last_message_id_sent) &&
((rd->last_message_id_sent - rd->last_message_id_received) >= agmt_get_flowcontrolwindow(agmt))) {
rd->flowcontrol_detection++;
+ PR_Unlock(rd->lock);
DS_Sleep(PR_MillisecondsToInterval(agmt_get_flowcontrolpause(agmt)));
+ } else {
+ PR_Unlock(rd->lock);
}
- PR_Unlock(rd->lock);
}
static int
7 years, 9 months