Branch '389-ds-base-1.3.3' - dirsrvtests/suites dirsrvtests/tickets ldap/servers
by Mark Reynolds
dirsrvtests/suites/dynamic-plugins/plugin_tests.py | 486 ++++++++++---
dirsrvtests/suites/dynamic-plugins/stress_tests.py | 10
dirsrvtests/suites/dynamic-plugins/test_dynamic_plugins.py | 463 +++++++++---
dirsrvtests/tickets/ticket47560_test.py | 2
ldap/servers/plugins/acctpolicy/acct_config.c | 8
ldap/servers/plugins/acctpolicy/acct_init.c | 99 ++
ldap/servers/plugins/acctpolicy/acct_plugin.c | 178 ++++
ldap/servers/plugins/acctpolicy/acct_util.c | 19
ldap/servers/plugins/acctpolicy/acctpolicy.h | 25
ldap/servers/plugins/linkedattrs/fixup_task.c | 4
ldap/servers/plugins/memberof/memberof_config.c | 1
ldap/servers/slapd/dse.c | 2
ldap/servers/slapd/plugin.c | 45 -
ldap/servers/slapd/slapi-plugin.h | 6
ldap/servers/slapd/thread_data.c | 29
15 files changed, 1112 insertions(+), 265 deletions(-)
New commits:
commit 7be03f890c1773727332686f6c164450d4a31996
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Tue Dec 23 21:40:00 2014 -0500
Ticket 47451 - Dynamic plugins - fixed thread synchronization
Description: Made various fixes and overall improvements to the dynamic
plugin feature, and the CI test suite.
dirsrvtests/suites/dynamic-plugins/plugin_tests.py
dirsrvtests/suites/dynamic-plugins/stress_tests.py
dirsrvtests/suites/dynamic-plugins/test_dynamic_plugins.py
- Improved/intensified stress test
- Improved task monitoring
- Added a replication run to the entire test suite
- Added tests for "shared config areas": MO & RI plugins
ldap/servers/plugins/acctpolicy/acct_config.c
ldap/servers/plugins/acctpolicy/acct_init.c
ldap/servers/plugins/acctpolicy/acct_plugin.c
ldap/servers/plugins/acctpolicy/acct_util.c
ldap/servers/plugins/acctpolicy/acctpolicy.h
- Added the necessary postop calls to check for config updates
ldap/servers/plugins/linkedattrs/fixup_task.c
- Fixed logging issue
ldap/servers/plugins/memberof/memberof_config.c
- Fixed double free/crash
ldap/servers/slapd/dse.c
- The ADD entry was incorrectly being set to NULL (memory leak)
ldap/servers/slapd/plugin.c
- Improved thread synchronization/fixed race condition
- Fixed memory leak when deleting plugin for the plugin config area
ldap/servers/slapd/slapi-plugin.h
ldap/servers/slapd/thread_data.c
- Revised plugin lock thread data wrappers
https://fedorahosted.org/389/ticket/47451
Jenkins: Passed
Valgrind: Passed
Reviewed by: nhosoi(Thanks!)
(cherry picked from commit 14e5422328d8f116916efb4a9e192b8db4686e44)
diff --git a/dirsrvtests/suites/dynamic-plugins/plugin_tests.py b/dirsrvtests/suites/dynamic-plugins/plugin_tests.py
index fa88145..e147be5 100644
--- a/dirsrvtests/suites/dynamic-plugins/plugin_tests.py
+++ b/dirsrvtests/suites/dynamic-plugins/plugin_tests.py
@@ -31,6 +31,7 @@ BRANCH2_DN = 'ou=branch2,' + DEFAULT_SUFFIX
GROUP_OU = 'ou=groups,' + DEFAULT_SUFFIX
PEOPLE_OU = 'ou=people,' + DEFAULT_SUFFIX
GROUP_DN = 'cn=group,' + DEFAULT_SUFFIX
+CONFIG_AREA = 'nsslapd-pluginConfigArea'
'''
Functional tests for each plugin
@@ -85,6 +86,35 @@ def test_dependency(inst, plugin):
################################################################################
#
+# Wait for task to complete
+#
+################################################################################
+def wait_for_task(conn, task_dn):
+ finished = False
+ count = 0
+ while count < 60:
+ try:
+ task_entry = conn.search_s(task_dn, ldap.SCOPE_BASE, 'objectclass=*')
+ if not task_entry:
+ log.fatal('wait_for_task: Search failed to find task: ' + task_dn)
+ assert False
+ if task_entry[0].hasAttr('nstaskexitcode'):
+ # task is done
+ finished = True
+ break
+ except ldap.LDAPError, e:
+ log.fatal('wait_for_task: Search failed: ' + e.message['desc'])
+ assert False
+
+ time.sleep(1)
+ count += 1
+ if not finished:
+ log.error('wait_for_task: Task (%s) did not complete!' % task_dn)
+ assert False
+
+
+################################################################################
+#
# Test Account Policy Plugin (0)
#
################################################################################
@@ -97,6 +127,7 @@ def test_acctpolicy(inst, args=None):
return True
CONFIG_DN = 'cn=config,cn=Account Policy Plugin,cn=plugins,cn=config'
+
log.info('Testing ' + PLUGIN_ACCT_POLICY + '...')
############################################################################
@@ -123,23 +154,12 @@ def test_acctpolicy(inst, args=None):
log.error('test_acctpolicy: Failed to add config entry: error ' + e.message['desc'])
assert False
- # Now set the config entry in the plugin entry
- #try:
- # inst.modify_s('cn=' + PLUGIN_ACCT_POLICY + ',cn=plugins,cn=config',
- # [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', CONFIG_DN)])
- #except ldap.LDAPError, e:
- # log.error('test_acctpolicy: Failed to set config entry in plugin entry: error ' + e.message['desc'])
- # assert False
-
############################################################################
# Test plugin
############################################################################
- # !!!! acctpolicy does have have a dse callabck to check for live updates - restart plugin for now !!!!
- inst.plugins.disable(name=PLUGIN_ACCT_POLICY)
- inst.plugins.enable(name=PLUGIN_ACCT_POLICY)
-
# Add an entry
+ time.sleep(1)
try:
inst.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(),
'sn': '1',
@@ -154,10 +174,11 @@ def test_acctpolicy(inst, args=None):
try:
inst.simple_bind_s(USER1_DN, "password")
except ldap.LDAPError, e:
- log.error('test_acctpolicy:Failed to bind as user1: ' + e.message['desc'])
+ log.error('test_acctpolicy: Failed to bind as user1: ' + e.message['desc'])
assert False
# Bind as Root DN
+ time.sleep(1)
try:
inst.simple_bind_s(DN_DM, PASSWORD)
except ldap.LDAPError, e:
@@ -185,14 +206,11 @@ def test_acctpolicy(inst, args=None):
log.error('test_acctpolicy: Failed to modify config entry: error ' + e.message['desc'])
assert False
- # !!!! must restart for now !!!!!
- inst.plugins.disable(name=PLUGIN_ACCT_POLICY)
- inst.plugins.enable(name=PLUGIN_ACCT_POLICY)
-
############################################################################
# Test plugin
############################################################################
+ time.sleep(1)
# login as user
try:
inst.simple_bind_s(USER1_DN, "password")
@@ -200,6 +218,7 @@ def test_acctpolicy(inst, args=None):
log.error('test_acctpolicy: Failed to bind(2nd) as user1: ' + e.message['desc'])
assert False
+ time.sleep(1)
# Bind as Root DN
try:
inst.simple_bind_s(DN_DM, PASSWORD)
@@ -498,7 +517,7 @@ def test_automember(inst, args=None):
log.error('test_automember: Failed to user3 to branch2: error ' + e.message['desc'])
assert False
- # Check the group - uniquemember sahould not exist
+ # Check the group - uniquemember should not exist
try:
entries = inst.search_s(GROUP_DN, ldap.SCOPE_BASE,
'(uniquemember=' + BUSER3_DN + ')')
@@ -512,9 +531,10 @@ def test_automember(inst, args=None):
# Enable plugin
inst.plugins.enable(name=PLUGIN_AUTOMEMBER)
+ TASK_DN = 'cn=task-' + str(int(time.time())) + ',cn=automember rebuild membership,cn=tasks,cn=config'
# Add the task
try:
- inst.add_s(Entry(('cn=task-' + str(int(time.time())) + ',cn=automember rebuild membership,cn=tasks,cn=config', {
+ inst.add_s(Entry((TASK_DN, {
'objectclass': 'top extensibleObject'.split(),
'basedn': 'ou=branch2,' + DEFAULT_SUFFIX,
'filter': 'objectclass=top'})))
@@ -522,7 +542,7 @@ def test_automember(inst, args=None):
log.error('test_automember: Failed to add task: error ' + e.message['desc'])
assert False
- time.sleep(3) # Wait for the task to do its work
+ wait_for_task(inst, TASK_DN)
# Verify the fixup task worked
try:
@@ -722,7 +742,7 @@ def test_dna(inst, args=None):
try:
inst.delete_s(USER1_DN)
except ldap.LDAPError, e:
- log.error('test_automember: Failed to delete test entry1: ' + e.message['desc'])
+ log.error('test_dna: Failed to delete test entry1: ' + e.message['desc'])
assert False
inst.plugins.disable(name=PLUGIN_DNA)
@@ -914,32 +934,11 @@ def test_linkedattrs(inst, args=None):
log.fatal('test_linkedattrs: Search for user1 failed: ' + e.message['desc'])
assert False
- # Verify that the task does not work yet(not until we enable the plugin)
- try:
- inst.add_s(Entry(('cn=task-' + str(int(time.time())) + ',cn=fixup linked attributes,cn=tasks,cn=config', {
- 'objectclass': 'top extensibleObject'.split(),
- 'basedn': DEFAULT_SUFFIX,
- 'filter': '(objectclass=top)'})))
- except ldap.LDAPError, e:
- log.error('test_linkedattrs: Failed to add task: error ' + e.message['desc'])
- assert False
-
- time.sleep(3) # Wait for the task to do, or not do, its work
-
- # The entry should still not have a manager attribute
- try:
- entries = inst.search_s(USER2_DN, ldap.SCOPE_BASE, '(manager=*)')
- if entries:
- log.fatal('test_linkedattrs: user2 incorrectly has a "manager" attr')
- assert False
- except ldap.LDAPError, e:
- log.fatal('test_linkedattrs: Search for user2 failed: ' + e.message['desc'])
- assert False
-
# Enable the plugin and rerun the task entry
inst.plugins.enable(name=PLUGIN_LINKED_ATTRS)
# Add the task again
+ TASK_DN = 'cn=task-' + str(int(time.time())) + ',cn=fixup linked attributes,cn=tasks,cn=config'
try:
inst.add_s(Entry(('cn=task-' + str(int(time.time())) + ',cn=fixup linked attributes,cn=tasks,cn=config', {
'objectclass': 'top extensibleObject'.split(),
@@ -949,7 +948,7 @@ def test_linkedattrs(inst, args=None):
log.error('test_linkedattrs: Failed to add task: error ' + e.message['desc'])
assert False
- time.sleep(3) # Wait for the task to do its work
+ wait_for_task(inst, TASK_DN)
# Check if user2 now has a manager attribute now
try:
@@ -1011,6 +1010,7 @@ def test_memberof(inst, args=None):
return
PLUGIN_DN = 'cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config'
+ SHARED_CONFIG_DN = 'cn=memberOf Config,' + DEFAULT_SUFFIX
log.info('Testing ' + PLUGIN_MEMBER_OF + '...')
@@ -1048,6 +1048,16 @@ def test_memberof(inst, args=None):
log.error('test_memberof: Failed to add group: error ' + e.message['desc'])
assert False
+ try:
+ inst.add_s(Entry((SHARED_CONFIG_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'memberofgroupattr': 'member',
+ 'memberofattr': 'memberof'
+ })))
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to shared config entry: error ' + e.message['desc'])
+ assert False
+
# Check if the user now has a "memberOf" attribute
try:
entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)')
@@ -1069,7 +1079,7 @@ def test_memberof(inst, args=None):
try:
entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)')
if entries:
- log.fatal('test_memberof: user1 incorrect has memberOf attr')
+ log.fatal('test_memberof: user1 incorrectly has memberOf attr')
assert False
except ldap.LDAPError, e:
log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc'])
@@ -1116,51 +1126,169 @@ def test_memberof(inst, args=None):
try:
entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)')
if entries:
- log.fatal('test_memberof: user1 incorrect has memberOf attr')
+ log.fatal('test_memberof: user1 incorrectly has memberOf attr')
assert False
except ldap.LDAPError, e:
log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc'])
assert False
############################################################################
- # Test Fixup Task
+ # Set the shared config entry and test the plugin
############################################################################
- inst.plugins.disable(name=PLUGIN_MEMBER_OF)
+ # The shared config entry uses "member" - the above test uses "uniquemember"
+ try:
+ inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, CONFIG_AREA, SHARED_CONFIG_DN)])
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to set plugin area: error ' + e.message['desc'])
+ assert False
+
+ # Delete the test entries then readd them to start with a clean slate
+ try:
+ inst.delete_s(USER1_DN)
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to delete test entry1: ' + e.message['desc'])
+ assert False
+
+ try:
+ inst.delete_s(GROUP_DN)
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to delete test group: ' + e.message['desc'])
+ assert False
+
+ try:
+ inst.add_s(Entry((USER1_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user1'
+ })))
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to add user1: error ' + e.message['desc'])
+ assert False
+
+ try:
+ inst.add_s(Entry((GROUP_DN, {
+ 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
+ 'cn': 'group',
+ 'member': USER1_DN
+ })))
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to add group: error ' + e.message['desc'])
+ assert False
+
+ # Test the shared config
+ # Check if the user now has a "memberOf" attribute
+ try:
+ entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)')
+ if not entries:
+ log.fatal('test_memberof: user1 missing memberOf')
+ assert False
+ except ldap.LDAPError, e:
+ log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc'])
+ assert False
+
+ # Remove "member" should remove "memberOf" from the entry
+ try:
+ inst.modify_s(GROUP_DN, [(ldap.MOD_DELETE, 'member', None)])
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to delete member: error ' + e.message['desc'])
+ assert False
+
+ # Check that "memberOf" was removed
+ try:
+ entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)')
+ if entries:
+ log.fatal('test_memberof: user1 incorrectly has memberOf attr')
+ assert False
+ except ldap.LDAPError, e:
+ log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc'])
+ assert False
+
+ ############################################################################
+ # Change the shared config entry to use 'uniquemember' and test the plugin
+ ############################################################################
+
+ try:
+ inst.modify_s(SHARED_CONFIG_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'uniquemember')])
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to set shared plugin entry(uniquemember): error '
+ + e.message['desc'])
+ assert False
- # Add uniquemember, should not update USER1
try:
inst.modify_s(GROUP_DN, [(ldap.MOD_REPLACE, 'uniquemember', USER1_DN)])
except ldap.LDAPError, e:
log.error('test_memberof: Failed to add uniquemember: error ' + e.message['desc'])
assert False
- # Check for "memberOf"
+ # Check if the user now has a "memberOf" attribute
+ try:
+ entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)')
+ if not entries:
+ log.fatal('test_memberof: user1 missing memberOf')
+ assert False
+ except ldap.LDAPError, e:
+ log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc'])
+ assert False
+
+ # Remove "uniquemember" should remove "memberOf" from the entry
+ try:
+ inst.modify_s(GROUP_DN, [(ldap.MOD_DELETE, 'uniquemember', None)])
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to delete member: error ' + e.message['desc'])
+ assert False
+
+ # Check that "memberOf" was removed
try:
entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)')
if entries:
- log.fatal('test_memberof: user1 incorrect has memberOf attr')
+ log.fatal('test_memberof: user1 incorrectly has memberOf attr')
assert False
except ldap.LDAPError, e:
log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc'])
assert False
- # Run fixup task while plugin disabled - should not add "memberOf
- # Verify that the task does not work yet(not until we enable the plugin)
+ ############################################################################
+ # Remove shared config from plugin, and retest
+ ############################################################################
+
+ # First change the plugin to use member before we move the shared config that uses uniquemember
try:
- inst.add_s(Entry(('cn=task-' + str(int(time.time())) + ',' + DN_MBO_TASK, {
- 'objectclass': 'top extensibleObject'.split(),
- 'basedn': DEFAULT_SUFFIX,
- 'filter': 'objectclass=top'})))
- except ldap.NO_SUCH_OBJECT:
- pass
+ inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'member')])
except ldap.LDAPError, e:
- log.error('test_memberof: Failed to add task: error ' + e.message['desc'])
+ log.error('test_memberof: Failed to update config(uniquemember): error ' + e.message['desc'])
assert False
- time.sleep(3) # Wait for the task to do, or not do, its work
+ # Remove shared config from plugin
+ try:
+ inst.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, CONFIG_AREA, None)])
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to add uniquemember: error ' + e.message['desc'])
+ assert False
- # Check for "memberOf"
+ try:
+ inst.modify_s(GROUP_DN, [(ldap.MOD_REPLACE, 'member', USER1_DN)])
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to add uniquemember: error ' + e.message['desc'])
+ assert False
+
+ # Check if the user now has a "memberOf" attribute
+ try:
+ entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)')
+ if not entries:
+ log.fatal('test_memberof: user1 missing memberOf')
+ assert False
+ except ldap.LDAPError, e:
+ log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc'])
+ assert False
+
+ # Remove "uniquemember" should remove "memberOf" from the entry
+ try:
+ inst.modify_s(GROUP_DN, [(ldap.MOD_DELETE, 'member', None)])
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to delete member: error ' + e.message['desc'])
+ assert False
+
+ # Check that "memberOf" was removed
try:
entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)')
if entries:
@@ -1170,11 +1298,42 @@ def test_memberof(inst, args=None):
log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc'])
assert False
+ ############################################################################
+ # Test Fixup Task
+ ############################################################################
+
+ inst.plugins.disable(name=PLUGIN_MEMBER_OF)
+
+ # First change the plugin to use uniquemember
+ try:
+ inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'uniquemember')])
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to update config(uniquemember): error ' + e.message['desc'])
+ assert False
+
+ # Add uniquemember, should not update USER1
+ try:
+ inst.modify_s(GROUP_DN, [(ldap.MOD_REPLACE, 'uniquemember', USER1_DN)])
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to add uniquemember: error ' + e.message['desc'])
+ assert False
+
+ # Check for "memberOf"
+ try:
+ entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)')
+ if entries:
+ log.fatal('test_memberof: user1 incorrect has memberOf attr')
+ assert False
+ except ldap.LDAPError, e:
+ log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc'])
+ assert False
+
# Enable the plugin, and run the task
inst.plugins.enable(name=PLUGIN_MEMBER_OF)
+ TASK_DN = 'cn=task-' + str(int(time.time())) + ',' + DN_MBO_TASK
try:
- inst.add_s(Entry(('cn=task-' + str(int(time.time())) + ',' + DN_MBO_TASK, {
+ inst.add_s(Entry((TASK_DN, {
'objectclass': 'top extensibleObject'.split(),
'basedn': DEFAULT_SUFFIX,
'filter': 'objectclass=top'})))
@@ -1182,7 +1341,7 @@ def test_memberof(inst, args=None):
log.error('test_memberof: Failed to add task: error ' + e.message['desc'])
assert False
- time.sleep(3) # Wait for the task to do its work
+ wait_for_task(inst, TASK_DN)
# Check for "memberOf"
try:
@@ -1216,6 +1375,12 @@ def test_memberof(inst, args=None):
log.error('test_memberof: Failed to delete test group: ' + e.message['desc'])
assert False
+ try:
+ inst.delete_s(SHARED_CONFIG_DN)
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to delete shared config entry: ' + e.message['desc'])
+ assert False
+
############################################################################
# Test passed
############################################################################
@@ -1286,9 +1451,6 @@ def test_mep(inst, args=None):
log.error('test_mep: Failed to add template entry: error ' + e.message['desc'])
assert False
- # log.info('geb.....')
- # time.sleep(30)
-
# Add the config entry
try:
inst.add_s(Entry((CONFIG_DN, {
@@ -1456,19 +1618,10 @@ def test_passthru(inst, args=None):
# Create second instance
passthru_inst = DirSrv(verbose=False)
- #if installation1_prefix:
- # args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Args for the master1 instance
- """
- args_instance[SER_HOST] = '127.0.0.1'
- args_instance[SER_PORT] = '33333'
- args_instance[SER_SERVERID_PROP] = 'passthru'
- """
- args_instance[SER_HOST] = 'localhost.localdomain'
+ # Args for the instance
+ args_instance[SER_HOST] = LOCALHOST
args_instance[SER_PORT] = 33333
args_instance[SER_SERVERID_PROP] = 'passthru'
-
args_instance[SER_CREATION_SUFFIX] = PASS_SUFFIX1
args_passthru_inst = args_instance.copy()
passthru_inst.allocate(args_passthru_inst)
@@ -1615,6 +1768,7 @@ def test_referint(inst, args=None):
log.info('Testing ' + PLUGIN_REFER_INTEGRITY + '...')
PLUGIN_DN = 'cn=' + PLUGIN_REFER_INTEGRITY + ',cn=plugins,cn=config'
+ SHARED_CONFIG_DN = 'cn=RI Config,' + DEFAULT_SUFFIX
############################################################################
# Configure plugin
@@ -1660,6 +1814,28 @@ def test_referint(inst, args=None):
log.error('test_referint: Failed to add group: error ' + e.message['desc'])
assert False
+ # Grab the referint log file from the plugin
+
+ try:
+ entries = inst.search_s(PLUGIN_DN, ldap.SCOPE_BASE, '(objectclass=top)')
+ REFERINT_LOGFILE = entries[0].getValue('referint-logfile')
+ except ldap.LDAPError, e:
+ log.fatal('test_referint: Unable to search plugin entry: ' + e.message['desc'])
+ assert False
+
+ # Add shared config entry
+ try:
+ inst.add_s(Entry((SHARED_CONFIG_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'referint-membership-attr': 'member',
+ 'referint-update-delay': '0',
+ 'referint-logfile': REFERINT_LOGFILE,
+ 'referint-logchanges': '0'
+ })))
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to shared config entry: error ' + e.message['desc'])
+ assert False
+
# Delete a user
try:
inst.delete_s(USER1_DN)
@@ -1709,6 +1885,150 @@ def test_referint(inst, args=None):
assert False
############################################################################
+ # Set the shared config entry and test the plugin
+ ############################################################################
+
+ # The shared config entry uses "member" - the above test used "uniquemember"
+ try:
+ inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, CONFIG_AREA, SHARED_CONFIG_DN)])
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to set plugin area: error ' + e.message['desc'])
+ assert False
+
+ # Delete the group, and readd everything
+ try:
+ inst.delete_s(GROUP_DN)
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to delete group: ' + e.message['desc'])
+ assert False
+
+ try:
+ inst.add_s(Entry((USER1_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user1'
+ })))
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to add user1: error ' + e.message['desc'])
+ assert False
+
+ try:
+ inst.add_s(Entry((USER2_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user2'
+ })))
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to add user2: error ' + e.message['desc'])
+ assert False
+
+ try:
+ inst.add_s(Entry((GROUP_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'cn': 'group',
+ 'member': USER1_DN,
+ 'uniquemember': USER2_DN
+ })))
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to add group: error ' + e.message['desc'])
+ assert False
+
+ # Delete a user
+ try:
+ inst.delete_s(USER1_DN)
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to delete user1: ' + e.message['desc'])
+ assert False
+
+ # Check for integrity
+ try:
+ entry = inst.search_s(GROUP_DN, ldap.SCOPE_BASE, '(member=' + USER1_DN + ')')
+ if entry:
+ log.error('test_referint: user1 was not removed from group')
+ assert False
+ except ldap.LDAPError, e:
+ log.fatal('test_referint: Unable to search group: ' + e.message['desc'])
+ assert False
+
+ ############################################################################
+ # Change the shared config entry to use 'uniquemember' and test the plugin
+ ############################################################################
+
+ try:
+ inst.modify_s(SHARED_CONFIG_DN, [(ldap.MOD_REPLACE, 'referint-membership-attr', 'uniquemember')])
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to set shared plugin entry(uniquemember): error '
+ + e.message['desc'])
+ assert False
+
+ # Delete a user
+ try:
+ inst.delete_s(USER2_DN)
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to delete user1: ' + e.message['desc'])
+ assert False
+
+ # Check for integrity
+ try:
+ entry = inst.search_s(GROUP_DN, ldap.SCOPE_BASE, '(uniquemember=' + USER2_DN + ')')
+ if entry:
+ log.error('test_referint: user2 was not removed from group')
+ assert False
+ except ldap.LDAPError, e:
+ log.fatal('test_referint: Unable to search group: ' + e.message['desc'])
+ assert False
+
+ ############################################################################
+ # Remove shared config from plugin, and retest
+ ############################################################################
+
+ # First change the plugin to use member before we move the shared config that uses uniquemember
+ try:
+ inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'referint-membership-attr', 'member')])
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to update config(uniquemember): error ' + e.message['desc'])
+ assert False
+
+ # Remove shared config from plugin
+ try:
+ inst.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, CONFIG_AREA, None)])
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to add uniquemember: error ' + e.message['desc'])
+ assert False
+
+ # Add test user
+ try:
+ inst.add_s(Entry((USER1_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user1'
+ })))
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to add user1: error ' + e.message['desc'])
+ assert False
+
+ # Add user to group
+ try:
+ inst.modify_s(GROUP_DN, [(ldap.MOD_REPLACE, 'member', USER1_DN)])
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to add uniquemember: error ' + e.message['desc'])
+ assert False
+
+ # Delete a user
+ try:
+ inst.delete_s(USER1_DN)
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to delete user1: ' + e.message['desc'])
+ assert False
+
+ # Check for integrity
+ try:
+ entry = inst.search_s(GROUP_DN, ldap.SCOPE_BASE, '(member=' + USER1_DN + ')')
+ if entry:
+ log.error('test_referint: user1 was not removed from group')
+ assert False
+ except ldap.LDAPError, e:
+ log.fatal('test_referint: Unable to search group: ' + e.message['desc'])
+ assert False
+
+ ############################################################################
# Test plugin dependency
############################################################################
@@ -1721,7 +2041,13 @@ def test_referint(inst, args=None):
try:
inst.delete_s(GROUP_DN)
except ldap.LDAPError, e:
- log.error('test_referint: Failed to delete user1: ' + e.message['desc'])
+ log.error('test_referint: Failed to delete group: ' + e.message['desc'])
+ assert False
+
+ try:
+ inst.delete_s(SHARED_CONFIG_DN)
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to delete shared config entry: ' + e.message['desc'])
assert False
############################################################################
@@ -1863,7 +2189,7 @@ def test_rootdn(inst, args=None):
'userpassword': 'password'
})))
except ldap.LDAPError, e:
- log.error('test_retrocl: Failed to add user1: error ' + e.message['desc'])
+ log.error('test_rootdn: Failed to add user1: error ' + e.message['desc'])
assert False
# Set an aci so we can modify the plugin after ew deny the root dn
diff --git a/dirsrvtests/suites/dynamic-plugins/stress_tests.py b/dirsrvtests/suites/dynamic-plugins/stress_tests.py
index a1f666d..f1a34b4 100644
--- a/dirsrvtests/suites/dynamic-plugins/stress_tests.py
+++ b/dirsrvtests/suites/dynamic-plugins/stress_tests.py
@@ -21,6 +21,7 @@ from constants import *
log = logging.getLogger(__name__)
NUM_USERS = 250
+GROUP_DN = 'cn=stress-group,' + DEFAULT_SUFFIX
def openConnection(inst):
@@ -58,6 +59,14 @@ def configureMO(inst):
assert False
+def cleanup(conn):
+ try:
+ conn.delete_s(GROUP_DN)
+ except ldap.LDAPError, e:
+ log.error('cleanup: failed to delete group (' + GROUP_DN + ') error: ' + e.message['desc'])
+ assert False
+
+
class DelUsers(threading.Thread):
def __init__(self, inst, rdnval):
threading.Thread.__init__(self)
@@ -97,7 +106,6 @@ class AddUsers(threading.Thread):
idx = 0
if self.addToGroup:
- GROUP_DN = 'cn=stress-group,' + DEFAULT_SUFFIX
try:
conn.add_s(Entry((GROUP_DN,
{'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
diff --git a/dirsrvtests/suites/dynamic-plugins/test_dynamic_plugins.py b/dirsrvtests/suites/dynamic-plugins/test_dynamic_plugins.py
index 3677fd5..288505b 100644
--- a/dirsrvtests/suites/dynamic-plugins/test_dynamic_plugins.py
+++ b/dirsrvtests/suites/dynamic-plugins/test_dynamic_plugins.py
@@ -31,6 +31,12 @@ class TopologyStandalone(object):
self.standalone = standalone
+def repl_fail(replica):
+ # remove replica instance, and assert failure
+ replica.delete()
+ assert False
+
+
@pytest.fixture(scope="module")
def topology(request):
'''
@@ -128,7 +134,10 @@ def test_dynamic_plugins(topology):
Test Dynamic Plugins - exercise each plugin and its main features, while
changing the configuration without restarting the server.
- Need to test: functionality, stability, and stress.
+ Need to test: functionality, stability, and stress. These tests need to run
+ with replication disabled, and with replication setup with a
+ second instance. Then test if replication is working, and we have
+ same entries on each side.
Functionality - Make sure that as configuration changes are made they take
effect immediately. Cross plugin interaction (e.g. automember/memberOf)
@@ -137,17 +146,21 @@ def test_dynamic_plugins(topology):
Memory Corruption - Restart the plugins many times, and in different orders and test
functionality, and stability. This will excerise the internal
- plugin linked lists, dse callabcks, and task handlers.
+ plugin linked lists, dse callbacks, and task handlers.
- Stress - Put the server under some type of load that is using a particular
- plugin for each operation, and then make changes to that plugin.
- The new changes should take effect, and the server should not crash.
+ Stress - Put the server under load that will trigger multiple plugins(MO, RI, DNA, etc)
+ Restart various plugins while these operations are going on. Perform this test
+ 5 times(stress_max_run).
"""
- ############################################################################
- # Test plugin functionality
- ############################################################################
+ REPLICA_PORT = 33334
+ RUV_FILTER = '(&(nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff)(objectclass=nstombstone))'
+ master_maxcsn = 0
+ replica_maxcsn = 0
+ msg = ' (no replication)'
+ replication_run = False
+ stress_max_runs = 5
# First enable dynamic plugins
try:
@@ -156,132 +169,337 @@ def test_dynamic_plugins(topology):
ldap.error('Failed to enable dynamic plugin!' + e.message['desc'])
assert False
- log.info('#####################################################')
- log.info('Testing Dynamic Plugins Functionality...')
- log.info('#####################################################\n')
-
- plugin_tests.test_all_plugins(topology.standalone)
-
- log.info('#####################################################')
- log.info('Successfully Tested Dynamic Plugins Functionality.')
- log.info('#####################################################\n')
-
- ############################################################################
- # Test the stability by exercising the internal lists, callabcks, and task handlers
- ############################################################################
-
- log.info('#####################################################')
- log.info('Testing Dynamic Plugins for Memory Corruption...')
- log.info('#####################################################\n')
- prev_plugin_test = None
- prev_prev_plugin_test = None
- for plugin_test in plugin_tests.func_tests:
+ while 1:
#
- # Restart the plugin several times (and prev plugins) - work that linked list
+ # First run the tests with replication disabled, then rerun them with replication set up
#
- plugin_test(topology.standalone, "restart")
- if prev_prev_plugin_test:
- prev_prev_plugin_test(topology.standalone, "restart")
+ ############################################################################
+ # Test plugin functionality
+ ############################################################################
+
+ log.info('####################################################################')
+ log.info('Testing Dynamic Plugins Functionality' + msg + '...')
+ log.info('####################################################################\n')
+
+ plugin_tests.test_all_plugins(topology.standalone)
+
+ log.info('####################################################################')
+ log.info('Successfully Tested Dynamic Plugins Functionality' + msg + '.')
+ log.info('####################################################################\n')
+
+ ############################################################################
+ # Test the stability by exercising the internal lists, callbacks, and task handlers
+ ############################################################################
+
+ log.info('####################################################################')
+ log.info('Testing Dynamic Plugins for Memory Corruption' + msg + '...')
+ log.info('####################################################################\n')
+ prev_plugin_test = None
+ prev_prev_plugin_test = None
+
+ for plugin_test in plugin_tests.func_tests:
+ #
+ # Restart the plugin several times (and prev plugins) - work that linked list
+ #
+ plugin_test(topology.standalone, "restart")
+
+ if prev_prev_plugin_test:
+ prev_prev_plugin_test(topology.standalone, "restart")
+
+ plugin_test(topology.standalone, "restart")
+
+ if prev_plugin_test:
+ prev_plugin_test(topology.standalone, "restart")
+
+ plugin_test(topology.standalone, "restart")
+
+ # Now run the functional test
+ plugin_test(topology.standalone)
+
+ # Set the previous tests
+ if prev_plugin_test:
+ prev_prev_plugin_test = prev_plugin_test
+ prev_plugin_test = plugin_test
+
+ log.info('####################################################################')
+ log.info('Successfully Tested Dynamic Plugins for Memory Corruption' + msg + '.')
+ log.info('####################################################################\n')
+
+ ############################################################################
+ # Stress two plugins while restarting it, and while restarting other plugins.
+ # The goal is to not crash, and have the plugins work after stressing them.
+ ############################################################################
+
+ log.info('####################################################################')
+ log.info('Stressing Dynamic Plugins' + msg + '...')
+ log.info('####################################################################\n')
+
+ stress_tests.configureMO(topology.standalone)
+ stress_tests.configureRI(topology.standalone)
+
+ stress_count = 0
+ while stress_count < stress_max_runs:
+ log.info('####################################################################')
+ log.info('Running stress test' + msg + '. Run (%d/%d)...' % (stress_count + 1, stress_max_runs))
+ log.info('####################################################################\n')
+
+ try:
+ # Launch three new threads to add a bunch of users
+ add_users = stress_tests.AddUsers(topology.standalone, 'employee', True)
+ add_users.start()
+ add_users2 = stress_tests.AddUsers(topology.standalone, 'entry', True)
+ add_users2.start()
+ add_users3 = stress_tests.AddUsers(topology.standalone, 'person', True)
+ add_users3.start()
+ time.sleep(1)
+
+ # While we are adding users restart the MO plugin and an idle plugin
+ topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ time.sleep(1)
+ topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ time.sleep(1)
+ topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
+ topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
+ time.sleep(1)
+ topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ time.sleep(2)
+ topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ time.sleep(1)
+ topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
+ topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
+ topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ time.sleep(1)
+ topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+
+ # Wait for the 'adding' threads to complete
+ add_users.join()
+ add_users2.join()
+ add_users3.join()
+
+ # Now launch three threads to delete the users
+ del_users = stress_tests.DelUsers(topology.standalone, 'employee')
+ del_users.start()
+ del_users2 = stress_tests.DelUsers(topology.standalone, 'entry')
+ del_users2.start()
+ del_users3 = stress_tests.DelUsers(topology.standalone, 'person')
+ del_users3.start()
+ time.sleep(1)
+
+ # Restart both the MO, RI plugins during these deletes, and an idle plugin
+ topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
+ topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
+ time.sleep(1)
+ topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
+ time.sleep(1)
+ topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ time.sleep(1)
+ topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ time.sleep(1)
+ topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
+ topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
+ topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
+ topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
+ topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
+ time.sleep(2)
+ topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
+ time.sleep(1)
+ topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ time.sleep(1)
+ topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ time.sleep(1)
+ topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
+ topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
+ topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
+
+ # Wait for the 'deleting' threads to complete
+ del_users.join()
+ del_users2.join()
+ del_users3.join()
+
+ # Now make sure both the MO and RI plugins still work correctly
+ plugin_tests.func_tests[8](topology.standalone) # RI plugin
+ plugin_tests.func_tests[5](topology.standalone) # MO plugin
+
+ # Cleanup the stress tests
+ stress_tests.cleanup(topology.standalone)
+
+ except:
+ log.info('Stress test failed!')
+ repl_fail(replica_inst)
+
+ stress_count += 1
+ log.info('####################################################################')
+ log.info('Successfully Stressed Dynamic Plugins' + msg +
+ '. Completed (%d/%d)' % (stress_count, stress_max_runs))
+ log.info('####################################################################\n')
+
+ if replication_run:
+ # We're done.
+ break
+ else:
+ #
+ # Enable replication and run everything one more time
+ #
+ log.info('Setting up replication, and rerunning the tests...\n')
+
+ # Create replica instance
+ replica_inst = DirSrv(verbose=False)
+ args_instance[SER_HOST] = LOCALHOST
+ args_instance[SER_PORT] = REPLICA_PORT
+ args_instance[SER_SERVERID_PROP] = 'replica'
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+
+ args_replica_inst = args_instance.copy()
+ replica_inst.allocate(args_replica_inst)
+ replica_inst.create()
+ replica_inst.open()
+
+ try:
+ topology.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX,
+ role=REPLICAROLE_MASTER,
+ replicaId=1)
+ replica_inst.replica.enableReplication(suffix=DEFAULT_SUFFIX,
+ role=REPLICAROLE_CONSUMER,
+ replicaId=65535)
+ properties = {RA_NAME: r'to_replica',
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+
+ repl_agreement = topology.standalone.agreement.create(suffix=DEFAULT_SUFFIX,
+ host=LOCALHOST,
+ port=REPLICA_PORT,
+ properties=properties)
+
+ if not repl_agreement:
+ log.fatal("Fail to create a replica agreement")
+ repl_fail(replica_inst)
+
+ topology.standalone.agreement.init(DEFAULT_SUFFIX, LOCALHOST, REPLICA_PORT)
+ topology.standalone.waitForReplInit(repl_agreement)
+ except:
+ log.info('Failed to setup replication!')
+ repl_fail(replica_inst)
+
+ replication_run = True
+ msg = ' (replication enabled)'
+ time.sleep(1)
- plugin_test(topology.standalone, "restart")
+ ############################################################################
+ # Check replication, and data are in sync, and remove the instance
+ ############################################################################
- if prev_plugin_test:
- prev_plugin_test(topology.standalone, "restart")
+ log.info('Checking if replication is in sync...')
- plugin_test(topology.standalone, "restart")
+ try:
+ # Grab master's max CSN
+ entry = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, RUV_FILTER)
+ if not entry:
+ log.error('Failed to find db tombstone entry from master')
+ repl_fail(replica_inst)
+ elements = entry[0].getValues('nsds50ruv')
+ for ruv in elements:
+ if 'replica 1' in ruv:
+ parts = ruv.split()
+ if len(parts) == 5:
+ master_maxcsn = parts[4]
+ break
+ else:
+ log.error('RUV is incomplete')
+ repl_fail(replica_inst)
+ if master_maxcsn == 0:
+ log.error('Failed to find maxcsn on master')
+ repl_fail(replica_inst)
- # Now run the functional test
- plugin_test(topology.standalone)
+ except ldap.LDAPError, e:
+ log.fatal('Unable to search master for db tombstone: ' + e.message['desc'])
+ repl_fail(replica_inst)
+
+ # Loop on the consumer - waiting for it to catch up
+ count = 0
+ insync = False
+ while count < 10:
+ try:
+ # Grab master's max CSN
+ entry = replica_inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, RUV_FILTER)
+ if not entry:
+ log.error('Failed to find db tombstone entry on consumer')
+ repl_fail(replica_inst)
+ elements = entry[0].getValues('nsds50ruv')
+ for ruv in elements:
+ if 'replica 1' in ruv:
+ parts = ruv.split()
+ if len(parts) == 5:
+ replica_maxcsn = parts[4]
+ break
+ if replica_maxcsn == 0:
+ log.error('Failed to find maxcsn on consumer')
+ repl_fail(replica_inst)
+ except ldap.LDAPError, e:
+ log.fatal('Unable to search for db tombstone on consumer: ' + e.message['desc'])
+ repl_fail(replica_inst)
+
+ if master_maxcsn == replica_maxcsn:
+ insync = True
+ log.info('Replication is in sync.\n')
+ break
+ count += 1
+ time.sleep(1)
+
+ # Report on replication status
+ if not insync:
+ log.error('Consumer not in sync with master!')
+ repl_fail(replica_inst)
- # Set the previous tests
- if prev_plugin_test:
- prev_prev_plugin_test = prev_plugin_test
- prev_plugin_test = plugin_test
+ #
+ # Verify the databases are identical. There should not be any "person, entry, employee" entries
+ #
+ log.info('Checking if the data is the same between the replicas...')
- log.info('#####################################################')
- log.info('Successfully Tested Dynamic Plugins for Memory Corruption.')
- log.info('#####################################################\n')
+ # Check the master
+ try:
+ entries = topology.standalone.search_s(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ "(|(uid=person*)(uid=entry*)(uid=employee*))")
+ if len(entries) > 0:
+ log.error('Master database has incorrect data set!\n')
+ repl_fail(replica_inst)
+ except ldap.LDAPError, e:
+ log.fatal('Unable to search db on master: ' + e.message['desc'])
+ repl_fail(replica_inst)
- ############################################################################
- # Stress two plugins while restarting it, and while restarting other plugins.
- # The goal is to not crash, and have the plugins work after stressing it.
- ############################################################################
+ # Check the consumer
+ try:
+ entries = replica_inst.search_s(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ "(|(uid=person*)(uid=entry*)(uid=employee*))")
+ if len(entries) > 0:
+ log.error('Consumer database is not consistent with master database')
+ repl_fail(replica_inst)
+ except ldap.LDAPError, e:
+ log.fatal('Unable to search db on consumer: ' + e.message['desc'])
+ repl_fail(replica_inst)
- log.info('#####################################################')
- log.info('Stressing Dynamic Plugins...')
- log.info('#####################################################\n')
+ log.info('Data is consistent across the replicas.\n')
- # Configure the plugins
- stress_tests.configureMO(topology.standalone)
- stress_tests.configureRI(topology.standalone)
-
- # Launch three new threads to add a bunch of users
- add_users = stress_tests.AddUsers(topology.standalone, 'user', True)
- add_users.start()
- add_users2 = stress_tests.AddUsers(topology.standalone, 'entry', True)
- add_users2.start()
- add_users3 = stress_tests.AddUsers(topology.standalone, 'person', True)
- add_users3.start()
- time.sleep(1)
-
- # While we are adding users restart the MO plugin
- topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
- time.sleep(3)
- topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
- time.sleep(1)
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
-
- # Restart idle plugin
- topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
- topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
-
- # Wait for the 'adding' threads to complete
- add_users.join()
- add_users2.join()
- add_users3.join()
-
- # Now launch three threads to delete the users, and restart both the MO and RI plugins
- del_users = stress_tests.DelUsers(topology.standalone, 'user')
- del_users.start()
- del_users2 = stress_tests.DelUsers(topology.standalone, 'entry')
- del_users2.start()
- del_users3 = stress_tests.DelUsers(topology.standalone, 'person')
- del_users3.start()
- time.sleep(1)
-
- # Restart the both the MO and RI plugins during these deletes
-
- topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
- topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
- topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
- time.sleep(3)
- topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
- time.sleep(1)
- topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
- time.sleep(1)
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
- time.sleep(1)
- topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
-
- # Restart idle plugin
- topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
- topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
-
- # Wait for the 'deleting' threads to complete
- del_users.join()
- del_users2.join()
- del_users3.join()
-
- # Now make sure both the MO and RI plugins still work
- plugin_tests.func_tests[8](topology.standalone) # RI plugin
- plugin_tests.func_tests[5](topology.standalone) # MO plugin
+ log.info('####################################################################')
+ log.info('Replication consistency test passed')
+ log.info('####################################################################\n')
- log.info('#####################################################')
- log.info('Successfully Stressed Dynamic Plugins.')
- log.info('#####################################################\n')
+ # Remove the replica instance
+ replica_inst.delete()
############################################################################
# We made it to the end!
@@ -291,7 +509,8 @@ def test_dynamic_plugins(topology):
log.info('#####################################################')
log.info("Dynamic Plugins Testsuite: Completed Successfully!")
log.info('#####################################################')
- log.info('#####################################################')
+ log.info('#####################################################\n')
+
def test_dynamic_plugins_final(topology):
topology.standalone.stop(timeout=10)
diff --git a/dirsrvtests/tickets/ticket47560_test.py b/dirsrvtests/tickets/ticket47560_test.py
index 0b7e436..af7fdc3 100644
--- a/dirsrvtests/tickets/ticket47560_test.py
+++ b/dirsrvtests/tickets/ticket47560_test.py
@@ -146,7 +146,7 @@ def test_ticket47560(topology):
Enable or disable mbo plugin depending on 'value' ('on'/'off')
"""
# enable/disable the mbo plugin
- if value != 'on':
+ if value == 'on':
topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
else:
topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
diff --git a/ldap/servers/plugins/acctpolicy/acct_config.c b/ldap/servers/plugins/acctpolicy/acct_config.c
index 25352b1..d1acf1a 100644
--- a/ldap/servers/plugins/acctpolicy/acct_config.c
+++ b/ldap/servers/plugins/acctpolicy/acct_config.c
@@ -53,9 +53,11 @@ acct_policy_load_config_startup( Slapi_PBlock* pb, void* plugin_id ) {
PLUGIN_CONFIG_DN, rc );
return( -1 );
}
-
+ config_wr_lock();
+ free_config();
newcfg = get_config();
rc = acct_policy_entry2config( config_entry, newcfg );
+ config_unlock();
slapi_entry_free( config_entry );
@@ -85,8 +87,8 @@ acct_policy_entry2config( Slapi_Entry *e, acctPluginCfg *newcfg ) {
} else if (!update_is_allowed_attr(newcfg->state_attr_name)) {
/* log a warning that this attribute cannot be updated */
slapi_log_error( SLAPI_LOG_FATAL, PLUGIN_NAME,
- "The configured state attribute [%s] cannot be updated, accounts will always become inactive.\n",
- newcfg->state_attr_name );
+ "The configured state attribute [%s] cannot be updated, accounts will always become inactive.\n",
+ newcfg->state_attr_name );
}
newcfg->alt_state_attr_name = get_attr_string_val( e, CFG_ALT_LASTLOGIN_STATE_ATTR );
diff --git a/ldap/servers/plugins/acctpolicy/acct_init.c b/ldap/servers/plugins/acctpolicy/acct_init.c
index c4dba22..0b1af91 100644
--- a/ldap/servers/plugins/acctpolicy/acct_init.c
+++ b/ldap/servers/plugins/acctpolicy/acct_init.c
@@ -63,6 +63,47 @@ int acct_postop_init( Slapi_PBlock *pb );
int acct_bind_preop( Slapi_PBlock *pb );
int acct_bind_postop( Slapi_PBlock *pb );
+static void *_PluginID = NULL;
+static Slapi_DN *_PluginDN = NULL;
+static Slapi_DN *_ConfigAreaDN = NULL;
+static Slapi_RWLock *config_rwlock = NULL;
+
+void
+acct_policy_set_plugin_id(void *pluginID)
+{
+ _PluginID = pluginID;
+}
+
+void *
+acct_policy_get_plugin_id()
+{
+ return _PluginID;
+}
+
+void
+acct_policy_set_plugin_sdn(Slapi_DN *pluginDN)
+{
+ _PluginDN = pluginDN;
+}
+
+Slapi_DN *
+acct_policy_get_plugin_sdn()
+{
+ return _PluginDN;
+}
+
+void
+acct_policy_set_config_area(Slapi_DN *sdn)
+{
+ _ConfigAreaDN = sdn;
+}
+
+Slapi_DN *
+acct_policy_get_config_area()
+{
+ return _ConfigAreaDN;
+}
+
/*
Master init function for the account plugin
*/
@@ -120,14 +161,32 @@ acct_policy_init( Slapi_PBlock *pb )
which is needed to retrieve the plugin configuration
*/
int
-acct_policy_start( Slapi_PBlock *pb ) {
+acct_policy_start( Slapi_PBlock *pb )
+{
acctPluginCfg *cfg;
void *plugin_id = get_identity();
+ Slapi_DN *plugindn = NULL;
+ char *config_area = NULL;
if(slapi_plugin_running(pb)){
return 0;
}
+ slapi_pblock_get(pb, SLAPI_TARGET_SDN, &plugindn);
+ acct_policy_set_plugin_sdn(slapi_sdn_dup(plugindn));
+
+ /* Set the alternate config area if one is defined. */
+ slapi_pblock_get(pb, SLAPI_PLUGIN_CONFIG_AREA, &config_area);
+ if (config_area) {
+ acct_policy_set_config_area(slapi_sdn_new_normdn_byval(config_area));
+ }
+
+ if(config_rwlock == NULL){
+ if((config_rwlock = slapi_new_rwlock()) == NULL){
+ return( CALLBACK_ERR );
+ }
+ }
+
/* Load plugin configuration */
if( acct_policy_load_config_startup( pb, plugin_id ) ) {
slapi_log_error( SLAPI_LOG_FATAL, PLUGIN_NAME,
@@ -151,6 +210,10 @@ acct_policy_close( Slapi_PBlock *pb )
{
int rc = 0;
+ slapi_destroy_rwlock(config_rwlock);
+ config_rwlock = NULL;
+ slapi_sdn_free(&_PluginDN);
+ slapi_sdn_free(&_ConfigAreaDN);
free_config();
return rc;
@@ -168,8 +231,11 @@ acct_preop_init( Slapi_PBlock *pb ) {
return( CALLBACK_ERR );
}
- if ( slapi_pblock_set( pb, SLAPI_PLUGIN_PRE_BIND_FN,
- (void *) acct_bind_preop ) != 0 ) {
+ if ( slapi_pblock_set( pb, SLAPI_PLUGIN_PRE_BIND_FN, (void *) acct_bind_preop ) != 0 ||
+ slapi_pblock_set(pb, SLAPI_PLUGIN_PRE_ADD_FN, (void *) acct_add_pre_op) != 0 ||
+ slapi_pblock_set(pb, SLAPI_PLUGIN_PRE_MODIFY_FN, (void *) acct_mod_pre_op) != 0 ||
+ slapi_pblock_set(pb, SLAPI_PLUGIN_PRE_DELETE_FN, (void *) acct_del_pre_op) != 0)
+ {
slapi_log_error( SLAPI_LOG_FATAL, PRE_PLUGIN_NAME,
"Failed to set plugin callback function\n" );
return( CALLBACK_ERR );
@@ -192,8 +258,11 @@ acct_postop_init( Slapi_PBlock *pb )
return( CALLBACK_ERR );
}
- if ( slapi_pblock_set( pb, SLAPI_PLUGIN_POST_BIND_FN,
- (void *)acct_bind_postop ) != 0 ) {
+
+ if ( slapi_pblock_set( pb, SLAPI_PLUGIN_POST_BIND_FN, (void *)acct_bind_postop ) != 0 ||
+ slapi_pblock_set(pb, SLAPI_PLUGIN_POST_ADD_FN, (void *) acct_post_op) != 0 ||
+ slapi_pblock_set(pb, SLAPI_PLUGIN_POST_MODIFY_FN, (void *) acct_post_op) != 0)
+ {
slapi_log_error( SLAPI_LOG_FATAL, POST_PLUGIN_NAME,
"Failed to set plugin callback function\n" );
return( CALLBACK_ERR );
@@ -208,3 +277,23 @@ acct_postop_init( Slapi_PBlock *pb )
return( CALLBACK_OK );
}
+/*
+ * Wrappers for config locking
+ */
+void
+config_rd_lock()
+{
+ slapi_rwlock_rdlock(config_rwlock);
+}
+
+void
+config_wr_lock()
+{
+ slapi_rwlock_wrlock(config_rwlock);
+}
+
+void
+config_unlock()
+{
+ slapi_rwlock_unlock(config_rwlock);
+}
diff --git a/ldap/servers/plugins/acctpolicy/acct_plugin.c b/ldap/servers/plugins/acctpolicy/acct_plugin.c
index 5719f27..a61a50c 100644
--- a/ldap/servers/plugins/acctpolicy/acct_plugin.c
+++ b/ldap/servers/plugins/acctpolicy/acct_plugin.c
@@ -28,6 +28,46 @@ Hewlett-Packard Development Company, L.P.
#include "acctpolicy.h"
/*
+ * acct_policy_dn_is_config()
+ *
+ * Checks if dn is a plugin config entry.
+ */
+static int
+acct_policy_dn_is_config(Slapi_DN *sdn)
+{
+ int ret = 0;
+
+ slapi_log_error(SLAPI_LOG_TRACE, PLUGIN_NAME,
+ "--> acct_policy_dn_is_config\n");
+
+ if (sdn == NULL) {
+ goto bail;
+ }
+
+ /* If an alternate config area is configured, treat its child
+ * entries as config entries. If the alternate config area is
+ * not configured, treat children of the top-level plug-in
+ * config entry as our config entries. */
+ if (acct_policy_get_config_area()) {
+ if (slapi_sdn_issuffix(sdn, acct_policy_get_config_area()) &&
+ slapi_sdn_compare(sdn, acct_policy_get_config_area())) {
+ ret = 1;
+ }
+ } else {
+ if (slapi_sdn_issuffix(sdn, acct_policy_get_plugin_sdn()) &&
+ slapi_sdn_compare(sdn, acct_policy_get_plugin_sdn())) {
+ ret = 1;
+ }
+ }
+
+bail:
+ slapi_log_error(SLAPI_LOG_TRACE, PLUGIN_NAME,
+ "<-- acct_policy_dn_is_config\n");
+
+ return ret;
+}
+
+/*
Checks bind entry for last login state and compares current time with last
login time plus the limit to decide whether to deny the bind.
*/
@@ -39,6 +79,7 @@ acct_inact_limit( Slapi_PBlock *pb, const char *dn, Slapi_Entry *target_entry, a
int rc = 0; /* Optimistic default */
acctPluginCfg *cfg;
+ config_rd_lock();
cfg = get_config();
if( ( lasttimestr = get_attr_string_val( target_entry,
cfg->state_attr_name ) ) != NULL ) {
@@ -75,6 +116,7 @@ acct_inact_limit( Slapi_PBlock *pb, const char *dn, Slapi_Entry *target_entry, a
}
done:
+ config_unlock();
/* Deny bind; the account has exceeded the inactivity limit */
if( rc == 1 ) {
slapi_send_ldap_result( pb, LDAP_CONSTRAINT_VIOLATION, NULL,
@@ -106,13 +148,14 @@ acct_record_login( const char *dn )
Slapi_PBlock *modpb = NULL;
int skip_mod_attrs = 1; /* value doesn't matter as long as not NULL */
+ config_rd_lock();
cfg = get_config();
/* if we are not allowed to modify the state attr we're done
* this could be intentional, so just return
*/
if (! update_is_allowed_attr(cfg->always_record_login_attr) )
- return rc;
+ goto done;
plugin_id = get_identity();
@@ -152,6 +195,7 @@ acct_record_login( const char *dn )
}
done:
+ config_unlock();
slapi_pblock_destroy( modpb );
slapi_ch_free_string( ×tr );
@@ -274,6 +318,7 @@ acct_bind_postop( Slapi_PBlock *pb )
goto done;
}
+ config_rd_lock();
cfg = get_config();
tracklogin = cfg->always_record_login;
@@ -296,6 +341,7 @@ acct_bind_postop( Slapi_PBlock *pb )
}
}
}
+ config_unlock();
if( tracklogin ) {
rc = acct_record_login( dn );
@@ -319,3 +365,133 @@ done:
return( rc == 0 ? CALLBACK_OK : CALLBACK_ERR );
}
+
+static int acct_pre_op( Slapi_PBlock *pb, int modop )
+{
+ Slapi_DN *sdn = 0;
+ Slapi_Entry *e = 0;
+ Slapi_Mods *smods = 0;
+ LDAPMod **mods;
+ int free_entry = 0;
+ char *errstr = NULL;
+ int ret = SLAPI_PLUGIN_SUCCESS;
+
+ slapi_log_error(SLAPI_LOG_TRACE, PRE_PLUGIN_NAME, "--> acct_pre_op\n");
+
+ slapi_pblock_get(pb, SLAPI_TARGET_SDN, &sdn);
+
+ if (acct_policy_dn_is_config(sdn)) {
+ /* Validate config changes, but don't apply them.
+ * This allows us to reject invalid config changes
+ * here at the pre-op stage. Applying the config
+ * needs to be done at the post-op stage. */
+
+ if (LDAP_CHANGETYPE_ADD == modop) {
+ slapi_pblock_get(pb, SLAPI_ADD_ENTRY, &e);
+
+ /* If the entry doesn't exist, just bail and
+ * let the server handle it. */
+ if (e == NULL) {
+ goto bail;
+ }
+ } else if (LDAP_CHANGETYPE_MODIFY == modop) {
+ /* Fetch the entry being modified so we can
+ * create the resulting entry for validation. */
+ if (sdn) {
+ slapi_search_internal_get_entry(sdn, 0, &e, get_identity());
+ free_entry = 1;
+ }
+
+ /* If the entry doesn't exist, just bail and
+ * let the server handle it. */
+ if (e == NULL) {
+ goto bail;
+ }
+
+ /* Grab the mods. */
+ slapi_pblock_get(pb, SLAPI_MODIFY_MODS, &mods);
+ smods = slapi_mods_new();
+ slapi_mods_init_byref(smods, mods);
+
+ /* Apply the mods to create the resulting entry. */
+ if (mods && (slapi_entry_apply_mods(e, mods) != LDAP_SUCCESS)) {
+ /* The mods don't apply cleanly, so we just let this op go
+ * to let the main server handle it. */
+ goto bailmod;
+ }
+ } else if (modop == LDAP_CHANGETYPE_DELETE){
+ ret = LDAP_UNWILLING_TO_PERFORM;
+ slapi_log_error(SLAPI_LOG_FATAL, PRE_PLUGIN_NAME,
+ "acct_pre_op: can not delete plugin config entry [%d]\n", ret);
+ } else {
+ errstr = slapi_ch_smprintf("acct_pre_op: invalid op type %d", modop);
+ ret = LDAP_PARAM_ERROR;
+ goto bail;
+ }
+ }
+
+ bailmod:
+ /* Clean up smods. */
+ if (LDAP_CHANGETYPE_MODIFY == modop) {
+ slapi_mods_free(&smods);
+ }
+
+ bail:
+ if (free_entry && e)
+ slapi_entry_free(e);
+
+ if (ret) {
+ slapi_log_error(SLAPI_LOG_PLUGIN, PRE_PLUGIN_NAME,
+ "acct_pre_op: operation failure [%d]\n", ret);
+ slapi_send_ldap_result(pb, ret, NULL, errstr, 0, NULL);
+ slapi_ch_free((void **)&errstr);
+ slapi_pblock_set(pb, SLAPI_RESULT_CODE, &ret);
+ ret = SLAPI_PLUGIN_FAILURE;
+ }
+
+ slapi_log_error(SLAPI_LOG_TRACE, PRE_PLUGIN_NAME, "<-- acct_pre_op\n");
+
+ return ret;
+}
+
+int
+acct_add_pre_op( Slapi_PBlock *pb )
+{
+ return acct_pre_op(pb, LDAP_CHANGETYPE_ADD);
+}
+
+int
+acct_mod_pre_op( Slapi_PBlock *pb )
+{
+ return acct_pre_op(pb, LDAP_CHANGETYPE_MODIFY);
+}
+
+int
+acct_del_pre_op( Slapi_PBlock *pb )
+{
+ return acct_pre_op(pb, LDAP_CHANGETYPE_DELETE);
+}
+
+int
+acct_post_op(Slapi_PBlock *pb)
+{
+ Slapi_DN *sdn = NULL;
+
+ slapi_log_error(SLAPI_LOG_TRACE, POST_PLUGIN_NAME,
+ "--> acct_policy_post_op\n");
+
+ slapi_pblock_get(pb, SLAPI_TARGET_SDN, &sdn);
+ if (acct_policy_dn_is_config(sdn)){
+ if( acct_policy_load_config_startup( pb, get_identity() ) ) {
+ slapi_log_error( SLAPI_LOG_FATAL, PLUGIN_NAME,
+ "acct_post_op failed to load configuration\n" );
+ return( CALLBACK_ERR );
+ }
+ }
+
+ slapi_log_error(SLAPI_LOG_TRACE, POST_PLUGIN_NAME,
+ "<-- acct_policy_post_op\n");
+
+ return SLAPI_PLUGIN_SUCCESS;
+}
+
diff --git a/ldap/servers/plugins/acctpolicy/acct_util.c b/ldap/servers/plugins/acctpolicy/acct_util.c
index 2e24da2..cff0176 100644
--- a/ldap/servers/plugins/acctpolicy/acct_util.c
+++ b/ldap/servers/plugins/acctpolicy/acct_util.c
@@ -82,7 +82,8 @@ get_attr_string_val( Slapi_Entry* target_entry, char* attr_name ) {
*/
int
get_acctpolicy( Slapi_PBlock *pb, Slapi_Entry *target_entry, void *plugin_id,
- acctPolicy **policy ) {
+ acctPolicy **policy )
+{
Slapi_DN *sdn = NULL;
Slapi_Entry *policy_entry = NULL;
Slapi_Attr *attr;
@@ -93,8 +94,6 @@ get_acctpolicy( Slapi_PBlock *pb, Slapi_Entry *target_entry, void *plugin_id,
acctPluginCfg *cfg;
int rc = 0;
- cfg = get_config();
-
if( policy == NULL ) {
/* Bad parameter */
return( -1 );
@@ -102,19 +101,22 @@ get_acctpolicy( Slapi_PBlock *pb, Slapi_Entry *target_entry, void *plugin_id,
*policy = NULL;
+ config_rd_lock();
+ cfg = get_config();
/* Return success and NULL policy */
policy_dn = get_attr_string_val( target_entry, cfg->spec_attr_name );
if( policy_dn == NULL ) {
slapi_log_error( SLAPI_LOG_PLUGIN, PLUGIN_NAME,
"\"%s\" is not governed by an account inactivity "
"policy subentry\n", slapi_entry_get_ndn( target_entry ) );
- if (cfg->inactivitylimit != ULONG_MAX) {
- goto dopolicy;
- }
+ if (cfg->inactivitylimit != ULONG_MAX) {
+ goto dopolicy;
+ }
slapi_log_error( SLAPI_LOG_PLUGIN, PLUGIN_NAME,
"\"%s\" is not governed by an account inactivity "
"global policy\n", slapi_entry_get_ndn( target_entry ) );
- return rc;
+ config_unlock();
+ return rc;
}
sdn = slapi_sdn_new_dn_byref( policy_dn );
@@ -153,7 +155,8 @@ dopolicy:
}
}
done:
- slapi_ch_free_string( &policy_dn );
+ config_unlock();
+ slapi_ch_free_string( &policy_dn );
slapi_entry_free( policy_entry );
return( rc );
}
diff --git a/ldap/servers/plugins/acctpolicy/acctpolicy.h b/ldap/servers/plugins/acctpolicy/acctpolicy.h
index 2185b95..64f37fb 100644
--- a/ldap/servers/plugins/acctpolicy/acctpolicy.h
+++ b/ldap/servers/plugins/acctpolicy/acctpolicy.h
@@ -69,10 +69,9 @@ typedef struct accountpolicy {
int get_acctpolicy( Slapi_PBlock *pb, Slapi_Entry *target_entry,
void *plugin_id, acctPolicy **policy );
void free_acctpolicy( acctPolicy **policy );
-int has_attr( Slapi_Entry* target_entry, char* attr_name,
- char** val );
+int has_attr( Slapi_Entry* target_entry, char* attr_name, char** val );
char* get_attr_string_val( Slapi_Entry* e, char* attr_name );
-void* get_identity();
+void* get_identity(void);
void set_identity(void*);
time_t gentimeToEpochtime( char *gentimestr );
char* epochtimeToGentime( time_t epochtime );
@@ -80,6 +79,22 @@ int update_is_allowed_attr (const char *attr);
/* acct_config.c */
int acct_policy_load_config_startup( Slapi_PBlock* pb, void* plugin_id );
-acctPluginCfg* get_config();
-void free_config();
+acctPluginCfg* get_config(void);
+void free_config(void);
+
+/* acct_init.c */
+void acct_policy_set_plugin_sdn(Slapi_DN *pluginDN);
+Slapi_DN * acct_policy_get_plugin_sdn(void);
+void acct_policy_set_config_area(Slapi_DN *sdn);
+Slapi_DN * acct_policy_get_config_area(void);
+void config_rd_lock(void);
+void config_wr_lock(void);
+void config_unlock(void);
+
+/* acc_plugins.c */
+int acct_add_pre_op( Slapi_PBlock *pb );
+int acct_mod_pre_op( Slapi_PBlock *pb );
+int acct_del_pre_op( Slapi_PBlock *pb );
+int acct_post_op( Slapi_PBlock *pb );
+
diff --git a/ldap/servers/plugins/linkedattrs/fixup_task.c b/ldap/servers/plugins/linkedattrs/fixup_task.c
index db3c693..f3f5c04 100644
--- a/ldap/servers/plugins/linkedattrs/fixup_task.c
+++ b/ldap/servers/plugins/linkedattrs/fixup_task.c
@@ -197,8 +197,8 @@ linked_attrs_fixup_task_thread(void *arg)
linked_attrs_unlock();
/* Log finished message. */
- slapi_task_log_notice(task, "Linked attributes fixup task complete.\n");
- slapi_task_log_status(task, "Linked attributes fixup task complete.\n");
+ slapi_task_log_notice(task, "Linked attributes fixup task complete.");
+ slapi_task_log_status(task, "Linked attributes fixup task complete.");
slapi_log_error(SLAPI_LOG_FATAL, LINK_PLUGIN_SUBSYSTEM, "Linked attributes fixup task complete.\n");
slapi_task_inc_progress(task);
diff --git a/ldap/servers/plugins/memberof/memberof_config.c b/ldap/servers/plugins/memberof/memberof_config.c
index 012e2d0..7fa5897 100644
--- a/ldap/servers/plugins/memberof/memberof_config.c
+++ b/ldap/servers/plugins/memberof/memberof_config.c
@@ -867,7 +867,6 @@ memberof_shared_config_validate(Slapi_PBlock *pb)
}
slapi_ch_free_string(&configarea_dn);
slapi_sdn_free(&config_sdn);
- slapi_entry_free(config_entry);
}
}
}
diff --git a/ldap/servers/slapd/dse.c b/ldap/servers/slapd/dse.c
index f0ce255..f80178e 100644
--- a/ldap/servers/slapd/dse.c
+++ b/ldap/servers/slapd/dse.c
@@ -2426,8 +2426,6 @@ dse_add(Slapi_PBlock *pb) /* JCM There should only be one exit point from this f
}
}
- /* entry has been freed, so make sure no one tries to use it later */
- slapi_pblock_set(pb, SLAPI_ADD_ENTRY, NULL);
slapi_send_ldap_result(pb, returncode, NULL, returntext[0] ? returntext : NULL, 0, NULL );
return dse_add_return(rc, e);
}
diff --git a/ldap/servers/slapd/plugin.c b/ldap/servers/slapd/plugin.c
index 5530c70..b0b18e7 100644
--- a/ldap/servers/slapd/plugin.c
+++ b/ldap/servers/slapd/plugin.c
@@ -454,10 +454,21 @@ plugin_call_plugins( Slapi_PBlock *pb, int whichfunction )
{
/* We stash the pblock plugin pointer to preserve the callers context */
struct slapdplugin *p;
+ int locked = 0;
+
+ locked = slapi_td_get_plugin_locked();
+ if (!locked) {
+ slapi_rwlock_rdlock(global_rwlock);
+ }
+
slapi_pblock_get(pb, SLAPI_PLUGIN, &p);
/* Call the operation on the Global Plugins */
rc = plugin_call_list(global_plugin_list[plugin_list_number], whichfunction, pb);
slapi_pblock_set(pb, SLAPI_PLUGIN, p);
+
+ if (!locked) {
+ slapi_rwlock_unlock(global_rwlock);
+ }
}
else
{
@@ -1080,12 +1091,6 @@ plugin_start(Slapi_Entry *entry, char *returntext)
int ret = 0;
int i = 0;
- /*
- * Disable registered plugin functions so preops/postops/etc
- * dont get called prior to the plugin being started (due to
- * plugins performing ops on the DIT)
- */
- global_plugin_callbacks_enabled = 0;
global_plugins_started = 0;
/* Count the plugins so we can allocate memory for the config array */
@@ -1404,6 +1409,7 @@ plugin_free_plugin_dep_config(plugin_dep_config **cfg)
}
slapi_ch_free_string(&config[index].type);
slapi_ch_free_string(&config[index].name);
+ slapi_ch_free_string(&config[index].config_area);
pblock_done(&config[index].pb);
index++;
}
@@ -1909,16 +1915,6 @@ plugin_call_func (struct slapdplugin *list, int operation, Slapi_PBlock *pb, int
int rc;
int return_value = 0;
int count = 0;
- int *locked = 0;
-
- /*
- * Take the read lock
- */
- slapi_td_get_plugin_locked(&locked);
- if(locked == 0){
- slapi_rwlock_rdlock(global_rwlock);
- }
-
for (; list != NULL; list = list->plg_next)
{
@@ -1998,9 +1994,6 @@ plugin_call_func (struct slapdplugin *list, int operation, Slapi_PBlock *pb, int
if(call_one)
break;
}
- if(locked == 0){
- slapi_rwlock_unlock(global_rwlock);
- }
return( return_value );
}
@@ -2323,6 +2316,7 @@ plugin_restart(Slapi_Entry *pentryBefore, Slapi_Entry *pentryAfter)
}
slapi_rwlock_wrlock(global_rwlock);
+ slapi_td_set_plugin_locked();
if(plugin_delete(pentryBefore, returntext, 1) == LDAP_SUCCESS){
if(plugin_add(pentryAfter, returntext, 1) == LDAP_SUCCESS){
@@ -2346,6 +2340,7 @@ plugin_restart(Slapi_Entry *pentryBefore, Slapi_Entry *pentryAfter)
}
slapi_rwlock_unlock(global_rwlock);
+ slapi_td_set_plugin_unlocked();
return rc;
}
@@ -2995,12 +2990,11 @@ int
plugin_add(Slapi_Entry *entry, char *returntext, int locked)
{
int rc = LDAP_SUCCESS;
- int td_locked = 1;
if(!locked){
slapi_rwlock_wrlock(global_rwlock);
+ slapi_td_set_plugin_locked();
}
- slapi_td_set_plugin_locked(&td_locked);
if((rc = plugin_setup(entry, 0, 0, 1, returntext)) != LDAP_SUCCESS){
LDAPDebug(LDAP_DEBUG_PLUGIN, "plugin_add: plugin_setup failed for (%s)\n",slapi_entry_get_dn(entry), rc, 0);
@@ -3015,9 +3009,8 @@ plugin_add(Slapi_Entry *entry, char *returntext, int locked)
done:
if(!locked){
slapi_rwlock_unlock(global_rwlock);
+ slapi_td_set_plugin_unlocked();
}
- td_locked = 0;
- slapi_td_set_plugin_locked(&td_locked);
return rc;
}
@@ -3372,7 +3365,6 @@ plugin_delete(Slapi_Entry *plugin_entry, char *returntext, int locked)
struct slapdplugin *plugin = NULL;
const char *plugin_dn = slapi_entry_get_dn_const(plugin_entry);
char *value = NULL;
- int td_locked = 1;
int removed = PLUGIN_BUSY;
int type = 0;
int rc = LDAP_SUCCESS;
@@ -3400,8 +3392,8 @@ plugin_delete(Slapi_Entry *plugin_entry, char *returntext, int locked)
removed = PLUGIN_NOT_FOUND;
if(!locked){
slapi_rwlock_wrlock(global_rwlock);
+ slapi_td_set_plugin_locked();
}
- slapi_td_set_plugin_locked(&td_locked);
rc = plugin_get_type_and_list(value, &type, &plugin_list);
if ( rc != 0 ) {
@@ -3445,9 +3437,8 @@ plugin_delete(Slapi_Entry *plugin_entry, char *returntext, int locked)
unlock:
if(!locked){
slapi_rwlock_unlock(global_rwlock);
+ slapi_td_set_plugin_unlocked();
}
- td_locked = 0;
- slapi_td_set_plugin_locked(&td_locked);
}
}
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index cb8aad0..a61d954 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -5585,8 +5585,10 @@ int slapi_td_dn_init(void);
int slapi_td_set_dn(char *dn);
void slapi_td_get_dn(char **dn);
int slapi_td_plugin_lock_init(void);
-int slapi_td_set_plugin_locked(int *value);
-void slapi_td_get_plugin_locked(int **value);
+int slapi_td_get_plugin_locked(void);
+int slapi_td_set_plugin_locked(void);
+int slapi_td_set_plugin_unlocked(void);
+
/* Thread Local Storage Index Types */
#define SLAPI_TD_REQUESTOR_DN 1
diff --git a/ldap/servers/slapd/thread_data.c b/ldap/servers/slapd/thread_data.c
index 121e2d8..4d9bb93 100644
--- a/ldap/servers/slapd/thread_data.c
+++ b/ldap/servers/slapd/thread_data.c
@@ -168,19 +168,38 @@ slapi_td_plugin_lock_init()
}
int
-slapi_td_set_plugin_locked(int *value)
+slapi_td_set_plugin_locked()
{
- if(slapi_td_set_val(SLAPI_TD_PLUGIN_LIST_LOCK, (void *)value) == PR_FAILURE){
+ int val = 12345;
+
+ if(slapi_td_set_val(SLAPI_TD_PLUGIN_LIST_LOCK, (void *)&val) == PR_FAILURE){
return PR_FAILURE;
}
return PR_SUCCESS;
}
-void
-slapi_td_get_plugin_locked(int **value)
+int
+slapi_td_set_plugin_unlocked()
{
- slapi_td_get_val(SLAPI_TD_PLUGIN_LIST_LOCK, (void **)value);
+ if(slapi_td_set_val(SLAPI_TD_PLUGIN_LIST_LOCK, NULL) == PR_FAILURE){
+ return PR_FAILURE;
+ }
+
+ return PR_SUCCESS;
+}
+
+int
+slapi_td_get_plugin_locked()
+{
+ int *value = 0;
+
+ slapi_td_get_val(SLAPI_TD_PLUGIN_LIST_LOCK, (void **)&value);
+ if(value){
+ return 1;
+ } else{
+ return 0;
+ }
}
/* requestor dn */
8 years, 5 months
dirsrvtests/suites dirsrvtests/tickets ldap/servers
by Mark Reynolds
dirsrvtests/suites/dynamic-plugins/plugin_tests.py | 486 ++++++++++---
dirsrvtests/suites/dynamic-plugins/stress_tests.py | 10
dirsrvtests/suites/dynamic-plugins/test_dynamic_plugins.py | 463 +++++++++---
dirsrvtests/tickets/ticket47560_test.py | 2
ldap/servers/plugins/acctpolicy/acct_config.c | 8
ldap/servers/plugins/acctpolicy/acct_init.c | 99 ++
ldap/servers/plugins/acctpolicy/acct_plugin.c | 178 ++++
ldap/servers/plugins/acctpolicy/acct_util.c | 19
ldap/servers/plugins/acctpolicy/acctpolicy.h | 25
ldap/servers/plugins/linkedattrs/fixup_task.c | 4
ldap/servers/plugins/memberof/memberof_config.c | 1
ldap/servers/slapd/dse.c | 2
ldap/servers/slapd/plugin.c | 45 -
ldap/servers/slapd/slapi-plugin.h | 6
ldap/servers/slapd/thread_data.c | 29
15 files changed, 1112 insertions(+), 265 deletions(-)
New commits:
commit 14e5422328d8f116916efb4a9e192b8db4686e44
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Tue Dec 23 21:40:00 2014 -0500
Ticket 47451 - Dynamic plugins - fixed thread synchronization
Description: Made various fixes and overall improvements to the dynamic
plugin feature, and the CI test suite.
dirsrvtests/suites/dynamic-plugins/plugin_tests.py
dirsrvtests/suites/dynamic-plugins/stress_tests.py
dirsrvtests/suites/dynamic-plugins/test_dynamic_plugins.py
- Improved/intensified stress test
- Improved task monitoring
- Added a replication run to the entire test suite
- Added tests for "shared config areas": MO & RI plugins
ldap/servers/plugins/acctpolicy/acct_config.c
ldap/servers/plugins/acctpolicy/acct_init.c
ldap/servers/plugins/acctpolicy/acct_plugin.c
ldap/servers/plugins/acctpolicy/acct_util.c
ldap/servers/plugins/acctpolicy/acctpolicy.h
- Added the necessary postop calls to check for config updates
ldap/servers/plugins/linkedattrs/fixup_task.c
- Fixed logging issue
ldap/servers/plugins/memberof/memberof_config.c
- Fixed double free/crash
ldap/servers/slapd/dse.c
- The ADD entry was incorrectly being set to NULL(memory leak)
ldap/servers/slapd/plugin.c
- Improved thread synchronization/fixed race condition
- Fixed memory leak when deleting plugin for the plugin config area
ldap/servers/slapd/slapi-plugin.h
ldap/servers/slapd/thread_data.c
- Revised plugin lock thread data wrappers
https://fedorahosted.org/389/ticket/47451
Jenkins: Passed
Valgrind: Passed
Reviewed by: nhosoi(Thanks!)
diff --git a/dirsrvtests/suites/dynamic-plugins/plugin_tests.py b/dirsrvtests/suites/dynamic-plugins/plugin_tests.py
index fa88145..e147be5 100644
--- a/dirsrvtests/suites/dynamic-plugins/plugin_tests.py
+++ b/dirsrvtests/suites/dynamic-plugins/plugin_tests.py
@@ -31,6 +31,7 @@ BRANCH2_DN = 'ou=branch2,' + DEFAULT_SUFFIX
GROUP_OU = 'ou=groups,' + DEFAULT_SUFFIX
PEOPLE_OU = 'ou=people,' + DEFAULT_SUFFIX
GROUP_DN = 'cn=group,' + DEFAULT_SUFFIX
+CONFIG_AREA = 'nsslapd-pluginConfigArea'
'''
Functional tests for each plugin
@@ -85,6 +86,35 @@ def test_dependency(inst, plugin):
################################################################################
#
+# Wait for task to complete
+#
+################################################################################
+def wait_for_task(conn, task_dn):
+ finished = False
+ count = 0
+ while count < 60:
+ try:
+ task_entry = conn.search_s(task_dn, ldap.SCOPE_BASE, 'objectclass=*')
+ if not task_entry:
+ log.fatal('wait_for_task: Search failed to find task: ' + task_dn)
+ assert False
+ if task_entry[0].hasAttr('nstaskexitcode'):
+ # task is done
+ finished = True
+ break
+ except ldap.LDAPError, e:
+ log.fatal('wait_for_task: Search failed: ' + e.message['desc'])
+ assert False
+
+ time.sleep(1)
+ count += 1
+ if not finished:
+ log.error('wait_for_task: Task (%s) did not complete!' % task_dn)
+ assert False
+
+
+################################################################################
+#
# Test Account Policy Plugin (0)
#
################################################################################
@@ -97,6 +127,7 @@ def test_acctpolicy(inst, args=None):
return True
CONFIG_DN = 'cn=config,cn=Account Policy Plugin,cn=plugins,cn=config'
+
log.info('Testing ' + PLUGIN_ACCT_POLICY + '...')
############################################################################
@@ -123,23 +154,12 @@ def test_acctpolicy(inst, args=None):
log.error('test_acctpolicy: Failed to add config entry: error ' + e.message['desc'])
assert False
- # Now set the config entry in the plugin entry
- #try:
- # inst.modify_s('cn=' + PLUGIN_ACCT_POLICY + ',cn=plugins,cn=config',
- # [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', CONFIG_DN)])
- #except ldap.LDAPError, e:
- # log.error('test_acctpolicy: Failed to set config entry in plugin entry: error ' + e.message['desc'])
- # assert False
-
############################################################################
# Test plugin
############################################################################
- # !!!! acctpolicy does have have a dse callabck to check for live updates - restart plugin for now !!!!
- inst.plugins.disable(name=PLUGIN_ACCT_POLICY)
- inst.plugins.enable(name=PLUGIN_ACCT_POLICY)
-
# Add an entry
+ time.sleep(1)
try:
inst.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(),
'sn': '1',
@@ -154,10 +174,11 @@ def test_acctpolicy(inst, args=None):
try:
inst.simple_bind_s(USER1_DN, "password")
except ldap.LDAPError, e:
- log.error('test_acctpolicy:Failed to bind as user1: ' + e.message['desc'])
+ log.error('test_acctpolicy: Failed to bind as user1: ' + e.message['desc'])
assert False
# Bind as Root DN
+ time.sleep(1)
try:
inst.simple_bind_s(DN_DM, PASSWORD)
except ldap.LDAPError, e:
@@ -185,14 +206,11 @@ def test_acctpolicy(inst, args=None):
log.error('test_acctpolicy: Failed to modify config entry: error ' + e.message['desc'])
assert False
- # !!!! must restart for now !!!!!
- inst.plugins.disable(name=PLUGIN_ACCT_POLICY)
- inst.plugins.enable(name=PLUGIN_ACCT_POLICY)
-
############################################################################
# Test plugin
############################################################################
+ time.sleep(1)
# login as user
try:
inst.simple_bind_s(USER1_DN, "password")
@@ -200,6 +218,7 @@ def test_acctpolicy(inst, args=None):
log.error('test_acctpolicy: Failed to bind(2nd) as user1: ' + e.message['desc'])
assert False
+ time.sleep(1)
# Bind as Root DN
try:
inst.simple_bind_s(DN_DM, PASSWORD)
@@ -498,7 +517,7 @@ def test_automember(inst, args=None):
log.error('test_automember: Failed to user3 to branch2: error ' + e.message['desc'])
assert False
- # Check the group - uniquemember sahould not exist
+ # Check the group - uniquemember should not exist
try:
entries = inst.search_s(GROUP_DN, ldap.SCOPE_BASE,
'(uniquemember=' + BUSER3_DN + ')')
@@ -512,9 +531,10 @@ def test_automember(inst, args=None):
# Enable plugin
inst.plugins.enable(name=PLUGIN_AUTOMEMBER)
+ TASK_DN = 'cn=task-' + str(int(time.time())) + ',cn=automember rebuild membership,cn=tasks,cn=config'
# Add the task
try:
- inst.add_s(Entry(('cn=task-' + str(int(time.time())) + ',cn=automember rebuild membership,cn=tasks,cn=config', {
+ inst.add_s(Entry((TASK_DN, {
'objectclass': 'top extensibleObject'.split(),
'basedn': 'ou=branch2,' + DEFAULT_SUFFIX,
'filter': 'objectclass=top'})))
@@ -522,7 +542,7 @@ def test_automember(inst, args=None):
log.error('test_automember: Failed to add task: error ' + e.message['desc'])
assert False
- time.sleep(3) # Wait for the task to do its work
+ wait_for_task(inst, TASK_DN)
# Verify the fixup task worked
try:
@@ -722,7 +742,7 @@ def test_dna(inst, args=None):
try:
inst.delete_s(USER1_DN)
except ldap.LDAPError, e:
- log.error('test_automember: Failed to delete test entry1: ' + e.message['desc'])
+ log.error('test_dna: Failed to delete test entry1: ' + e.message['desc'])
assert False
inst.plugins.disable(name=PLUGIN_DNA)
@@ -914,32 +934,11 @@ def test_linkedattrs(inst, args=None):
log.fatal('test_linkedattrs: Search for user1 failed: ' + e.message['desc'])
assert False
- # Verify that the task does not work yet(not until we enable the plugin)
- try:
- inst.add_s(Entry(('cn=task-' + str(int(time.time())) + ',cn=fixup linked attributes,cn=tasks,cn=config', {
- 'objectclass': 'top extensibleObject'.split(),
- 'basedn': DEFAULT_SUFFIX,
- 'filter': '(objectclass=top)'})))
- except ldap.LDAPError, e:
- log.error('test_linkedattrs: Failed to add task: error ' + e.message['desc'])
- assert False
-
- time.sleep(3) # Wait for the task to do, or not do, its work
-
- # The entry should still not have a manager attribute
- try:
- entries = inst.search_s(USER2_DN, ldap.SCOPE_BASE, '(manager=*)')
- if entries:
- log.fatal('test_linkedattrs: user2 incorrectly has a "manager" attr')
- assert False
- except ldap.LDAPError, e:
- log.fatal('test_linkedattrs: Search for user2 failed: ' + e.message['desc'])
- assert False
-
# Enable the plugin and rerun the task entry
inst.plugins.enable(name=PLUGIN_LINKED_ATTRS)
# Add the task again
+ TASK_DN = 'cn=task-' + str(int(time.time())) + ',cn=fixup linked attributes,cn=tasks,cn=config'
try:
inst.add_s(Entry(('cn=task-' + str(int(time.time())) + ',cn=fixup linked attributes,cn=tasks,cn=config', {
'objectclass': 'top extensibleObject'.split(),
@@ -949,7 +948,7 @@ def test_linkedattrs(inst, args=None):
log.error('test_linkedattrs: Failed to add task: error ' + e.message['desc'])
assert False
- time.sleep(3) # Wait for the task to do its work
+ wait_for_task(inst, TASK_DN)
# Check if user2 now has a manager attribute now
try:
@@ -1011,6 +1010,7 @@ def test_memberof(inst, args=None):
return
PLUGIN_DN = 'cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config'
+ SHARED_CONFIG_DN = 'cn=memberOf Config,' + DEFAULT_SUFFIX
log.info('Testing ' + PLUGIN_MEMBER_OF + '...')
@@ -1048,6 +1048,16 @@ def test_memberof(inst, args=None):
log.error('test_memberof: Failed to add group: error ' + e.message['desc'])
assert False
+ try:
+ inst.add_s(Entry((SHARED_CONFIG_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'memberofgroupattr': 'member',
+ 'memberofattr': 'memberof'
+ })))
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to shared config entry: error ' + e.message['desc'])
+ assert False
+
# Check if the user now has a "memberOf" attribute
try:
entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)')
@@ -1069,7 +1079,7 @@ def test_memberof(inst, args=None):
try:
entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)')
if entries:
- log.fatal('test_memberof: user1 incorrect has memberOf attr')
+ log.fatal('test_memberof: user1 incorrectly has memberOf attr')
assert False
except ldap.LDAPError, e:
log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc'])
@@ -1116,51 +1126,169 @@ def test_memberof(inst, args=None):
try:
entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)')
if entries:
- log.fatal('test_memberof: user1 incorrect has memberOf attr')
+ log.fatal('test_memberof: user1 incorrectly has memberOf attr')
assert False
except ldap.LDAPError, e:
log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc'])
assert False
############################################################################
- # Test Fixup Task
+ # Set the shared config entry and test the plugin
############################################################################
- inst.plugins.disable(name=PLUGIN_MEMBER_OF)
+ # The shared config entry uses "member" - the above test uses "uniquemember"
+ try:
+ inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, CONFIG_AREA, SHARED_CONFIG_DN)])
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to set plugin area: error ' + e.message['desc'])
+ assert False
+
+ # Delete the test entries then readd them to start with a clean slate
+ try:
+ inst.delete_s(USER1_DN)
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to delete test entry1: ' + e.message['desc'])
+ assert False
+
+ try:
+ inst.delete_s(GROUP_DN)
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to delete test group: ' + e.message['desc'])
+ assert False
+
+ try:
+ inst.add_s(Entry((USER1_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user1'
+ })))
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to add user1: error ' + e.message['desc'])
+ assert False
+
+ try:
+ inst.add_s(Entry((GROUP_DN, {
+ 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
+ 'cn': 'group',
+ 'member': USER1_DN
+ })))
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to add group: error ' + e.message['desc'])
+ assert False
+
+ # Test the shared config
+ # Check if the user now has a "memberOf" attribute
+ try:
+ entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)')
+ if not entries:
+ log.fatal('test_memberof: user1 missing memberOf')
+ assert False
+ except ldap.LDAPError, e:
+ log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc'])
+ assert False
+
+ # Remove "member" should remove "memberOf" from the entry
+ try:
+ inst.modify_s(GROUP_DN, [(ldap.MOD_DELETE, 'member', None)])
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to delete member: error ' + e.message['desc'])
+ assert False
+
+ # Check that "memberOf" was removed
+ try:
+ entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)')
+ if entries:
+ log.fatal('test_memberof: user1 incorrectly has memberOf attr')
+ assert False
+ except ldap.LDAPError, e:
+ log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc'])
+ assert False
+
+ ############################################################################
+ # Change the shared config entry to use 'uniquemember' and test the plugin
+ ############################################################################
+
+ try:
+ inst.modify_s(SHARED_CONFIG_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'uniquemember')])
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to set shared plugin entry(uniquemember): error '
+ + e.message['desc'])
+ assert False
- # Add uniquemember, should not update USER1
try:
inst.modify_s(GROUP_DN, [(ldap.MOD_REPLACE, 'uniquemember', USER1_DN)])
except ldap.LDAPError, e:
log.error('test_memberof: Failed to add uniquemember: error ' + e.message['desc'])
assert False
- # Check for "memberOf"
+ # Check if the user now has a "memberOf" attribute
+ try:
+ entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)')
+ if not entries:
+ log.fatal('test_memberof: user1 missing memberOf')
+ assert False
+ except ldap.LDAPError, e:
+ log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc'])
+ assert False
+
+ # Remove "uniquemember" should remove "memberOf" from the entry
+ try:
+ inst.modify_s(GROUP_DN, [(ldap.MOD_DELETE, 'uniquemember', None)])
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to delete member: error ' + e.message['desc'])
+ assert False
+
+ # Check that "memberOf" was removed
try:
entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)')
if entries:
- log.fatal('test_memberof: user1 incorrect has memberOf attr')
+ log.fatal('test_memberof: user1 incorrectly has memberOf attr')
assert False
except ldap.LDAPError, e:
log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc'])
assert False
- # Run fixup task while plugin disabled - should not add "memberOf
- # Verify that the task does not work yet(not until we enable the plugin)
+ ############################################################################
+ # Remove shared config from plugin, and retest
+ ############################################################################
+
+ # First change the plugin to use member before we move the shared config that uses uniquemember
try:
- inst.add_s(Entry(('cn=task-' + str(int(time.time())) + ',' + DN_MBO_TASK, {
- 'objectclass': 'top extensibleObject'.split(),
- 'basedn': DEFAULT_SUFFIX,
- 'filter': 'objectclass=top'})))
- except ldap.NO_SUCH_OBJECT:
- pass
+ inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'member')])
except ldap.LDAPError, e:
- log.error('test_memberof: Failed to add task: error ' + e.message['desc'])
+ log.error('test_memberof: Failed to update config(uniquemember): error ' + e.message['desc'])
assert False
- time.sleep(3) # Wait for the task to do, or not do, its work
+ # Remove shared config from plugin
+ try:
+ inst.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, CONFIG_AREA, None)])
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to add uniquemember: error ' + e.message['desc'])
+ assert False
- # Check for "memberOf"
+ try:
+ inst.modify_s(GROUP_DN, [(ldap.MOD_REPLACE, 'member', USER1_DN)])
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to add uniquemember: error ' + e.message['desc'])
+ assert False
+
+ # Check if the user now has a "memberOf" attribute
+ try:
+ entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)')
+ if not entries:
+ log.fatal('test_memberof: user1 missing memberOf')
+ assert False
+ except ldap.LDAPError, e:
+ log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc'])
+ assert False
+
+ # Remove "uniquemember" should remove "memberOf" from the entry
+ try:
+ inst.modify_s(GROUP_DN, [(ldap.MOD_DELETE, 'member', None)])
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to delete member: error ' + e.message['desc'])
+ assert False
+
+ # Check that "memberOf" was removed
try:
entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)')
if entries:
@@ -1170,11 +1298,42 @@ def test_memberof(inst, args=None):
log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc'])
assert False
+ ############################################################################
+ # Test Fixup Task
+ ############################################################################
+
+ inst.plugins.disable(name=PLUGIN_MEMBER_OF)
+
+ # First change the plugin to use uniquemember
+ try:
+ inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'uniquemember')])
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to update config(uniquemember): error ' + e.message['desc'])
+ assert False
+
+ # Add uniquemember, should not update USER1
+ try:
+ inst.modify_s(GROUP_DN, [(ldap.MOD_REPLACE, 'uniquemember', USER1_DN)])
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to add uniquemember: error ' + e.message['desc'])
+ assert False
+
+ # Check for "memberOf"
+ try:
+ entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)')
+ if entries:
+ log.fatal('test_memberof: user1 incorrect has memberOf attr')
+ assert False
+ except ldap.LDAPError, e:
+ log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc'])
+ assert False
+
# Enable the plugin, and run the task
inst.plugins.enable(name=PLUGIN_MEMBER_OF)
+ TASK_DN = 'cn=task-' + str(int(time.time())) + ',' + DN_MBO_TASK
try:
- inst.add_s(Entry(('cn=task-' + str(int(time.time())) + ',' + DN_MBO_TASK, {
+ inst.add_s(Entry((TASK_DN, {
'objectclass': 'top extensibleObject'.split(),
'basedn': DEFAULT_SUFFIX,
'filter': 'objectclass=top'})))
@@ -1182,7 +1341,7 @@ def test_memberof(inst, args=None):
log.error('test_memberof: Failed to add task: error ' + e.message['desc'])
assert False
- time.sleep(3) # Wait for the task to do its work
+ wait_for_task(inst, TASK_DN)
# Check for "memberOf"
try:
@@ -1216,6 +1375,12 @@ def test_memberof(inst, args=None):
log.error('test_memberof: Failed to delete test group: ' + e.message['desc'])
assert False
+ try:
+ inst.delete_s(SHARED_CONFIG_DN)
+ except ldap.LDAPError, e:
+ log.error('test_memberof: Failed to delete shared config entry: ' + e.message['desc'])
+ assert False
+
############################################################################
# Test passed
############################################################################
@@ -1286,9 +1451,6 @@ def test_mep(inst, args=None):
log.error('test_mep: Failed to add template entry: error ' + e.message['desc'])
assert False
- # log.info('geb.....')
- # time.sleep(30)
-
# Add the config entry
try:
inst.add_s(Entry((CONFIG_DN, {
@@ -1456,19 +1618,10 @@ def test_passthru(inst, args=None):
# Create second instance
passthru_inst = DirSrv(verbose=False)
- #if installation1_prefix:
- # args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Args for the master1 instance
- """
- args_instance[SER_HOST] = '127.0.0.1'
- args_instance[SER_PORT] = '33333'
- args_instance[SER_SERVERID_PROP] = 'passthru'
- """
- args_instance[SER_HOST] = 'localhost.localdomain'
+ # Args for the instance
+ args_instance[SER_HOST] = LOCALHOST
args_instance[SER_PORT] = 33333
args_instance[SER_SERVERID_PROP] = 'passthru'
-
args_instance[SER_CREATION_SUFFIX] = PASS_SUFFIX1
args_passthru_inst = args_instance.copy()
passthru_inst.allocate(args_passthru_inst)
@@ -1615,6 +1768,7 @@ def test_referint(inst, args=None):
log.info('Testing ' + PLUGIN_REFER_INTEGRITY + '...')
PLUGIN_DN = 'cn=' + PLUGIN_REFER_INTEGRITY + ',cn=plugins,cn=config'
+ SHARED_CONFIG_DN = 'cn=RI Config,' + DEFAULT_SUFFIX
############################################################################
# Configure plugin
@@ -1660,6 +1814,28 @@ def test_referint(inst, args=None):
log.error('test_referint: Failed to add group: error ' + e.message['desc'])
assert False
+ # Grab the referint log file from the plugin
+
+ try:
+ entries = inst.search_s(PLUGIN_DN, ldap.SCOPE_BASE, '(objectclass=top)')
+ REFERINT_LOGFILE = entries[0].getValue('referint-logfile')
+ except ldap.LDAPError, e:
+ log.fatal('test_referint: Unable to search plugin entry: ' + e.message['desc'])
+ assert False
+
+ # Add shared config entry
+ try:
+ inst.add_s(Entry((SHARED_CONFIG_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'referint-membership-attr': 'member',
+ 'referint-update-delay': '0',
+ 'referint-logfile': REFERINT_LOGFILE,
+ 'referint-logchanges': '0'
+ })))
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to shared config entry: error ' + e.message['desc'])
+ assert False
+
# Delete a user
try:
inst.delete_s(USER1_DN)
@@ -1709,6 +1885,150 @@ def test_referint(inst, args=None):
assert False
############################################################################
+ # Set the shared config entry and test the plugin
+ ############################################################################
+
+ # The shared config entry uses "member" - the above test used "uniquemember"
+ try:
+ inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, CONFIG_AREA, SHARED_CONFIG_DN)])
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to set plugin area: error ' + e.message['desc'])
+ assert False
+
+ # Delete the group, and readd everything
+ try:
+ inst.delete_s(GROUP_DN)
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to delete group: ' + e.message['desc'])
+ assert False
+
+ try:
+ inst.add_s(Entry((USER1_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user1'
+ })))
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to add user1: error ' + e.message['desc'])
+ assert False
+
+ try:
+ inst.add_s(Entry((USER2_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user2'
+ })))
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to add user2: error ' + e.message['desc'])
+ assert False
+
+ try:
+ inst.add_s(Entry((GROUP_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'cn': 'group',
+ 'member': USER1_DN,
+ 'uniquemember': USER2_DN
+ })))
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to add group: error ' + e.message['desc'])
+ assert False
+
+ # Delete a user
+ try:
+ inst.delete_s(USER1_DN)
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to delete user1: ' + e.message['desc'])
+ assert False
+
+ # Check for integrity
+ try:
+ entry = inst.search_s(GROUP_DN, ldap.SCOPE_BASE, '(member=' + USER1_DN + ')')
+ if entry:
+ log.error('test_referint: user1 was not removed from group')
+ assert False
+ except ldap.LDAPError, e:
+ log.fatal('test_referint: Unable to search group: ' + e.message['desc'])
+ assert False
+
+ ############################################################################
+ # Change the shared config entry to use 'uniquemember' and test the plugin
+ ############################################################################
+
+ try:
+ inst.modify_s(SHARED_CONFIG_DN, [(ldap.MOD_REPLACE, 'referint-membership-attr', 'uniquemember')])
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to set shared plugin entry(uniquemember): error '
+ + e.message['desc'])
+ assert False
+
+ # Delete a user
+ try:
+ inst.delete_s(USER2_DN)
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to delete user2: ' + e.message['desc'])
+ assert False
+
+ # Check for integrity
+ try:
+ entry = inst.search_s(GROUP_DN, ldap.SCOPE_BASE, '(uniquemember=' + USER2_DN + ')')
+ if entry:
+ log.error('test_referint: user2 was not removed from group')
+ assert False
+ except ldap.LDAPError, e:
+ log.fatal('test_referint: Unable to search group: ' + e.message['desc'])
+ assert False
+
+ ############################################################################
+ # Remove shared config from plugin, and retest
+ ############################################################################
+
+ # First change the plugin to use member before we move the shared config that uses uniquemember
+ try:
+ inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'referint-membership-attr', 'member')])
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to update config(uniquemember): error ' + e.message['desc'])
+ assert False
+
+ # Remove shared config from plugin
+ try:
+ inst.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, CONFIG_AREA, None)])
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to remove plugin config area: error ' + e.message['desc'])
+ assert False
+
+ # Add test user
+ try:
+ inst.add_s(Entry((USER1_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user1'
+ })))
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to add user1: error ' + e.message['desc'])
+ assert False
+
+ # Add user to group
+ try:
+ inst.modify_s(GROUP_DN, [(ldap.MOD_REPLACE, 'member', USER1_DN)])
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to add member to group: error ' + e.message['desc'])
+ assert False
+
+ # Delete a user
+ try:
+ inst.delete_s(USER1_DN)
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to delete user1: ' + e.message['desc'])
+ assert False
+
+ # Check for integrity
+ try:
+ entry = inst.search_s(GROUP_DN, ldap.SCOPE_BASE, '(member=' + USER1_DN + ')')
+ if entry:
+ log.error('test_referint: user1 was not removed from group')
+ assert False
+ except ldap.LDAPError, e:
+ log.fatal('test_referint: Unable to search group: ' + e.message['desc'])
+ assert False
+
+ ############################################################################
# Test plugin dependency
############################################################################
@@ -1721,7 +2041,13 @@ def test_referint(inst, args=None):
try:
inst.delete_s(GROUP_DN)
except ldap.LDAPError, e:
- log.error('test_referint: Failed to delete user1: ' + e.message['desc'])
+ log.error('test_referint: Failed to delete group: ' + e.message['desc'])
+ assert False
+
+ try:
+ inst.delete_s(SHARED_CONFIG_DN)
+ except ldap.LDAPError, e:
+ log.error('test_referint: Failed to delete shared config entry: ' + e.message['desc'])
assert False
############################################################################
@@ -1863,7 +2189,7 @@ def test_rootdn(inst, args=None):
'userpassword': 'password'
})))
except ldap.LDAPError, e:
- log.error('test_retrocl: Failed to add user1: error ' + e.message['desc'])
+ log.error('test_rootdn: Failed to add user1: error ' + e.message['desc'])
assert False
# Set an aci so we can modify the plugin after ew deny the root dn
diff --git a/dirsrvtests/suites/dynamic-plugins/stress_tests.py b/dirsrvtests/suites/dynamic-plugins/stress_tests.py
index a1f666d..f1a34b4 100644
--- a/dirsrvtests/suites/dynamic-plugins/stress_tests.py
+++ b/dirsrvtests/suites/dynamic-plugins/stress_tests.py
@@ -21,6 +21,7 @@ from constants import *
log = logging.getLogger(__name__)
NUM_USERS = 250
+GROUP_DN = 'cn=stress-group,' + DEFAULT_SUFFIX
def openConnection(inst):
@@ -58,6 +59,14 @@ def configureMO(inst):
assert False
+def cleanup(conn):
+ try:
+ conn.delete_s(GROUP_DN)
+ except ldap.LDAPError, e:
+ log.error('cleanup: failed to delete group (' + GROUP_DN + ') error: ' + e.message['desc'])
+ assert False
+
+
class DelUsers(threading.Thread):
def __init__(self, inst, rdnval):
threading.Thread.__init__(self)
@@ -97,7 +106,6 @@ class AddUsers(threading.Thread):
idx = 0
if self.addToGroup:
- GROUP_DN = 'cn=stress-group,' + DEFAULT_SUFFIX
try:
conn.add_s(Entry((GROUP_DN,
{'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
diff --git a/dirsrvtests/suites/dynamic-plugins/test_dynamic_plugins.py b/dirsrvtests/suites/dynamic-plugins/test_dynamic_plugins.py
index 3677fd5..288505b 100644
--- a/dirsrvtests/suites/dynamic-plugins/test_dynamic_plugins.py
+++ b/dirsrvtests/suites/dynamic-plugins/test_dynamic_plugins.py
@@ -31,6 +31,12 @@ class TopologyStandalone(object):
self.standalone = standalone
+def repl_fail(replica):
+ # remove replica instance, and assert failure
+ replica.delete()
+ assert False
+
+
@pytest.fixture(scope="module")
def topology(request):
'''
@@ -128,7 +134,10 @@ def test_dynamic_plugins(topology):
Test Dynamic Plugins - exercise each plugin and its main features, while
changing the configuration without restarting the server.
- Need to test: functionality, stability, and stress.
+ Need to test: functionality, stability, and stress. These tests need to run
+ with replication disabled, and with replication setup with a
+ second instance. Then test if replication is working, and we have
+ same entries on each side.
Functionality - Make sure that as configuration changes are made they take
effect immediately. Cross plugin interaction (e.g. automember/memberOf)
@@ -137,17 +146,21 @@ def test_dynamic_plugins(topology):
Memory Corruption - Restart the plugins many times, and in different orders and test
functionality, and stability. This will excerise the internal
- plugin linked lists, dse callabcks, and task handlers.
+ plugin linked lists, dse callbacks, and task handlers.
- Stress - Put the server under some type of load that is using a particular
- plugin for each operation, and then make changes to that plugin.
- The new changes should take effect, and the server should not crash.
+ Stress - Put the server under load that will trigger multiple plugins(MO, RI, DNA, etc)
+ Restart various plugins while these operations are going on. Perform this test
+ 5 times(stress_max_run).
"""
- ############################################################################
- # Test plugin functionality
- ############################################################################
+ REPLICA_PORT = 33334
+ RUV_FILTER = '(&(nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff)(objectclass=nstombstone))'
+ master_maxcsn = 0
+ replica_maxcsn = 0
+ msg = ' (no replication)'
+ replication_run = False
+ stress_max_runs = 5
# First enable dynamic plugins
try:
@@ -156,132 +169,337 @@ def test_dynamic_plugins(topology):
ldap.error('Failed to enable dynamic plugin!' + e.message['desc'])
assert False
- log.info('#####################################################')
- log.info('Testing Dynamic Plugins Functionality...')
- log.info('#####################################################\n')
-
- plugin_tests.test_all_plugins(topology.standalone)
-
- log.info('#####################################################')
- log.info('Successfully Tested Dynamic Plugins Functionality.')
- log.info('#####################################################\n')
-
- ############################################################################
- # Test the stability by exercising the internal lists, callabcks, and task handlers
- ############################################################################
-
- log.info('#####################################################')
- log.info('Testing Dynamic Plugins for Memory Corruption...')
- log.info('#####################################################\n')
- prev_plugin_test = None
- prev_prev_plugin_test = None
- for plugin_test in plugin_tests.func_tests:
+ while 1:
#
- # Restart the plugin several times (and prev plugins) - work that linked list
+ # First run the tests with replication disabled, then rerun them with replication set up
#
- plugin_test(topology.standalone, "restart")
- if prev_prev_plugin_test:
- prev_prev_plugin_test(topology.standalone, "restart")
+ ############################################################################
+ # Test plugin functionality
+ ############################################################################
+
+ log.info('####################################################################')
+ log.info('Testing Dynamic Plugins Functionality' + msg + '...')
+ log.info('####################################################################\n')
+
+ plugin_tests.test_all_plugins(topology.standalone)
+
+ log.info('####################################################################')
+ log.info('Successfully Tested Dynamic Plugins Functionality' + msg + '.')
+ log.info('####################################################################\n')
+
+ ############################################################################
+ # Test the stability by exercising the internal lists, callbacks, and task handlers
+ ############################################################################
+
+ log.info('####################################################################')
+ log.info('Testing Dynamic Plugins for Memory Corruption' + msg + '...')
+ log.info('####################################################################\n')
+ prev_plugin_test = None
+ prev_prev_plugin_test = None
+
+ for plugin_test in plugin_tests.func_tests:
+ #
+ # Restart the plugin several times (and prev plugins) - work that linked list
+ #
+ plugin_test(topology.standalone, "restart")
+
+ if prev_prev_plugin_test:
+ prev_prev_plugin_test(topology.standalone, "restart")
+
+ plugin_test(topology.standalone, "restart")
+
+ if prev_plugin_test:
+ prev_plugin_test(topology.standalone, "restart")
+
+ plugin_test(topology.standalone, "restart")
+
+ # Now run the functional test
+ plugin_test(topology.standalone)
+
+ # Set the previous tests
+ if prev_plugin_test:
+ prev_prev_plugin_test = prev_plugin_test
+ prev_plugin_test = plugin_test
+
+ log.info('####################################################################')
+ log.info('Successfully Tested Dynamic Plugins for Memory Corruption' + msg + '.')
+ log.info('####################################################################\n')
+
+ ############################################################################
+ # Stress two plugins while restarting it, and while restarting other plugins.
+ # The goal is to not crash, and have the plugins work after stressing them.
+ ############################################################################
+
+ log.info('####################################################################')
+ log.info('Stressing Dynamic Plugins' + msg + '...')
+ log.info('####################################################################\n')
+
+ stress_tests.configureMO(topology.standalone)
+ stress_tests.configureRI(topology.standalone)
+
+ stress_count = 0
+ while stress_count < stress_max_runs:
+ log.info('####################################################################')
+ log.info('Running stress test' + msg + '. Run (%d/%d)...' % (stress_count + 1, stress_max_runs))
+ log.info('####################################################################\n')
+
+ try:
+ # Launch three new threads to add a bunch of users
+ add_users = stress_tests.AddUsers(topology.standalone, 'employee', True)
+ add_users.start()
+ add_users2 = stress_tests.AddUsers(topology.standalone, 'entry', True)
+ add_users2.start()
+ add_users3 = stress_tests.AddUsers(topology.standalone, 'person', True)
+ add_users3.start()
+ time.sleep(1)
+
+ # While we are adding users restart the MO plugin and an idle plugin
+ topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ time.sleep(1)
+ topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ time.sleep(1)
+ topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
+ topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
+ time.sleep(1)
+ topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ time.sleep(2)
+ topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ time.sleep(1)
+ topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
+ topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
+ topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ time.sleep(1)
+ topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+
+ # Wait for the 'adding' threads to complete
+ add_users.join()
+ add_users2.join()
+ add_users3.join()
+
+ # Now launch three threads to delete the users
+ del_users = stress_tests.DelUsers(topology.standalone, 'employee')
+ del_users.start()
+ del_users2 = stress_tests.DelUsers(topology.standalone, 'entry')
+ del_users2.start()
+ del_users3 = stress_tests.DelUsers(topology.standalone, 'person')
+ del_users3.start()
+ time.sleep(1)
+
+ # Restart both the MO, RI plugins during these deletes, and an idle plugin
+ topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
+ topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
+ time.sleep(1)
+ topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
+ time.sleep(1)
+ topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ time.sleep(1)
+ topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ time.sleep(1)
+ topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
+ topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
+ topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
+ topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
+ topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
+ time.sleep(2)
+ topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
+ time.sleep(1)
+ topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ time.sleep(1)
+ topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ time.sleep(1)
+ topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
+ topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
+ topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
+
+ # Wait for the 'deleting' threads to complete
+ del_users.join()
+ del_users2.join()
+ del_users3.join()
+
+ # Now make sure both the MO and RI plugins still work correctly
+ plugin_tests.func_tests[8](topology.standalone) # RI plugin
+ plugin_tests.func_tests[5](topology.standalone) # MO plugin
+
+ # Cleanup the stress tests
+ stress_tests.cleanup(topology.standalone)
+
+ except:
+ log.info('Stress test failed!')
+ repl_fail(replica_inst)
+
+ stress_count += 1
+ log.info('####################################################################')
+ log.info('Successfully Stressed Dynamic Plugins' + msg +
+ '. Completed (%d/%d)' % (stress_count, stress_max_runs))
+ log.info('####################################################################\n')
+
+ if replication_run:
+ # We're done.
+ break
+ else:
+ #
+ # Enable replication and run everything one more time
+ #
+ log.info('Setting up replication, and rerunning the tests...\n')
+
+ # Create replica instance
+ replica_inst = DirSrv(verbose=False)
+ args_instance[SER_HOST] = LOCALHOST
+ args_instance[SER_PORT] = REPLICA_PORT
+ args_instance[SER_SERVERID_PROP] = 'replica'
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+
+ args_replica_inst = args_instance.copy()
+ replica_inst.allocate(args_replica_inst)
+ replica_inst.create()
+ replica_inst.open()
+
+ try:
+ topology.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX,
+ role=REPLICAROLE_MASTER,
+ replicaId=1)
+ replica_inst.replica.enableReplication(suffix=DEFAULT_SUFFIX,
+ role=REPLICAROLE_CONSUMER,
+ replicaId=65535)
+ properties = {RA_NAME: r'to_replica',
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+
+ repl_agreement = topology.standalone.agreement.create(suffix=DEFAULT_SUFFIX,
+ host=LOCALHOST,
+ port=REPLICA_PORT,
+ properties=properties)
+
+ if not repl_agreement:
+ log.fatal("Fail to create a replica agreement")
+ repl_fail(replica_inst)
+
+ topology.standalone.agreement.init(DEFAULT_SUFFIX, LOCALHOST, REPLICA_PORT)
+ topology.standalone.waitForReplInit(repl_agreement)
+ except:
+ log.info('Failed to setup replication!')
+ repl_fail(replica_inst)
+
+ replication_run = True
+ msg = ' (replication enabled)'
+ time.sleep(1)
- plugin_test(topology.standalone, "restart")
+ ############################################################################
+ # Check replication, and data are in sync, and remove the instance
+ ############################################################################
- if prev_plugin_test:
- prev_plugin_test(topology.standalone, "restart")
+ log.info('Checking if replication is in sync...')
- plugin_test(topology.standalone, "restart")
+ try:
+ # Grab master's max CSN
+ entry = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, RUV_FILTER)
+ if not entry:
+ log.error('Failed to find db tombstone entry from master')
+ repl_fail(replica_inst)
+ elements = entry[0].getValues('nsds50ruv')
+ for ruv in elements:
+ if 'replica 1' in ruv:
+ parts = ruv.split()
+ if len(parts) == 5:
+ master_maxcsn = parts[4]
+ break
+ else:
+ log.error('RUV is incomplete')
+ repl_fail(replica_inst)
+ if master_maxcsn == 0:
+ log.error('Failed to find maxcsn on master')
+ repl_fail(replica_inst)
- # Now run the functional test
- plugin_test(topology.standalone)
+ except ldap.LDAPError, e:
+ log.fatal('Unable to search master for db tombstone: ' + e.message['desc'])
+ repl_fail(replica_inst)
+
+ # Loop on the consumer - waiting for it to catch up
+ count = 0
+ insync = False
+ while count < 10:
+ try:
+ # Grab consumer's max CSN
+ entry = replica_inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, RUV_FILTER)
+ if not entry:
+ log.error('Failed to find db tombstone entry on consumer')
+ repl_fail(replica_inst)
+ elements = entry[0].getValues('nsds50ruv')
+ for ruv in elements:
+ if 'replica 1' in ruv:
+ parts = ruv.split()
+ if len(parts) == 5:
+ replica_maxcsn = parts[4]
+ break
+ if replica_maxcsn == 0:
+ log.error('Failed to find maxcsn on consumer')
+ repl_fail(replica_inst)
+ except ldap.LDAPError, e:
+ log.fatal('Unable to search for db tombstone on consumer: ' + e.message['desc'])
+ repl_fail(replica_inst)
+
+ if master_maxcsn == replica_maxcsn:
+ insync = True
+ log.info('Replication is in sync.\n')
+ break
+ count += 1
+ time.sleep(1)
+
+ # Report on replication status
+ if not insync:
+ log.error('Consumer not in sync with master!')
+ repl_fail(replica_inst)
- # Set the previous tests
- if prev_plugin_test:
- prev_prev_plugin_test = prev_plugin_test
- prev_plugin_test = plugin_test
+ #
+ # Verify the databases are identical. There should not be any "user, entry, employee" entries
+ #
+ log.info('Checking if the data is the same between the replicas...')
- log.info('#####################################################')
- log.info('Successfully Tested Dynamic Plugins for Memory Corruption.')
- log.info('#####################################################\n')
+ # Check the master
+ try:
+ entries = topology.standalone.search_s(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ "(|(uid=person*)(uid=entry*)(uid=employee*))")
+ if len(entries) > 0:
+ log.error('Master database has incorrect data set!\n')
+ repl_fail(replica_inst)
+ except ldap.LDAPError, e:
+ log.fatal('Unable to search db on master: ' + e.message['desc'])
+ repl_fail(replica_inst)
- ############################################################################
- # Stress two plugins while restarting it, and while restarting other plugins.
- # The goal is to not crash, and have the plugins work after stressing it.
- ############################################################################
+ # Check the consumer
+ try:
+ entries = replica_inst.search_s(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ "(|(uid=person*)(uid=entry*)(uid=employee*))")
+ if len(entries) > 0:
+ log.error('Consumer database is not consistent with master database')
+ repl_fail(replica_inst)
+ except ldap.LDAPError, e:
+ log.fatal('Unable to search db on consumer: ' + e.message['desc'])
+ repl_fail(replica_inst)
- log.info('#####################################################')
- log.info('Stressing Dynamic Plugins...')
- log.info('#####################################################\n')
+ log.info('Data is consistent across the replicas.\n')
- # Configure the plugins
- stress_tests.configureMO(topology.standalone)
- stress_tests.configureRI(topology.standalone)
-
- # Launch three new threads to add a bunch of users
- add_users = stress_tests.AddUsers(topology.standalone, 'user', True)
- add_users.start()
- add_users2 = stress_tests.AddUsers(topology.standalone, 'entry', True)
- add_users2.start()
- add_users3 = stress_tests.AddUsers(topology.standalone, 'person', True)
- add_users3.start()
- time.sleep(1)
-
- # While we are adding users restart the MO plugin
- topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
- time.sleep(3)
- topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
- time.sleep(1)
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
-
- # Restart idle plugin
- topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
- topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
-
- # Wait for the 'adding' threads to complete
- add_users.join()
- add_users2.join()
- add_users3.join()
-
- # Now launch three threads to delete the users, and restart both the MO and RI plugins
- del_users = stress_tests.DelUsers(topology.standalone, 'user')
- del_users.start()
- del_users2 = stress_tests.DelUsers(topology.standalone, 'entry')
- del_users2.start()
- del_users3 = stress_tests.DelUsers(topology.standalone, 'person')
- del_users3.start()
- time.sleep(1)
-
- # Restart the both the MO and RI plugins during these deletes
-
- topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
- topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
- topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
- time.sleep(3)
- topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
- time.sleep(1)
- topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
- time.sleep(1)
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
- time.sleep(1)
- topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
-
- # Restart idle plugin
- topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
- topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
-
- # Wait for the 'deleting' threads to complete
- del_users.join()
- del_users2.join()
- del_users3.join()
-
- # Now make sure both the MO and RI plugins still work
- plugin_tests.func_tests[8](topology.standalone) # RI plugin
- plugin_tests.func_tests[5](topology.standalone) # MO plugin
+ log.info('####################################################################')
+ log.info('Replication consistency test passed')
+ log.info('####################################################################\n')
- log.info('#####################################################')
- log.info('Successfully Stressed Dynamic Plugins.')
- log.info('#####################################################\n')
+ # Remove the replica instance
+ replica_inst.delete()
############################################################################
# We made it to the end!
@@ -291,7 +509,8 @@ def test_dynamic_plugins(topology):
log.info('#####################################################')
log.info("Dynamic Plugins Testsuite: Completed Successfully!")
log.info('#####################################################')
- log.info('#####################################################')
+ log.info('#####################################################\n')
+
def test_dynamic_plugins_final(topology):
topology.standalone.stop(timeout=10)
diff --git a/dirsrvtests/tickets/ticket47560_test.py b/dirsrvtests/tickets/ticket47560_test.py
index 0b7e436..af7fdc3 100644
--- a/dirsrvtests/tickets/ticket47560_test.py
+++ b/dirsrvtests/tickets/ticket47560_test.py
@@ -146,7 +146,7 @@ def test_ticket47560(topology):
Enable or disable mbo plugin depending on 'value' ('on'/'off')
"""
# enable/disable the mbo plugin
- if value != 'on':
+ if value == 'on':
topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
else:
topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
diff --git a/ldap/servers/plugins/acctpolicy/acct_config.c b/ldap/servers/plugins/acctpolicy/acct_config.c
index 25352b1..d1acf1a 100644
--- a/ldap/servers/plugins/acctpolicy/acct_config.c
+++ b/ldap/servers/plugins/acctpolicy/acct_config.c
@@ -53,9 +53,11 @@ acct_policy_load_config_startup( Slapi_PBlock* pb, void* plugin_id ) {
PLUGIN_CONFIG_DN, rc );
return( -1 );
}
-
+ config_wr_lock();
+ free_config();
newcfg = get_config();
rc = acct_policy_entry2config( config_entry, newcfg );
+ config_unlock();
slapi_entry_free( config_entry );
@@ -85,8 +87,8 @@ acct_policy_entry2config( Slapi_Entry *e, acctPluginCfg *newcfg ) {
} else if (!update_is_allowed_attr(newcfg->state_attr_name)) {
/* log a warning that this attribute cannot be updated */
slapi_log_error( SLAPI_LOG_FATAL, PLUGIN_NAME,
- "The configured state attribute [%s] cannot be updated, accounts will always become inactive.\n",
- newcfg->state_attr_name );
+ "The configured state attribute [%s] cannot be updated, accounts will always become inactive.\n",
+ newcfg->state_attr_name );
}
newcfg->alt_state_attr_name = get_attr_string_val( e, CFG_ALT_LASTLOGIN_STATE_ATTR );
diff --git a/ldap/servers/plugins/acctpolicy/acct_init.c b/ldap/servers/plugins/acctpolicy/acct_init.c
index c4dba22..0b1af91 100644
--- a/ldap/servers/plugins/acctpolicy/acct_init.c
+++ b/ldap/servers/plugins/acctpolicy/acct_init.c
@@ -63,6 +63,47 @@ int acct_postop_init( Slapi_PBlock *pb );
int acct_bind_preop( Slapi_PBlock *pb );
int acct_bind_postop( Slapi_PBlock *pb );
+static void *_PluginID = NULL;
+static Slapi_DN *_PluginDN = NULL;
+static Slapi_DN *_ConfigAreaDN = NULL;
+static Slapi_RWLock *config_rwlock = NULL;
+
+void
+acct_policy_set_plugin_id(void *pluginID)
+{
+ _PluginID = pluginID;
+}
+
+void *
+acct_policy_get_plugin_id()
+{
+ return _PluginID;
+}
+
+void
+acct_policy_set_plugin_sdn(Slapi_DN *pluginDN)
+{
+ _PluginDN = pluginDN;
+}
+
+Slapi_DN *
+acct_policy_get_plugin_sdn()
+{
+ return _PluginDN;
+}
+
+void
+acct_policy_set_config_area(Slapi_DN *sdn)
+{
+ _ConfigAreaDN = sdn;
+}
+
+Slapi_DN *
+acct_policy_get_config_area()
+{
+ return _ConfigAreaDN;
+}
+
/*
Master init function for the account plugin
*/
@@ -120,14 +161,32 @@ acct_policy_init( Slapi_PBlock *pb )
which is needed to retrieve the plugin configuration
*/
int
-acct_policy_start( Slapi_PBlock *pb ) {
+acct_policy_start( Slapi_PBlock *pb )
+{
acctPluginCfg *cfg;
void *plugin_id = get_identity();
+ Slapi_DN *plugindn = NULL;
+ char *config_area = NULL;
if(slapi_plugin_running(pb)){
return 0;
}
+ slapi_pblock_get(pb, SLAPI_TARGET_SDN, &plugindn);
+ acct_policy_set_plugin_sdn(slapi_sdn_dup(plugindn));
+
+ /* Set the alternate config area if one is defined. */
+ slapi_pblock_get(pb, SLAPI_PLUGIN_CONFIG_AREA, &config_area);
+ if (config_area) {
+ acct_policy_set_config_area(slapi_sdn_new_normdn_byval(config_area));
+ }
+
+ if(config_rwlock == NULL){
+ if((config_rwlock = slapi_new_rwlock()) == NULL){
+ return( CALLBACK_ERR );
+ }
+ }
+
/* Load plugin configuration */
if( acct_policy_load_config_startup( pb, plugin_id ) ) {
slapi_log_error( SLAPI_LOG_FATAL, PLUGIN_NAME,
@@ -151,6 +210,10 @@ acct_policy_close( Slapi_PBlock *pb )
{
int rc = 0;
+ slapi_destroy_rwlock(config_rwlock);
+ config_rwlock = NULL;
+ slapi_sdn_free(&_PluginDN);
+ slapi_sdn_free(&_ConfigAreaDN);
free_config();
return rc;
@@ -168,8 +231,11 @@ acct_preop_init( Slapi_PBlock *pb ) {
return( CALLBACK_ERR );
}
- if ( slapi_pblock_set( pb, SLAPI_PLUGIN_PRE_BIND_FN,
- (void *) acct_bind_preop ) != 0 ) {
+ if ( slapi_pblock_set( pb, SLAPI_PLUGIN_PRE_BIND_FN, (void *) acct_bind_preop ) != 0 ||
+ slapi_pblock_set(pb, SLAPI_PLUGIN_PRE_ADD_FN, (void *) acct_add_pre_op) != 0 ||
+ slapi_pblock_set(pb, SLAPI_PLUGIN_PRE_MODIFY_FN, (void *) acct_mod_pre_op) != 0 ||
+ slapi_pblock_set(pb, SLAPI_PLUGIN_PRE_DELETE_FN, (void *) acct_del_pre_op) != 0)
+ {
slapi_log_error( SLAPI_LOG_FATAL, PRE_PLUGIN_NAME,
"Failed to set plugin callback function\n" );
return( CALLBACK_ERR );
@@ -192,8 +258,11 @@ acct_postop_init( Slapi_PBlock *pb )
return( CALLBACK_ERR );
}
- if ( slapi_pblock_set( pb, SLAPI_PLUGIN_POST_BIND_FN,
- (void *)acct_bind_postop ) != 0 ) {
+
+ if ( slapi_pblock_set( pb, SLAPI_PLUGIN_POST_BIND_FN, (void *)acct_bind_postop ) != 0 ||
+ slapi_pblock_set(pb, SLAPI_PLUGIN_POST_ADD_FN, (void *) acct_post_op) != 0 ||
+ slapi_pblock_set(pb, SLAPI_PLUGIN_POST_MODIFY_FN, (void *) acct_post_op) != 0)
+ {
slapi_log_error( SLAPI_LOG_FATAL, POST_PLUGIN_NAME,
"Failed to set plugin callback function\n" );
return( CALLBACK_ERR );
@@ -208,3 +277,23 @@ acct_postop_init( Slapi_PBlock *pb )
return( CALLBACK_OK );
}
+/*
+ * Wrappers for config locking
+ */
+void
+config_rd_lock()
+{
+ slapi_rwlock_rdlock(config_rwlock);
+}
+
+void
+config_wr_lock()
+{
+ slapi_rwlock_wrlock(config_rwlock);
+}
+
+void
+config_unlock()
+{
+ slapi_rwlock_unlock(config_rwlock);
+}
diff --git a/ldap/servers/plugins/acctpolicy/acct_plugin.c b/ldap/servers/plugins/acctpolicy/acct_plugin.c
index 5719f27..a61a50c 100644
--- a/ldap/servers/plugins/acctpolicy/acct_plugin.c
+++ b/ldap/servers/plugins/acctpolicy/acct_plugin.c
@@ -28,6 +28,46 @@ Hewlett-Packard Development Company, L.P.
#include "acctpolicy.h"
/*
+ * acct_policy_dn_is_config()
+ *
+ * Checks if dn is a plugin config entry.
+ */
+static int
+acct_policy_dn_is_config(Slapi_DN *sdn)
+{
+ int ret = 0;
+
+ slapi_log_error(SLAPI_LOG_TRACE, PLUGIN_NAME,
+ "--> automember_dn_is_config\n");
+
+ if (sdn == NULL) {
+ goto bail;
+ }
+
+ /* If an alternate config area is configured, treat it's child
+ * entries as config entries. If the alternate config area is
+ * not configured, treat children of the top-level plug-in
+ * config entry as our config entries. */
+ if (acct_policy_get_config_area()) {
+ if (slapi_sdn_issuffix(sdn, acct_policy_get_config_area()) &&
+ slapi_sdn_compare(sdn, acct_policy_get_config_area())) {
+ ret = 1;
+ }
+ } else {
+ if (slapi_sdn_issuffix(sdn, acct_policy_get_plugin_sdn()) &&
+ slapi_sdn_compare(sdn, acct_policy_get_plugin_sdn())) {
+ ret = 1;
+ }
+ }
+
+bail:
+ slapi_log_error(SLAPI_LOG_TRACE, PLUGIN_NAME,
+ "<-- automember_dn_is_config\n");
+
+ return ret;
+}
+
+/*
Checks bind entry for last login state and compares current time with last
login time plus the limit to decide whether to deny the bind.
*/
@@ -39,6 +79,7 @@ acct_inact_limit( Slapi_PBlock *pb, const char *dn, Slapi_Entry *target_entry, a
int rc = 0; /* Optimistic default */
acctPluginCfg *cfg;
+ config_rd_lock();
cfg = get_config();
if( ( lasttimestr = get_attr_string_val( target_entry,
cfg->state_attr_name ) ) != NULL ) {
@@ -75,6 +116,7 @@ acct_inact_limit( Slapi_PBlock *pb, const char *dn, Slapi_Entry *target_entry, a
}
done:
+ config_unlock();
/* Deny bind; the account has exceeded the inactivity limit */
if( rc == 1 ) {
slapi_send_ldap_result( pb, LDAP_CONSTRAINT_VIOLATION, NULL,
@@ -106,13 +148,14 @@ acct_record_login( const char *dn )
Slapi_PBlock *modpb = NULL;
int skip_mod_attrs = 1; /* value doesn't matter as long as not NULL */
+ config_rd_lock();
cfg = get_config();
/* if we are not allowed to modify the state attr we're done
* this could be intentional, so just return
*/
if (! update_is_allowed_attr(cfg->always_record_login_attr) )
- return rc;
+ goto done;
plugin_id = get_identity();
@@ -152,6 +195,7 @@ acct_record_login( const char *dn )
}
done:
+ config_unlock();
slapi_pblock_destroy( modpb );
slapi_ch_free_string( &timestr );
@@ -274,6 +318,7 @@ acct_bind_postop( Slapi_PBlock *pb )
goto done;
}
+ config_rd_lock();
cfg = get_config();
tracklogin = cfg->always_record_login;
@@ -296,6 +341,7 @@ acct_bind_postop( Slapi_PBlock *pb )
}
}
}
+ config_unlock();
if( tracklogin ) {
rc = acct_record_login( dn );
@@ -319,3 +365,133 @@ done:
return( rc == 0 ? CALLBACK_OK : CALLBACK_ERR );
}
+
+static int acct_pre_op( Slapi_PBlock *pb, int modop )
+{
+ Slapi_DN *sdn = 0;
+ Slapi_Entry *e = 0;
+ Slapi_Mods *smods = 0;
+ LDAPMod **mods;
+ int free_entry = 0;
+ char *errstr = NULL;
+ int ret = SLAPI_PLUGIN_SUCCESS;
+
+ slapi_log_error(SLAPI_LOG_TRACE, PRE_PLUGIN_NAME, "--> acct_pre_op\n");
+
+ slapi_pblock_get(pb, SLAPI_TARGET_SDN, &sdn);
+
+ if (acct_policy_dn_is_config(sdn)) {
+ /* Validate config changes, but don't apply them.
+ * This allows us to reject invalid config changes
+ * here at the pre-op stage. Applying the config
+ * needs to be done at the post-op stage. */
+
+ if (LDAP_CHANGETYPE_ADD == modop) {
+ slapi_pblock_get(pb, SLAPI_ADD_ENTRY, &e);
+
+ /* If the entry doesn't exist, just bail and
+ * let the server handle it. */
+ if (e == NULL) {
+ goto bail;
+ }
+ } else if (LDAP_CHANGETYPE_MODIFY == modop) {
+ /* Fetch the entry being modified so we can
+ * create the resulting entry for validation. */
+ if (sdn) {
+ slapi_search_internal_get_entry(sdn, 0, &e, get_identity());
+ free_entry = 1;
+ }
+
+ /* If the entry doesn't exist, just bail and
+ * let the server handle it. */
+ if (e == NULL) {
+ goto bail;
+ }
+
+ /* Grab the mods. */
+ slapi_pblock_get(pb, SLAPI_MODIFY_MODS, &mods);
+ smods = slapi_mods_new();
+ slapi_mods_init_byref(smods, mods);
+
+ /* Apply the mods to create the resulting entry. */
+ if (mods && (slapi_entry_apply_mods(e, mods) != LDAP_SUCCESS)) {
+ /* The mods don't apply cleanly, so we just let this op go
+ * to let the main server handle it. */
+ goto bailmod;
+ }
+ } else if (modop == LDAP_CHANGETYPE_DELETE){
+ ret = LDAP_UNWILLING_TO_PERFORM;
+ slapi_log_error(SLAPI_LOG_FATAL, PRE_PLUGIN_NAME,
+ "acct_pre_op: can not delete plugin config entry [%d]\n", ret);
+ } else {
+ errstr = slapi_ch_smprintf("acct_pre_op: invalid op type %d", modop);
+ ret = LDAP_PARAM_ERROR;
+ goto bail;
+ }
+ }
+
+ bailmod:
+ /* Clean up smods. */
+ if (LDAP_CHANGETYPE_MODIFY == modop) {
+ slapi_mods_free(&smods);
+ }
+
+ bail:
+ if (free_entry && e)
+ slapi_entry_free(e);
+
+ if (ret) {
+ slapi_log_error(SLAPI_LOG_PLUGIN, PRE_PLUGIN_NAME,
+ "acct_pre_op: operation failure [%d]\n", ret);
+ slapi_send_ldap_result(pb, ret, NULL, errstr, 0, NULL);
+ slapi_ch_free((void **)&errstr);
+ slapi_pblock_set(pb, SLAPI_RESULT_CODE, &ret);
+ ret = SLAPI_PLUGIN_FAILURE;
+ }
+
+ slapi_log_error(SLAPI_LOG_TRACE, PRE_PLUGIN_NAME, "<-- acct_pre_op\n");
+
+ return ret;
+}
+
+int
+acct_add_pre_op( Slapi_PBlock *pb )
+{
+ return acct_pre_op(pb, LDAP_CHANGETYPE_ADD);
+}
+
+int
+acct_mod_pre_op( Slapi_PBlock *pb )
+{
+ return acct_pre_op(pb, LDAP_CHANGETYPE_MODIFY);
+}
+
+int
+acct_del_pre_op( Slapi_PBlock *pb )
+{
+ return acct_pre_op(pb, LDAP_CHANGETYPE_DELETE);
+}
+
+int
+acct_post_op(Slapi_PBlock *pb)
+{
+ Slapi_DN *sdn = NULL;
+
+ slapi_log_error(SLAPI_LOG_TRACE, POST_PLUGIN_NAME,
+ "--> acct_policy_post_op\n");
+
+ slapi_pblock_get(pb, SLAPI_TARGET_SDN, &sdn);
+ if (acct_policy_dn_is_config(sdn)){
+ if( acct_policy_load_config_startup( pb, get_identity() ) ) {
+ slapi_log_error( SLAPI_LOG_FATAL, PLUGIN_NAME,
+ "acct_policy_start failed to load configuration\n" );
+ return( CALLBACK_ERR );
+ }
+ }
+
+ slapi_log_error(SLAPI_LOG_TRACE, POST_PLUGIN_NAME,
+ "<-- acct_policy_mod_post_op\n");
+
+ return SLAPI_PLUGIN_SUCCESS;
+}
+
diff --git a/ldap/servers/plugins/acctpolicy/acct_util.c b/ldap/servers/plugins/acctpolicy/acct_util.c
index 2e24da2..cff0176 100644
--- a/ldap/servers/plugins/acctpolicy/acct_util.c
+++ b/ldap/servers/plugins/acctpolicy/acct_util.c
@@ -82,7 +82,8 @@ get_attr_string_val( Slapi_Entry* target_entry, char* attr_name ) {
*/
int
get_acctpolicy( Slapi_PBlock *pb, Slapi_Entry *target_entry, void *plugin_id,
- acctPolicy **policy ) {
+ acctPolicy **policy )
+{
Slapi_DN *sdn = NULL;
Slapi_Entry *policy_entry = NULL;
Slapi_Attr *attr;
@@ -93,8 +94,6 @@ get_acctpolicy( Slapi_PBlock *pb, Slapi_Entry *target_entry, void *plugin_id,
acctPluginCfg *cfg;
int rc = 0;
- cfg = get_config();
-
if( policy == NULL ) {
/* Bad parameter */
return( -1 );
@@ -102,19 +101,22 @@ get_acctpolicy( Slapi_PBlock *pb, Slapi_Entry *target_entry, void *plugin_id,
*policy = NULL;
+ config_rd_lock();
+ cfg = get_config();
/* Return success and NULL policy */
policy_dn = get_attr_string_val( target_entry, cfg->spec_attr_name );
if( policy_dn == NULL ) {
slapi_log_error( SLAPI_LOG_PLUGIN, PLUGIN_NAME,
"\"%s\" is not governed by an account inactivity "
"policy subentry\n", slapi_entry_get_ndn( target_entry ) );
- if (cfg->inactivitylimit != ULONG_MAX) {
- goto dopolicy;
- }
+ if (cfg->inactivitylimit != ULONG_MAX) {
+ goto dopolicy;
+ }
slapi_log_error( SLAPI_LOG_PLUGIN, PLUGIN_NAME,
"\"%s\" is not governed by an account inactivity "
"global policy\n", slapi_entry_get_ndn( target_entry ) );
- return rc;
+ config_unlock();
+ return rc;
}
sdn = slapi_sdn_new_dn_byref( policy_dn );
@@ -153,7 +155,8 @@ dopolicy:
}
}
done:
- slapi_ch_free_string( &policy_dn );
+ config_unlock();
+ slapi_ch_free_string( &policy_dn );
slapi_entry_free( policy_entry );
return( rc );
}
diff --git a/ldap/servers/plugins/acctpolicy/acctpolicy.h b/ldap/servers/plugins/acctpolicy/acctpolicy.h
index 2185b95..64f37fb 100644
--- a/ldap/servers/plugins/acctpolicy/acctpolicy.h
+++ b/ldap/servers/plugins/acctpolicy/acctpolicy.h
@@ -69,10 +69,9 @@ typedef struct accountpolicy {
int get_acctpolicy( Slapi_PBlock *pb, Slapi_Entry *target_entry,
void *plugin_id, acctPolicy **policy );
void free_acctpolicy( acctPolicy **policy );
-int has_attr( Slapi_Entry* target_entry, char* attr_name,
- char** val );
+int has_attr( Slapi_Entry* target_entry, char* attr_name, char** val );
char* get_attr_string_val( Slapi_Entry* e, char* attr_name );
-void* get_identity();
+void* get_identity(void);
void set_identity(void*);
time_t gentimeToEpochtime( char *gentimestr );
char* epochtimeToGentime( time_t epochtime );
@@ -80,6 +79,22 @@ int update_is_allowed_attr (const char *attr);
/* acct_config.c */
int acct_policy_load_config_startup( Slapi_PBlock* pb, void* plugin_id );
-acctPluginCfg* get_config();
-void free_config();
+acctPluginCfg* get_config(void);
+void free_config(void);
+
+/* acct_init.c */
+void acct_policy_set_plugin_sdn(Slapi_DN *pluginDN);
+Slapi_DN * acct_policy_get_plugin_sdn(void);
+void acct_policy_set_config_area(Slapi_DN *sdn);
+Slapi_DN * acct_policy_get_config_area(void);
+void config_rd_lock(void);
+void config_wr_lock(void);
+void config_unlock(void);
+
+/* acc_plugins.c */
+int acct_add_pre_op( Slapi_PBlock *pb );
+int acct_mod_pre_op( Slapi_PBlock *pb );
+int acct_del_pre_op( Slapi_PBlock *pb );
+int acct_post_op( Slapi_PBlock *pb );
+
diff --git a/ldap/servers/plugins/linkedattrs/fixup_task.c b/ldap/servers/plugins/linkedattrs/fixup_task.c
index db3c693..f3f5c04 100644
--- a/ldap/servers/plugins/linkedattrs/fixup_task.c
+++ b/ldap/servers/plugins/linkedattrs/fixup_task.c
@@ -197,8 +197,8 @@ linked_attrs_fixup_task_thread(void *arg)
linked_attrs_unlock();
/* Log finished message. */
- slapi_task_log_notice(task, "Linked attributes fixup task complete.\n");
- slapi_task_log_status(task, "Linked attributes fixup task complete.\n");
+ slapi_task_log_notice(task, "Linked attributes fixup task complete.");
+ slapi_task_log_status(task, "Linked attributes fixup task complete.");
slapi_log_error(SLAPI_LOG_FATAL, LINK_PLUGIN_SUBSYSTEM, "Linked attributes fixup task complete.\n");
slapi_task_inc_progress(task);
diff --git a/ldap/servers/plugins/memberof/memberof_config.c b/ldap/servers/plugins/memberof/memberof_config.c
index 012e2d0..7fa5897 100644
--- a/ldap/servers/plugins/memberof/memberof_config.c
+++ b/ldap/servers/plugins/memberof/memberof_config.c
@@ -867,7 +867,6 @@ memberof_shared_config_validate(Slapi_PBlock *pb)
}
slapi_ch_free_string(&configarea_dn);
slapi_sdn_free(&config_sdn);
- slapi_entry_free(config_entry);
}
}
}
diff --git a/ldap/servers/slapd/dse.c b/ldap/servers/slapd/dse.c
index f0ce255..f80178e 100644
--- a/ldap/servers/slapd/dse.c
+++ b/ldap/servers/slapd/dse.c
@@ -2426,8 +2426,6 @@ dse_add(Slapi_PBlock *pb) /* JCM There should only be one exit point from this f
}
}
- /* entry has been freed, so make sure no one tries to use it later */
- slapi_pblock_set(pb, SLAPI_ADD_ENTRY, NULL);
slapi_send_ldap_result(pb, returncode, NULL, returntext[0] ? returntext : NULL, 0, NULL );
return dse_add_return(rc, e);
}
diff --git a/ldap/servers/slapd/plugin.c b/ldap/servers/slapd/plugin.c
index 5530c70..b0b18e7 100644
--- a/ldap/servers/slapd/plugin.c
+++ b/ldap/servers/slapd/plugin.c
@@ -454,10 +454,21 @@ plugin_call_plugins( Slapi_PBlock *pb, int whichfunction )
{
/* We stash the pblock plugin pointer to preserve the callers context */
struct slapdplugin *p;
+ int locked = 0;
+
+ locked = slapi_td_get_plugin_locked();
+ if (!locked) {
+ slapi_rwlock_rdlock(global_rwlock);
+ }
+
slapi_pblock_get(pb, SLAPI_PLUGIN, &p);
/* Call the operation on the Global Plugins */
rc = plugin_call_list(global_plugin_list[plugin_list_number], whichfunction, pb);
slapi_pblock_set(pb, SLAPI_PLUGIN, p);
+
+ if (!locked) {
+ slapi_rwlock_unlock(global_rwlock);
+ }
}
else
{
@@ -1080,12 +1091,6 @@ plugin_start(Slapi_Entry *entry, char *returntext)
int ret = 0;
int i = 0;
- /*
- * Disable registered plugin functions so preops/postops/etc
- * dont get called prior to the plugin being started (due to
- * plugins performing ops on the DIT)
- */
- global_plugin_callbacks_enabled = 0;
global_plugins_started = 0;
/* Count the plugins so we can allocate memory for the config array */
@@ -1404,6 +1409,7 @@ plugin_free_plugin_dep_config(plugin_dep_config **cfg)
}
slapi_ch_free_string(&config[index].type);
slapi_ch_free_string(&config[index].name);
+ slapi_ch_free_string(&config[index].config_area);
pblock_done(&config[index].pb);
index++;
}
@@ -1909,16 +1915,6 @@ plugin_call_func (struct slapdplugin *list, int operation, Slapi_PBlock *pb, int
int rc;
int return_value = 0;
int count = 0;
- int *locked = 0;
-
- /*
- * Take the read lock
- */
- slapi_td_get_plugin_locked(&locked);
- if(locked == 0){
- slapi_rwlock_rdlock(global_rwlock);
- }
-
for (; list != NULL; list = list->plg_next)
{
@@ -1998,9 +1994,6 @@ plugin_call_func (struct slapdplugin *list, int operation, Slapi_PBlock *pb, int
if(call_one)
break;
}
- if(locked == 0){
- slapi_rwlock_unlock(global_rwlock);
- }
return( return_value );
}
@@ -2323,6 +2316,7 @@ plugin_restart(Slapi_Entry *pentryBefore, Slapi_Entry *pentryAfter)
}
slapi_rwlock_wrlock(global_rwlock);
+ slapi_td_set_plugin_locked();
if(plugin_delete(pentryBefore, returntext, 1) == LDAP_SUCCESS){
if(plugin_add(pentryAfter, returntext, 1) == LDAP_SUCCESS){
@@ -2346,6 +2340,7 @@ plugin_restart(Slapi_Entry *pentryBefore, Slapi_Entry *pentryAfter)
}
slapi_rwlock_unlock(global_rwlock);
+ slapi_td_set_plugin_unlocked();
return rc;
}
@@ -2995,12 +2990,11 @@ int
plugin_add(Slapi_Entry *entry, char *returntext, int locked)
{
int rc = LDAP_SUCCESS;
- int td_locked = 1;
if(!locked){
slapi_rwlock_wrlock(global_rwlock);
+ slapi_td_set_plugin_locked();
}
- slapi_td_set_plugin_locked(&td_locked);
if((rc = plugin_setup(entry, 0, 0, 1, returntext)) != LDAP_SUCCESS){
LDAPDebug(LDAP_DEBUG_PLUGIN, "plugin_add: plugin_setup failed for (%s)\n",slapi_entry_get_dn(entry), rc, 0);
@@ -3015,9 +3009,8 @@ plugin_add(Slapi_Entry *entry, char *returntext, int locked)
done:
if(!locked){
slapi_rwlock_unlock(global_rwlock);
+ slapi_td_set_plugin_unlocked();
}
- td_locked = 0;
- slapi_td_set_plugin_locked(&td_locked);
return rc;
}
@@ -3372,7 +3365,6 @@ plugin_delete(Slapi_Entry *plugin_entry, char *returntext, int locked)
struct slapdplugin *plugin = NULL;
const char *plugin_dn = slapi_entry_get_dn_const(plugin_entry);
char *value = NULL;
- int td_locked = 1;
int removed = PLUGIN_BUSY;
int type = 0;
int rc = LDAP_SUCCESS;
@@ -3400,8 +3392,8 @@ plugin_delete(Slapi_Entry *plugin_entry, char *returntext, int locked)
removed = PLUGIN_NOT_FOUND;
if(!locked){
slapi_rwlock_wrlock(global_rwlock);
+ slapi_td_set_plugin_locked();
}
- slapi_td_set_plugin_locked(&td_locked);
rc = plugin_get_type_and_list(value, &type, &plugin_list);
if ( rc != 0 ) {
@@ -3445,9 +3437,8 @@ plugin_delete(Slapi_Entry *plugin_entry, char *returntext, int locked)
unlock:
if(!locked){
slapi_rwlock_unlock(global_rwlock);
+ slapi_td_set_plugin_unlocked();
}
- td_locked = 0;
- slapi_td_set_plugin_locked(&td_locked);
}
}
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index cb8aad0..a61d954 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -5585,8 +5585,10 @@ int slapi_td_dn_init(void);
int slapi_td_set_dn(char *dn);
void slapi_td_get_dn(char **dn);
int slapi_td_plugin_lock_init(void);
-int slapi_td_set_plugin_locked(int *value);
-void slapi_td_get_plugin_locked(int **value);
+int slapi_td_get_plugin_locked(void);
+int slapi_td_set_plugin_locked(void);
+int slapi_td_set_plugin_unlocked(void);
+
/* Thread Local Storage Index Types */
#define SLAPI_TD_REQUESTOR_DN 1
diff --git a/ldap/servers/slapd/thread_data.c b/ldap/servers/slapd/thread_data.c
index 121e2d8..4d9bb93 100644
--- a/ldap/servers/slapd/thread_data.c
+++ b/ldap/servers/slapd/thread_data.c
@@ -168,19 +168,38 @@ slapi_td_plugin_lock_init()
}
int
-slapi_td_set_plugin_locked(int *value)
+slapi_td_set_plugin_locked()
{
- if(slapi_td_set_val(SLAPI_TD_PLUGIN_LIST_LOCK, (void *)value) == PR_FAILURE){
+ int val = 12345;
+
+ if(slapi_td_set_val(SLAPI_TD_PLUGIN_LIST_LOCK, (void *)&val) == PR_FAILURE){
return PR_FAILURE;
}
return PR_SUCCESS;
}
-void
-slapi_td_get_plugin_locked(int **value)
+int
+slapi_td_set_plugin_unlocked()
{
- slapi_td_get_val(SLAPI_TD_PLUGIN_LIST_LOCK, (void **)value);
+ if(slapi_td_set_val(SLAPI_TD_PLUGIN_LIST_LOCK, NULL) == PR_FAILURE){
+ return PR_FAILURE;
+ }
+
+ return PR_SUCCESS;
+}
+
+int
+slapi_td_get_plugin_locked()
+{
+ int *value = 0;
+
+ slapi_td_get_val(SLAPI_TD_PLUGIN_LIST_LOCK, (void **)&value);
+ if(value){
+ return 1;
+ } else{
+ return 0;
+ }
}
/* requestor dn */
8 years, 5 months
Changes to 'refs/tags/389-console-1.1.8'
by Noriko Hosoi
Changes since the dawn of time:
Mark Reynolds (3):
Ticket 97 - 389-console should provide man page
Ticket 47604 - 389-console: remove versioned jars from %{_javadir}
Bump version to 1.1.8
Nathan Kinder (3):
Initial import of fedora-idm-console
Resolves: 183962
Resolves: 393461
Noriko Hosoi (1):
Ticket 97 - 389-console should provide man page
Rich Megginson (33):
initial commit of Fedora Console for Windows - has no UI
added UI and graphics
package is not noarch
support for JAVA with spaces in the pathname
added license
updated spec for Fedora DS 1.1 release
Resolves: bug 428352
Resolves: bug 480631
Reviewed by: nkinder (Thanks!)
Resolves: bug 476095
bump version to 1.1.3 to sync with CVS tag
added unzip.vbs ; cleaned up/simplified Makefile
update idm console version to 20090310
update for 1.1.3
fix typo in Makefile
Initial commit of renaming to 389
Forgot to add provides and obsoletes for fedora-idm-console
added .gitignore
added need_libdir flag to control where libdir is needed to find jss
fix spelling error
Add 64-bit support - Use replaceable parameters for names, guids
Remove old package during upgrade
Remove old shortcuts must ignore All Users folders
Force shortcuts to be removed - removeoldpkg must run asyncNoWait
Bump version to 1.1.4
Changed version to 1.1.4.a1
make sure bitsadmin.exe path is quoted
update to version 1.1.4 rc1
update to version 1.1.4
update for nspr 4.8.4, nss 3.12.6, and idm console framework 1.1.5
Bug 592120 - console for 64bit Window2008 can not be installed
allow building without the script - bump version to 1.1.7
port to wix 3.0 from mozilla-build - use idm console 1.1.7
8 years, 5 months
build.xml .gitignore sources
by Noriko Hosoi
.gitignore | 1 +
build.xml | 3 +++
sources | 1 +
3 files changed, 5 insertions(+)
New commits:
commit 989193207e2af04e2cbe5b853c09b0ec91e5dd55
Author: Noriko Hosoi <nhosoi(a)redhat.com>
Date: Mon Dec 22 14:04:32 2014 -0800
Ticket 97 - 389-console should provide man page
Additional fix: ${man.dir} needs to be prepared in the build tree.
diff --git a/.gitignore b/.gitignore
index b25c15b..3596ea5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,2 @@
*~
+/389-console-1.1.8.tar.bz2
diff --git a/build.xml b/build.xml
index 5ca1dd8..8f7c4e0 100644
--- a/build.xml
+++ b/build.xml
@@ -42,6 +42,9 @@ END COPYRIGHT BLOCK
<property name="need_libdir" value=""/>
<property name="man.dir" value="/usr/share/man"/>
+ <mkdir dir="${man.dir}"/>
+ <mkdir dir="${man.dir}/man.8"/>
+
<!-- Verify that the required jars exist -->
<fail message="LDAP JDK (${ldapjdk.jar.name}) does not exist in ${ldapjdk.local.location}">
Please set the correct location with -Dldapjdk.local.location=[path]
diff --git a/sources b/sources
new file mode 100644
index 0000000..4de6e57
--- /dev/null
+++ b/sources
@@ -0,0 +1 @@
+9827b13c93cb9a9f7572779cf1209dd5 389-console-1.1.8.tar.bz2
8 years, 5 months
Branch '389-ds-base-1.3.1' - ldap/schema ldap/servers
by Noriko Hosoi
ldap/schema/01core389.ldif | 4
ldap/servers/plugins/replication/repl5.h | 10 +
ldap/servers/plugins/replication/repl5_agmt.c | 160 +++++++++++++++++
ldap/servers/plugins/replication/repl5_agmtlist.c | 26 ++
ldap/servers/plugins/replication/repl5_connection.c | 163 +++++++++++++++++-
ldap/servers/plugins/replication/repl5_inc_protocol.c | 32 +++
ldap/servers/plugins/replication/repl5_prot_private.h | 2
ldap/servers/plugins/replication/repl5_tot_protocol.c | 53 +++++
ldap/servers/plugins/replication/repl_globals.c | 2
9 files changed, 446 insertions(+), 6 deletions(-)
New commits:
commit 3f787554300db2ce901d244ff728aaa910e73f7a
Author: Thierry bordaz (tbordaz) <tbordaz(a)redhat.com>
Date: Mon Dec 15 15:12:35 2014 +0100
Ticket 47942: DS hangs during online total update
Bug Description:
During incremental or total update of a consumer the replica agreement thread may hang.
For total update:
The replica agreement thread that send the entries flowed the consumer that is not
able to process fast enough the entries. So the TCP connection get full and
the RA sender sleep on the connection to be able to write the next entries.
Sleeping on the poll or write the RA.sender holds the connection lock.
It prevents the replica agreement result thread to read the results from the
network. So the consumer is also halted because is can no longer send the results.
For incrementatl update:
During incremental update, all updates are sent by the RA.sender.
If many updates need to be send, the supplier may overflow the consumer
that is very late. This flow of updates can fill the TCP connection
so that the RA.sender hang when writing the next update.
On the hang, it holds the connection lock preventing the RA.reader
to receive the acks. And so the consumer can also hang trying to send the
acks.
Fix Description:
For total update there are two parts of the fix:
To prevent the RA.sender to sleep too long on the poll, the fix (conn_is_available)
splits the RA.timeout into 1s period.
If unable to write for 1s, it releases the connection for a short period of time 100ms.
To prevent the RA.sender to sleep on the write, the fix (check_flow_control_tot_init)
checks how late is the consumer and if it is too late, it pauses (releasing the connection
during that time). This second part of the fix is configurable and it may need to be
tune according to the observed failures.
For incremental update:
The fix is to implement a flow control on the RA.sender.
After each sent update, if the window (update.sent - update.acked) cross the limit
The RA.sender pause during a configured delay.
When the RA.sender pause it does not hold the connection lock
Tuning can be done with nsds5ReplicaFlowControlWindow (how late the consumer is in terms of
the number of entries/updates acknowledged) and nsds5ReplicaFlowControlPause (how long the RA.sender will
pause if the consumer is too late).
Logging:
For total update, the first time the flow control pauses, it logs a message (FATAL level).
If flow control happened, then at the end of the total update, it also logs the number
of flow control pauses (FATAL level).
For incremental update, if flow control happened it logs the number of pause (REPL level).
https://fedorahosted.org/389/ticket/47942
Reviewed by: Mark Reynolds, Rich Megginson, Andrey Ivanov, Noriko Hosoi (many many thanks to all of you !)
Platforms tested: RHEL 7.0, Centos
Flag Day: no
Doc impact: no
(cherry picked from commit 9851929baa628a87ff701f3e7b457c99f51ff9f4)
Conflicts:
ldap/schema/01core389.ldif
ldap/servers/plugins/replication/repl5_connection.c
diff --git a/ldap/schema/01core389.ldif b/ldap/schema/01core389.ldif
index b9baae7..899584c 100644
--- a/ldap/schema/01core389.ldif
+++ b/ldap/schema/01core389.ldif
@@ -154,6 +154,8 @@ attributeTypes: ( 2.16.840.1.113730.3.1.2152 NAME 'nsds5ReplicaProtocolTimeout'
attributeTypes: ( 2.16.840.1.113730.3.1.2154 NAME 'nsds5ReplicaBackoffMin' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.2155 NAME 'nsds5ReplicaBackoffMax' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.2156 NAME 'nsslapd-sasl-max-buffer-size' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
+attributeTypes: ( 2.16.840.1.113730.3.1.2310 NAME 'nsds5ReplicaFlowControlWindow' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
+attributeTypes: ( 2.16.840.1.113730.3.1.2311 NAME 'nsds5ReplicaFlowControlPause' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
#
# objectclasses
#
@@ -165,7 +167,7 @@ objectClasses: ( 2.16.840.1.113730.3.2.110 NAME 'nsMappingTree' DESC 'Netscape d
objectClasses: ( 2.16.840.1.113730.3.2.104 NAME 'nsContainer' DESC 'Netscape defined objectclass' SUP top MUST ( CN ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.108 NAME 'nsDS5Replica' DESC 'Netscape defined objectclass' SUP top MUST ( nsDS5ReplicaRoot $ nsDS5ReplicaId ) MAY (cn $ nsds5ReplicaCleanRUV $ nsds5ReplicaAbortCleanRUV $ nsDS5ReplicaType $ nsDS5ReplicaBindDN $ nsState $ nsDS5ReplicaName $ nsDS5Flags $ nsDS5Task $ nsDS5ReplicaReferral $ nsDS5ReplicaAutoReferral $ nsds5ReplicaPurgeDelay $ nsds5ReplicaTombstonePurgeInterval $ nsds5ReplicaChangeCount $ nsds5ReplicaLegacyConsumer $ nsds5ReplicaProtocolTimeout $ nsds5ReplicaBackoffMin $ nsds5ReplicaBackoffMax ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.113 NAME 'nsTombstone' DESC 'Netscape defined objectclass' SUP top MAY ( nsParentUniqueId $ nscpEntryDN ) X-ORIGIN 'Netscape Directory Server' )
-objectClasses: ( 2.16.840.1.113730.3.2.103 NAME 'nsDS5ReplicationAgreement' DESC 'Netscape defined objectclass' SUP top MUST ( cn ) MAY ( nsds5ReplicaCleanRUVNotified $ nsDS5ReplicaHost $ nsDS5ReplicaPort $ nsDS5ReplicaTransportInfo $ nsDS5ReplicaBindDN $ nsDS5ReplicaCredentials $ nsDS5ReplicaBindMethod $ nsDS5ReplicaRoot $ nsDS5ReplicatedAttributeList $ nsDS5ReplicatedAttributeListTotal $ nsDS5ReplicaUpdateSchedule $ nsds5BeginReplicaRefresh $ description $ nsds50ruv $ nsruvReplicaLastModified $ nsds5ReplicaTimeout $ nsds5replicaChangesSentSinceStartup $ nsds5replicaLastUpdateEnd $ nsds5replicaLastUpdateStart $ nsds5replicaLastUpdateStatus $ nsds5replicaUpdateInProgress $ nsds5replicaLastInitEnd $ nsds5ReplicaEnabled $ nsds5replicaLastInitStart $ nsds5replicaLastInitStatus $ nsds5debugreplicatimeout $ nsds5replicaBusyWaitTime $ nsds5ReplicaStripAttrs $ nsds5replicaSessionPauseTime $ nsds5ReplicaProtocolTimeout ) X-ORIGIN 'Netscape Directory Server' )
+objectClasses: ( 2.16.840.1.113730.3.2.103 NAME 'nsDS5ReplicationAgreement' DESC 'Netscape defined objectclass' SUP top MUST ( cn ) MAY ( nsds5ReplicaCleanRUVNotified $ nsDS5ReplicaHost $ nsDS5ReplicaPort $ nsDS5ReplicaTransportInfo $ nsDS5ReplicaBindDN $ nsDS5ReplicaCredentials $ nsDS5ReplicaBindMethod $ nsDS5ReplicaRoot $ nsDS5ReplicatedAttributeList $ nsDS5ReplicatedAttributeListTotal $ nsDS5ReplicaUpdateSchedule $ nsds5BeginReplicaRefresh $ description $ nsds50ruv $ nsruvReplicaLastModified $ nsds5ReplicaTimeout $ nsds5replicaChangesSentSinceStartup $ nsds5replicaLastUpdateEnd $ nsds5replicaLastUpdateStart $ nsds5replicaLastUpdateStatus $ nsds5replicaUpdateInProgress $ nsds5replicaLastInitEnd $ nsds5ReplicaEnabled $ nsds5replicaLastInitStart $ nsds5replicaLastInitStatus $ nsds5debugreplicatimeout $ nsds5replicaBusyWaitTime $ nsds5ReplicaStripAttrs $ nsds5replicaSessionPauseTime $ nsds5ReplicaProtocolTimeout $ nsds5ReplicaFlowControlWindow $ nsds5ReplicaFlowControlPause )
X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.39 NAME 'nsslapdConfig' DESC 'Netscape defined objectclass' SUP top MAY ( cn ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.317 NAME 'nsSaslMapping' DESC 'Netscape defined objectclass' SUP top MUST ( cn $ nsSaslMapRegexString $ nsSaslMapBaseDNTemplate $ nsSaslMapFilterTemplate ) MAY ( nsSaslMapPriority ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.43 NAME 'nsSNMP' DESC 'Netscape defined objectclass' SUP top MUST ( cn $ nsSNMPEnabled ) MAY ( nsSNMPOrganization $ nsSNMPLocation $ nsSNMPContact $ nsSNMPDescription $ nsSNMPName $ nsSNMPMasterHost $ nsSNMPMasterPort ) X-ORIGIN 'Netscape Directory Server' )
diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
index 29f00d4..9396842 100644
--- a/ldap/servers/plugins/replication/repl5.h
+++ b/ldap/servers/plugins/replication/repl5.h
@@ -168,6 +168,8 @@ extern const char *type_nsds5ReplicaBusyWaitTime;
extern const char *type_nsds5ReplicaSessionPauseTime;
extern const char *type_nsds5ReplicaEnabled;
extern const char *type_nsds5ReplicaStripAttrs;
+extern const char *type_nsds5ReplicaFlowControlWindow;
+extern const char *type_nsds5ReplicaFlowControlPause;
extern const char *type_replicaProtocolTimeout;
extern const char *type_replicaBackoffMin;
extern const char *type_replicaBackoffMax;
@@ -320,6 +322,8 @@ int agmt_get_auto_initialize(const Repl_Agmt *ra);
long agmt_get_timeout(const Repl_Agmt *ra);
long agmt_get_busywaittime(const Repl_Agmt *ra);
long agmt_get_pausetime(const Repl_Agmt *ra);
+long agmt_get_flowcontrolwindow(const Repl_Agmt *ra);
+long agmt_get_flowcontrolpause(const Repl_Agmt *ra);
int agmt_start(Repl_Agmt *ra);
int windows_agmt_start(Repl_Agmt *ra);
int agmt_stop(Repl_Agmt *ra);
@@ -340,6 +344,8 @@ int agmt_replarea_matches(const Repl_Agmt *ra, const Slapi_DN *name);
int agmt_schedule_in_window_now(const Repl_Agmt *ra);
int agmt_set_schedule_from_entry( Repl_Agmt *ra, const Slapi_Entry *e );
int agmt_set_timeout_from_entry( Repl_Agmt *ra, const Slapi_Entry *e );
+int agmt_set_flowcontrolwindow_from_entry(Repl_Agmt *ra, const Slapi_Entry *e);
+int agmt_set_flowcontrolpause_from_entry(Repl_Agmt *ra, const Slapi_Entry *e);
int agmt_set_busywaittime_from_entry( Repl_Agmt *ra, const Slapi_Entry *e );
int agmt_set_pausetime_from_entry( Repl_Agmt *ra, const Slapi_Entry *e );
int agmt_set_credentials_from_entry( Repl_Agmt *ra, const Slapi_Entry *e );
@@ -478,6 +484,10 @@ void conn_lock(Repl_Connection *conn);
void conn_unlock(Repl_Connection *conn);
void conn_delete_internal_ext(Repl_Connection *conn);
const char* conn_get_bindmethod(Repl_Connection *conn);
+void conn_set_tot_update_cb(Repl_Connection *conn, void *cb_data);
+void conn_set_tot_update_cb_nolock(Repl_Connection *conn, void *cb_data);
+void conn_get_tot_update_cb(Repl_Connection *conn, void **cb_data);
+void conn_get_tot_update_cb_nolock(Repl_Connection *conn, void **cb_data);
/* In repl5_protocol.c */
typedef struct repl_protocol Repl_Protocol;
diff --git a/ldap/servers/plugins/replication/repl5_agmt.c b/ldap/servers/plugins/replication/repl5_agmt.c
index 093aa8a..cffc4da 100644
--- a/ldap/servers/plugins/replication/repl5_agmt.c
+++ b/ldap/servers/plugins/replication/repl5_agmt.c
@@ -87,6 +87,8 @@
#include "slapi-plugin.h"
#define DEFAULT_TIMEOUT 600 /* (seconds) default outbound LDAP connection */
+#define DEFAULT_FLOWCONTROL_WINDOW 1000 /* #entries sent without acknowledgment */
+#define DEFAULT_FLOWCONTROL_PAUSE 2000 /* msec of pause when #entries sent witout acknowledgment */
#define STATUS_LEN 1024
struct changecounter {
@@ -144,6 +146,12 @@ typedef struct repl5agmt {
int agreement_type;
Slapi_Counter *protocol_timeout;
char *maxcsn; /* agmt max csn */
+ long flowControlWindow; /* This is the maximum number of entries
+ * sent without acknowledgment
+ */
+ long flowControlPause; /* When nb of not acknowledged entries overpass totalUpdateWindow
+ * This is the duration (in msec) that the RA will pause before sending the next entry
+ */
Slapi_RWLock *attr_lock; /* RW lock for all the stripped attrs */
} repl5agmt;
@@ -344,6 +352,28 @@ agmt_new_from_entry(Slapi_Entry *e)
}
}
+ /* flow control update window. */
+ ra->flowControlWindow = DEFAULT_FLOWCONTROL_WINDOW;
+ if (slapi_entry_attr_find(e, type_nsds5ReplicaFlowControlWindow, &sattr) == 0)
+ {
+ Slapi_Value *sval;
+ if (slapi_attr_first_value(sattr, &sval) == 0)
+ {
+ ra->flowControlWindow = slapi_value_get_long(sval);
+ }
+ }
+
+ /* flow control update pause. */
+ ra->flowControlPause = DEFAULT_FLOWCONTROL_PAUSE;
+ if (slapi_entry_attr_find(e, type_nsds5ReplicaFlowControlPause, &sattr) == 0)
+ {
+ Slapi_Value *sval;
+ if (slapi_attr_first_value(sattr, &sval) == 0)
+ {
+ ra->flowControlPause = slapi_value_get_long(sval);
+ }
+ }
+
/* DN of entry at root of replicated area */
tmpstr = slapi_entry_attr_get_charptr(e, type_nsds5ReplicaRoot);
if (NULL != tmpstr)
@@ -991,6 +1021,26 @@ agmt_get_pausetime(const Repl_Agmt *ra)
return return_value;
}
+long
+agmt_get_flowcontrolwindow(const Repl_Agmt *ra)
+{
+ long return_value;
+ PR_ASSERT(NULL != ra);
+ PR_Lock(ra->lock);
+ return_value = ra->flowControlWindow;
+ PR_Unlock(ra->lock);
+ return return_value;
+}
+long
+agmt_get_flowcontrolpause(const Repl_Agmt *ra)
+{
+ long return_value;
+ PR_ASSERT(NULL != ra);
+ PR_Lock(ra->lock);
+ return_value = ra->flowControlPause;
+ PR_Unlock(ra->lock);
+ return return_value;
+}
/*
* Warning - reference to the long name of the agreement is returned.
* The long name of an agreement is the DN of the agreement entry,
@@ -1722,6 +1772,90 @@ agmt_set_timeout_from_entry(Repl_Agmt *ra, const Slapi_Entry *e)
return return_value;
}
+/*
+ * Set or reset the window of entries sent without acknowledgment.
+ * The window is used during update to determine the number of
+ * entries that will be sent by the replica agreement without acknowledgment from the consumer
+ *
+ * Returns 0 if window set, or -1 if an error occurred.
+ */
+int
+agmt_set_flowcontrolwindow_from_entry(Repl_Agmt *ra, const Slapi_Entry *e)
+{
+ Slapi_Attr *sattr = NULL;
+ int return_value = -1;
+
+ PR_ASSERT(NULL != ra);
+ PR_Lock(ra->lock);
+ if (ra->stop_in_progress)
+ {
+ PR_Unlock(ra->lock);
+ return return_value;
+ }
+
+ slapi_entry_attr_find(e, type_nsds5ReplicaFlowControlWindow, &sattr);
+ if (NULL != sattr)
+ {
+ Slapi_Value *sval = NULL;
+ slapi_attr_first_value(sattr, &sval);
+ if (NULL != sval)
+ {
+ long tmpval = slapi_value_get_long(sval);
+ if (tmpval >= 0) {
+ ra->flowControlWindow = tmpval;
+ return_value = 0; /* success! */
+ }
+ }
+ }
+ PR_Unlock(ra->lock);
+ if (return_value == 0)
+ {
+ prot_notify_agmt_changed(ra->protocol, ra->long_name);
+ }
+ return return_value;
+}
+
+/*
+ * Set or reset the pause duration used when the #entries sent without acknowledgment exceeds the flow control window
+ *
+ * Returns 0 if pause set, or -1 if an error occurred.
+ */
+int
+agmt_set_flowcontrolpause_from_entry(Repl_Agmt *ra, const Slapi_Entry *e)
+{
+ Slapi_Attr *sattr = NULL;
+ int return_value = -1;
+
+ PR_ASSERT(NULL != ra);
+ PR_Lock(ra->lock);
+ if (ra->stop_in_progress)
+ {
+ PR_Unlock(ra->lock);
+ return return_value;
+ }
+
+ slapi_entry_attr_find(e, type_nsds5ReplicaFlowControlPause, &sattr);
+ if (NULL != sattr)
+ {
+ Slapi_Value *sval = NULL;
+ slapi_attr_first_value(sattr, &sval);
+ if (NULL != sval)
+ {
+ long tmpval = slapi_value_get_long(sval);
+ if (tmpval >= 0) {
+ ra->flowControlPause = tmpval;
+ return_value = 0; /* success! */
+ }
+ }
+ }
+ PR_Unlock(ra->lock);
+ if (return_value == 0)
+ {
+ prot_notify_agmt_changed(ra->protocol, ra->long_name);
+ }
+ return return_value;
+}
+
int
agmt_set_timeout(Repl_Agmt *ra, long timeout)
{
@@ -1735,6 +1869,32 @@ agmt_set_timeout(Repl_Agmt *ra, long timeout)
return 0;
}
+int
+agmt_set_flowcontrolwindow(Repl_Agmt *ra, long window)
+{
+ PR_Lock(ra->lock);
+ if (ra->stop_in_progress){
+ PR_Unlock(ra->lock);
+ return -1;
+ }
+ ra->flowControlWindow = window;
+ PR_Unlock(ra->lock);
+
+ return 0;
+}
+int
+agmt_set_flowcontrolpause(Repl_Agmt *ra, long pause)
+{
+ PR_Lock(ra->lock);
+ if (ra->stop_in_progress){
+ PR_Unlock(ra->lock);
+ return -1;
+ }
+ ra->flowControlPause = pause;
+ PR_Unlock(ra->lock);
+
+ return 0;
+}
/*
* Set or reset the busywaittime
diff --git a/ldap/servers/plugins/replication/repl5_agmtlist.c b/ldap/servers/plugins/replication/repl5_agmtlist.c
index 81d55c6..f09e2ee 100644
--- a/ldap/servers/plugins/replication/repl5_agmtlist.c
+++ b/ldap/servers/plugins/replication/repl5_agmtlist.c
@@ -327,6 +327,32 @@ agmtlist_modify_callback(Slapi_PBlock *pb, Slapi_Entry *entryBefore, Slapi_Entry
}
}
else if (slapi_attr_types_equivalent(mods[i]->mod_type,
+ type_nsds5ReplicaFlowControlWindow))
+ {
+ /* New replica flow control window */
+ if (agmt_set_flowcontrolwindow_from_entry(agmt, e) != 0)
+ {
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name, "agmtlist_modify_callback: "
+ "failed to update the flow control window for agreement %s\n",
+ agmt_get_long_name(agmt));
+ *returncode = LDAP_OPERATIONS_ERROR;
+ rc = SLAPI_DSE_CALLBACK_ERROR;
+ }
+ }
+ else if (slapi_attr_types_equivalent(mods[i]->mod_type,
+ type_nsds5ReplicaFlowControlPause))
+ {
+ /* New replica flow control pause */
+ if (agmt_set_flowcontrolpause_from_entry(agmt, e) != 0)
+ {
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name, "agmtlist_modify_callback: "
+ "failed to update the flow control pause for agreement %s\n",
+ agmt_get_long_name(agmt));
+ *returncode = LDAP_OPERATIONS_ERROR;
+ rc = SLAPI_DSE_CALLBACK_ERROR;
+ }
+ }
+ else if (slapi_attr_types_equivalent(mods[i]->mod_type,
type_nsds5ReplicaBusyWaitTime))
{
/* New replica busywaittime */
diff --git a/ldap/servers/plugins/replication/repl5_connection.c b/ldap/servers/plugins/replication/repl5_connection.c
index 17d1d9c..918c5ea 100644
--- a/ldap/servers/plugins/replication/repl5_connection.c
+++ b/ldap/servers/plugins/replication/repl5_connection.c
@@ -52,6 +52,7 @@ replica locked. Seems like right thing to do.
*/
#include "repl5.h"
+#include "repl5_prot_private.h"
#if defined(USE_OPENLDAP)
#include "ldap.h"
#else
@@ -90,6 +91,7 @@ typedef struct repl_connection
struct timeval timeout;
int flag_agmt_changed;
char *plain;
+ void *tot_init_callback; /* Used during total update to do flow control */
} repl_connection;
/* #define DEFAULT_LINGER_TIME (5 * 60) */ /* 5 minutes */
@@ -269,6 +271,32 @@ conn_delete(Repl_Connection *conn)
PR_Unlock(conn->lock);
}
+void
+conn_set_tot_update_cb_nolock(Repl_Connection *conn, void *cb_data)
+{
+ conn->tot_init_callback = (void *) cb_data;
+}
+void
+conn_set_tot_update_cb(Repl_Connection *conn, void *cb_data)
+{
+ PR_Lock(conn->lock);
+ conn_set_tot_update_cb_nolock(conn, cb_data);
+ PR_Unlock(conn->lock);
+}
+
+void
+conn_get_tot_update_cb_nolock(Repl_Connection *conn, void **cb_data)
+{
+ *cb_data = (void *) conn->tot_init_callback;
+}
+void
+conn_get_tot_update_cb(Repl_Connection *conn, void **cb_data)
+{
+ PR_Lock(conn->lock);
+ conn_get_tot_update_cb_nolock(conn, cb_data);
+ PR_Unlock(conn->lock);
+}
+
/*
* Return the last operation type processed by the connection
* object, and the LDAP error encountered.
@@ -635,6 +663,131 @@ see_if_write_available(Repl_Connection *conn, PRIntervalTime timeout)
}
#endif /* ! USE_OPENLDAP */
+/*
+ * During a total update, this function checks how many entries
+ * have been sent to the consumer without having received their acknowledgment.
+ * Basically it checks how late is the consumer.
+ *
+ * If the consumer is too late, it pauses the RA.sender (releasing the lock) to
+ * let the consumer catch up and the RA.reader receive the acknowledgments.
+ *
+ * Caller must hold conn->lock
+ */
+static void
+check_flow_control_tot_init(Repl_Connection *conn, int optype, const char *extop_oid, int sent_msgid)
+{
+ int rcv_msgid;
+ int once;
+
+ if ((sent_msgid != 0) && (optype == CONN_EXTENDED_OPERATION) && (strcmp(extop_oid, REPL_NSDS50_REPLICATION_ENTRY_REQUEST_OID) == 0)) {
+ /* We are sending entries part of the total update of a consumer
+ * Wait a bit if the consumer needs to catchup from the current sent entries
+ */
+ rcv_msgid = repl5_tot_last_rcv_msgid(conn);
+ if (rcv_msgid == -1) {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
+ "%s: check_flow_control_tot_init no callback data [ msgid sent: %d]\n",
+ agmt_get_long_name(conn->agmt),
+ sent_msgid);
+ } else if (sent_msgid < rcv_msgid) {
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name,
+ "%s: check_flow_control_tot_init invalid message ids [ msgid sent: %d, rcv: %d]\n",
+ agmt_get_long_name(conn->agmt),
+ sent_msgid,
+ rcv_msgid);
+ } else if ((sent_msgid - rcv_msgid) > agmt_get_flowcontrolwindow(conn->agmt)) {
+ int totalUpdatePause;
+
+ totalUpdatePause = agmt_get_flowcontrolpause(conn->agmt);
+ if (totalUpdatePause) {
+ /* The consumer is late. The gap between the last sent entry and the last
+ * acknowledged entry exceeds the allowed limit (flowcontrolwindow)
+ * Give some time to the consumer to catch up
+ */
+ once = repl5_tot_flowcontrol_detection(conn, 1);
+ PR_Unlock(conn->lock);
+ if (once == 1) {
+ /* This is the first time we hit total update flow control.
+ * Log it at least once to inform administrator there is
+ * a potential configuration issue here
+ */
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
+ "%s: Total update flow control gives time (%d msec) to the consumer before sending more entries [ msgid sent: %d, rcv: %d])\n"
+ "If total update fails you can try to increase %s and/or decrease %s in the replica agreement configuration\n",
+ agmt_get_long_name(conn->agmt),
+ totalUpdatePause,
+ sent_msgid,
+ rcv_msgid,
+ type_nsds5ReplicaFlowControlPause,
+ type_nsds5ReplicaFlowControlWindow);
+ }
+ DS_Sleep(PR_MillisecondsToInterval(totalUpdatePause));
+ PR_Lock(conn->lock);
+ }
+ }
+ }
+
+}
+/*
+ * Test if the connection is available to do a write.
+ * This function is doing a periodic polling of the connection.
+ * If the polling times out:
+ * - it releases the connection lock (to let other thread ,i.e.
+ * replication result thread, the opportunity to use the connection)
+ * - Sleeps for a short period (100ms)
+ * - acquires the connection lock
+ *
+ * It loops until
+ * - it is available
+ * - exceeds RA complete timeout
+ * - server is shutdown
+ * - connection is disconnected (Disable, stop, delete the RA
+ * 'terminate' the replication protocol and disconnect the connection)
+ *
+ * Return:
+ * - CONN_OPERATION_SUCCESS if the connection is available
+ * - CONN_TIMEOUT if the overall polling/sleeping delay exceeds RA timeout
+ * - CONN_NOT_CONNECTED if the replication connection state is disconnected
+ * - other ConnResult
+ *
+ * Caller must hold conn->Lock. At the exit, conn->lock is held
+ */
+static ConnResult
+conn_is_available(Repl_Connection *conn)
+{
+ time_t poll_timeout_sec = 1; /* Polling for 1sec */
+ time_t yield_delay_msec = 100; /* Delay to wait */
+ time_t start_time = time( NULL );
+ time_t time_now;
+ ConnResult return_value = CONN_OPERATION_SUCCESS;
+
+ while (!slapi_is_shutting_down() && (conn->state != STATE_DISCONNECTED)) {
+ return_value = see_if_write_available(conn, PR_SecondsToInterval(poll_timeout_sec));
+ if (return_value == CONN_TIMEOUT) {
+ /* in case of timeout we return CONN_TIMEOUT only
+ * if the RA.timeout is exceeded
+ */
+ time_now = time(NULL);
+ if (conn->timeout.tv_sec <= (time_now - start_time)) {
+ break;
+ } else {
+ /* Else give connection to others threads */
+ PR_Unlock(conn->lock);
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name,
+ "%s: perform_operation transient timeout. retry)\n",
+ agmt_get_long_name(conn->agmt));
+ DS_Sleep(PR_MillisecondsToInterval(yield_delay_msec));
+ PR_Lock(conn->lock);
+ }
+ } else {
+ break;
+ }
+ }
+ if (conn->state == STATE_DISCONNECTED) {
+ return_value = CONN_NOT_CONNECTED;
+ }
+ return return_value;
+}
/*
* Common code to send an LDAPv3 operation and collect the result.
* Return values:
@@ -678,10 +831,13 @@ perform_operation(Repl_Connection *conn, int optype, const char *dn,
Slapi_Eq_Context eqctx = repl5_start_debug_timeout(&setlevel);
- return_value = see_if_write_available(
- conn, PR_SecondsToInterval(conn->timeout.tv_sec));
+ return_value = conn_is_available(conn);
if (return_value != CONN_OPERATION_SUCCESS) {
PR_Unlock(conn->lock);
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
+ "%s: perform_operation connection is not available (%d)\n",
+ agmt_get_long_name(conn->agmt),
+ return_value);
return return_value;
}
conn->last_operation = optype;
@@ -753,6 +909,9 @@ perform_operation(Repl_Connection *conn, int optype, const char *dn,
*/
return_value = CONN_NOT_CONNECTED;
}
+
+ check_flow_control_tot_init(conn, optype, extop_oid, msgid);
+
PR_Unlock(conn->lock); /* release the lock */
if (message_id)
{
diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c
index 9f81c04..5cf170c 100644
--- a/ldap/servers/plugins/replication/repl5_inc_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c
@@ -108,6 +108,7 @@ typedef struct result_data
int stop_result_thread; /* Flag used to tell the result thread to exit */
int last_message_id_sent;
int last_message_id_received;
+ int flowcontrol_detection;
int result; /* The UPDATE_TRANSIENT_ERROR etc */
} result_data;
@@ -460,6 +461,23 @@ repl5_inc_destroy_async_result_thread(result_data *rd)
return retval;
}
+/* The interest of this routine is to give time to the consumer
+ * to apply the sent updates and return the acks.
+ * So the caller should not hold the replication connection lock
+ * to let the RA.reader receive the acks.
+ */
+static void
+repl5_inc_flow_control_results(Repl_Agmt *agmt, result_data *rd)
+{
+ PR_Lock(rd->lock);
+ if ((rd->last_message_id_received <= rd->last_message_id_sent) &&
+ ((rd->last_message_id_sent - rd->last_message_id_received) >= agmt_get_flowcontrolwindow(agmt))) {
+ rd->flowcontrol_detection++;
+ DS_Sleep(PR_MillisecondsToInterval(agmt_get_flowcontrolpause(agmt)));
+ }
+ PR_Unlock(rd->lock);
+}
+
static void
repl5_inc_waitfor_async_results(result_data *rd)
{
@@ -1682,7 +1700,7 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
{
int finished = 0;
ConnResult replay_crc;
- char csn_str[CSN_STRSIZE];
+ char csn_str[CSN_STRSIZE];
/* Start the results reading thread */
rd = repl5_inc_rd_new(prp);
@@ -1817,6 +1835,7 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
sop->replica_id = replica_id;
PL_strncpyz(sop->uniqueid, uniqueid, sizeof(sop->uniqueid));
repl5_int_push_operation(rd,sop);
+ repl5_inc_flow_control_results(prp->agmt, rd);
} else {
slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name,
"%s: Skipping update operation with no message_id (uniqueid %s, CSN %s):\n",
@@ -1905,6 +1924,17 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
}
*num_changes_sent = rd->num_changes_sent;
}
+ PR_Lock(rd->lock);
+ if (rd->flowcontrol_detection) {
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name,
+ "%s: Incremental update flow control triggered %d times\n"
+ "You may increase %s and/or decrease %s in the replica agreement configuration\n",
+ agmt_get_long_name(prp->agmt),
+ rd->flowcontrol_detection,
+ type_nsds5ReplicaFlowControlPause,
+ type_nsds5ReplicaFlowControlWindow);
+ }
+ PR_Unlock(rd->lock);
repl5_inc_rd_destroy(&rd);
cl5_operation_parameters_done ( entry.op );
diff --git a/ldap/servers/plugins/replication/repl5_prot_private.h b/ldap/servers/plugins/replication/repl5_prot_private.h
index 586e1eb..1b1c00b 100644
--- a/ldap/servers/plugins/replication/repl5_prot_private.h
+++ b/ldap/servers/plugins/replication/repl5_prot_private.h
@@ -79,6 +79,8 @@ typedef struct private_repl_protocol
extern Private_Repl_Protocol *Repl_5_Inc_Protocol_new();
extern Private_Repl_Protocol *Repl_5_Tot_Protocol_new();
+extern int repl5_tot_last_rcv_msgid(Repl_Connection *conn);
+extern int repl5_tot_flowcontrol_detection(Repl_Connection *conn, int increment);
extern Private_Repl_Protocol *Windows_Inc_Protocol_new();
extern Private_Repl_Protocol *Windows_Tot_Protocol_new();
diff --git a/ldap/servers/plugins/replication/repl5_tot_protocol.c b/ldap/servers/plugins/replication/repl5_tot_protocol.c
index 2db5178..8ed46e8 100644
--- a/ldap/servers/plugins/replication/repl5_tot_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_tot_protocol.c
@@ -82,6 +82,7 @@ typedef struct callback_data
int stop_result_thread; /* Flag used to tell the result thread to exit */
int last_message_id_sent;
int last_message_id_received;
+ int flowcontrol_detection;
} callback_data;
/*
@@ -419,13 +420,19 @@ repl5_tot_run(Private_Repl_Protocol *prp)
LDAP_SCOPE_SUBTREE, "(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))", NULL, 0, ctrls, NULL,
repl_get_plugin_identity (PLUGIN_MULTIMASTER_REPLICATION), 0);
- cb_data.prp = prp;
- cb_data.rc = 0;
+ cb_data.prp = prp;
+ cb_data.rc = 0;
cb_data.num_entries = 0UL;
cb_data.sleep_on_busy = 0UL;
cb_data.last_busy = current_time ();
+ cb_data.flowcontrol_detection = 0;
cb_data.lock = PR_NewLock();
+ /* This allows during perform_operation to check the callback data
+ * especially to do flow contol on delta send msgid / recv msgid
+ */
+ conn_set_tot_update_cb(prp->conn, (void *) &cb_data);
+
/* Before we get started on sending entries to the replica, we need to
* setup things for async propagation:
* 1. Create a thread that will read the LDAP results from the connection.
@@ -495,6 +502,17 @@ repl5_tot_run(Private_Repl_Protocol *prp)
done:
slapi_sdn_free(&area_sdn);
slapi_ch_free_string(&hostname);
+ if (cb_data.flowcontrol_detection > 1)
+ {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
+ "%s: Total update flow control triggered %d times\n"
+ "You may increase %s and/or decrease %s in the replica agreement configuration\n",
+ agmt_get_long_name(prp->agmt),
+ cb_data.flowcontrol_detection,
+ type_nsds5ReplicaFlowControlPause,
+ type_nsds5ReplicaFlowControlWindow);
+ }
+ conn_set_tot_update_cb(prp->conn, NULL);
if (cb_data.lock)
{
PR_DestroyLock(cb_data.lock);
@@ -634,6 +652,37 @@ void get_result (int rc, void *cb_data)
((callback_data*)cb_data)->rc = rc;
}
+/* Caller must hold the connection lock */
+int
+repl5_tot_last_rcv_msgid(Repl_Connection *conn)
+{
+ struct callback_data *cb_data;
+
+ conn_get_tot_update_cb_nolock(conn, (void **) &cb_data);
+ if (cb_data == NULL) {
+ return -1;
+ } else {
+ return cb_data->last_message_id_received;
+ }
+}
+
+/* Increase the flowcontrol counter
+ * Caller must hold the connection lock
+ */
+int
+repl5_tot_flowcontrol_detection(Repl_Connection *conn, int increment)
+{
+ struct callback_data *cb_data;
+
+ conn_get_tot_update_cb_nolock(conn, (void **) &cb_data);
+ if (cb_data == NULL) {
+ return -1;
+ } else {
+ cb_data->flowcontrol_detection += increment;
+ return cb_data->flowcontrol_detection;
+ }
+}
+
static
int send_entry (Slapi_Entry *e, void *cb_data)
{
diff --git a/ldap/servers/plugins/replication/repl_globals.c b/ldap/servers/plugins/replication/repl_globals.c
index 305ed25..0677f6c 100644
--- a/ldap/servers/plugins/replication/repl_globals.c
+++ b/ldap/servers/plugins/replication/repl_globals.c
@@ -133,6 +133,8 @@ const char *type_nsds5ReplicaBusyWaitTime = "nsds5ReplicaBusyWaitTime";
const char *type_nsds5ReplicaSessionPauseTime = "nsds5ReplicaSessionPauseTime";
const char *type_nsds5ReplicaEnabled = "nsds5ReplicaEnabled";
const char *type_nsds5ReplicaStripAttrs = "nsds5ReplicaStripAttrs";
+const char* type_nsds5ReplicaFlowControlWindow = "nsds5ReplicaFlowControlWindow";
+const char* type_nsds5ReplicaFlowControlPause = "nsds5ReplicaFlowControlPause";
/* windows sync specific attributes */
const char *type_nsds7WindowsReplicaArea = "nsds7WindowsReplicaSubtree";
8 years, 5 months
Branch '389-ds-base-1.2.11' - ldap/ldif ldap/servers
by Mark Reynolds
ldap/ldif/template-dse.ldif.in | 3
ldap/servers/slapd/attrsyntax.c | 23 -
ldap/servers/slapd/back-ldbm/monitor.c | 26 +-
ldap/servers/slapd/dn.c | 401 +++++++++++++++++++++++++++++++++
ldap/servers/slapd/libglobs.c | 76 ++++++
ldap/servers/slapd/main.c | 4
ldap/servers/slapd/proto-slap.h | 12
ldap/servers/slapd/schema.c | 17 -
ldap/servers/slapd/slap.h | 10
ldap/servers/slapd/slapi-private.h | 8
10 files changed, 532 insertions(+), 48 deletions(-)
New commits:
commit 2a8da7ea76d15906fdb98b47534fc3447f12c752
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Fri Dec 19 16:36:11 2014 -0500
Ticket 408 - Backport of Normalized DN Cache
https://fedorahosted.org/389/ticket/408
diff --git a/ldap/ldif/template-dse.ldif.in b/ldap/ldif/template-dse.ldif.in
index ddf2b35..c626726 100644
--- a/ldap/ldif/template-dse.ldif.in
+++ b/ldap/ldif/template-dse.ldif.in
@@ -53,9 +53,10 @@ nsslapd-auditlog-maxlogsize: 100
nsslapd-auditlog-logrotationtime: 1
nsslapd-auditlog-logrotationtimeunit: day
nsslapd-rootdn: %rootdn%
+nsslapd-rootpw: %ds_passwd%
nsslapd-maxdescriptors: 1024
nsslapd-max-filter-nest-level: 40
-nsslapd-rootpw: %ds_passwd%
+nsslapd-ndn-cache-enabled: off
dn: cn=features,cn=config
objectclass: top
diff --git a/ldap/servers/slapd/attrsyntax.c b/ldap/servers/slapd/attrsyntax.c
index 79736b5..75114e0 100644
--- a/ldap/servers/slapd/attrsyntax.c
+++ b/ldap/servers/slapd/attrsyntax.c
@@ -219,29 +219,6 @@ attr_syntax_new()
}
/*
- * hashNocaseString - used for case insensitive hash lookups
- */
-static PLHashNumber
-hashNocaseString(const void *key)
-{
- PLHashNumber h = 0;
- const unsigned char *s;
-
- for (s = key; *s; s++)
- h = (h >> 28) ^ (h << 4) ^ (tolower(*s));
- return h;
-}
-
-/*
- * hashNocaseCompare - used for case insensitive hash key comparisons
- */
-static PRIntn
-hashNocaseCompare(const void *v1, const void *v2)
-{
- return (strcasecmp((char *)v1, (char *)v2) == 0);
-}
-
-/*
* Given an OID, return the syntax info. If there is more than one
* attribute syntax with the same OID (i.e. aliases), the first one
* will be returned. This is usually the "canonical" one, but it may
diff --git a/ldap/servers/slapd/back-ldbm/monitor.c b/ldap/servers/slapd/back-ldbm/monitor.c
index e3e1fb5..52a8ace 100644
--- a/ldap/servers/slapd/back-ldbm/monitor.c
+++ b/ldap/servers/slapd/back-ldbm/monitor.c
@@ -70,8 +70,8 @@ int ldbm_back_monitor_instance_search(Slapi_PBlock *pb, Slapi_Entry *e,
struct berval *vals[2];
char buf[BUFSIZ];
PRUint64 hits, tries;
- long nentries,maxentries;
- size_t size,maxsize;
+ long nentries, maxentries, count;
+ size_t size, maxsize;
/* NPCTE fix for bugid 544365, esc 0. <P.R> <04-Jul-2001> */
struct stat astat;
/* end of NPCTE fix for bugid 544365 */
@@ -145,6 +145,28 @@ int ldbm_back_monitor_instance_search(Slapi_PBlock *pb, Slapi_Entry *e,
sprintf(buf, "%ld", maxentries);
MSET("maxDnCacheCount");
}
+ /* normalized dn cache stats */
+ if(ndn_cache_started()){
+ ndn_cache_get_stats(&hits, &tries, &size, &maxsize, &count);
+ sprintf(buf, "%" NSPRIu64, tries);
+ MSET("normalizedDnCacheTries");
+ sprintf(buf, "%" NSPRIu64, hits);
+ MSET("normalizedDnCacheHits");
+ sprintf(buf, "%" NSPRIu64, tries - hits);
+ MSET("normalizedDnCacheMisses");
+ sprintf(buf, "%lu", (unsigned long)(100.0*(double)hits / (double)(tries > 0 ? tries : 1)));
+ MSET("normalizedDnCacheHitRatio");
+ sprintf(buf, "%lu", size);
+ MSET("currentNormalizedDnCacheSize");
+ if(maxsize == 0){
+ sprintf(buf, "%d", -1);
+ } else {
+ sprintf(buf, "%lu", maxsize);
+ }
+ MSET("maxNormalizedDnCacheSize");
+ sprintf(buf, "%ld", count);
+ MSET("currentNormalizedDnCacheCount");
+ }
#ifdef DEBUG
{
diff --git a/ldap/servers/slapd/dn.c b/ldap/servers/slapd/dn.c
index 804d56e..283f265 100644
--- a/ldap/servers/slapd/dn.c
+++ b/ldap/servers/slapd/dn.c
@@ -51,6 +51,7 @@
#include <sys/socket.h>
#endif
#include "slap.h"
+#include <plhash.h>
#undef SDN_DEBUG
@@ -61,6 +62,53 @@ static void sort_rdn_avs( struct berval *avs, int count, int escape );
static int rdn_av_cmp( struct berval *av1, struct berval *av2 );
static void rdn_av_swap( struct berval *av1, struct berval *av2, int escape );
+/* normalized dn cache related definitions*/
+struct
+ndn_cache_lru
+{
+ struct ndn_cache_lru *prev;
+ struct ndn_cache_lru *next;
+ char *key;
+};
+
+struct
+ndn_cache_ctx
+{
+ struct ndn_cache_lru *head;
+ struct ndn_cache_lru *tail;
+ Slapi_Counter *cache_hits;
+ Slapi_Counter *cache_tries;
+ Slapi_Counter *cache_misses;
+ size_t cache_size;
+ size_t cache_max_size;
+ long cache_count;
+};
+
+struct
+ndn_hash_val
+{
+ char *ndn;
+ size_t len;
+ int size;
+ struct ndn_cache_lru *lru_node; /* used to speed up lru shuffling */
+};
+
+#define NDN_FLUSH_COUNT 10000 /* number of DN's to remove when cache fills up */
+#define NDN_MIN_COUNT 1000 /* the minimum number of DN's to keep in the cache */
+#define NDN_CACHE_BUCKETS 2053 /* prime number */
+
+static PLHashNumber ndn_hash_string(const void *key);
+static int ndn_cache_lookup(char *dn, size_t dn_len, char **result, char **udn, int *rc);
+static void ndn_cache_update_lru(struct ndn_cache_lru **node);
+static void ndn_cache_add(char *dn, size_t dn_len, char *ndn, size_t ndn_len);
+static void ndn_cache_delete(char *dn);
+static void ndn_cache_flush();
+static void ndn_cache_free();
+static int ndn_started = 0;
+static PRLock *lru_lock = NULL;
+static Slapi_RWLock *ndn_cache_lock = NULL;
+static struct ndn_cache_ctx *ndn_cache = NULL;
+static PLHashTable *ndn_cache_hashtable = NULL;
#define ISBLANK(c) ((c) == ' ')
#define ISBLANKSTR(s) (((*(s)) == '2') && (*((s)+1) == '0'))
@@ -487,6 +535,7 @@ slapi_dn_normalize_ext(char *src, size_t src_len, char **dest, size_t *dest_len)
char *ends = NULL;
char *endd = NULL;
char *lastesc = NULL;
+ char *udn;
/* rdn avs for the main DN */
char *typestart = NULL;
int rdn_av_count = 0;
@@ -511,6 +560,14 @@ slapi_dn_normalize_ext(char *src, size_t src_len, char **dest, size_t *dest_len)
if (0 == src_len) {
src_len = strlen(src);
}
+ /*
+ * Check the normalized dn cache
+ */
+ if(ndn_cache_lookup(src, src_len, dest, &udn, &rc)){
+ *dest_len = strlen(*dest);
+ return rc;
+ }
+
s = PL_strnchr(src, '\\', src_len);
if (s) {
*dest_len = src_len * 3;
@@ -1072,6 +1129,10 @@ bail:
/* We terminate the str with NULL only when we allocate the str */
*d = '\0';
}
+ /* add this dn to the normalized dn cache */
+ if(*dest)
+ ndn_cache_add(udn, src_len, *dest, *dest_len);
+
return rc;
}
@@ -2622,3 +2683,343 @@ slapi_sdn_get_size(const Slapi_DN *sdn)
return sz;
}
+/*
+ *
+ * Normalized DN Cache
+ *
+ */
+
+/*
+ * Hashing function using Bernstein's method
+ */
+static PLHashNumber
+ndn_hash_string(const void *key)
+{
+ PLHashNumber hash = 5381;
+ unsigned char *x = (unsigned char *)key;
+ int c;
+
+ while ((c = *x++)){
+ hash = ((hash << 5) + hash) ^ c;
+ }
+ return hash;
+}
+
+void
+ndn_cache_init()
+{
+ if(!config_get_ndn_cache_enabled() || ndn_started){
+ return;
+ }
+ ndn_cache_hashtable = PL_NewHashTable( NDN_CACHE_BUCKETS, ndn_hash_string, PL_CompareStrings, PL_CompareValues, 0, 0);
+ ndn_cache = (struct ndn_cache_ctx *)slapi_ch_malloc(sizeof(struct ndn_cache_ctx));
+ ndn_cache->cache_max_size = config_get_ndn_cache_size();
+ ndn_cache->cache_hits = slapi_counter_new();
+ ndn_cache->cache_tries = slapi_counter_new();
+ ndn_cache->cache_misses = slapi_counter_new();
+ ndn_cache->cache_count = 0;
+ ndn_cache->cache_size = sizeof(struct ndn_cache_ctx) + sizeof(PLHashTable) + sizeof(PLHashTable);
+ ndn_cache->head = NULL;
+ ndn_cache->tail = NULL;
+ ndn_started = 1;
+ if ( NULL == ( lru_lock = PR_NewLock()) || NULL == ( ndn_cache_lock = slapi_new_rwlock())) {
+ ndn_cache_destroy();
+ slapi_log_error( SLAPI_LOG_FATAL, "ndn_cache_init", "Failed to create locks. Disabling cache.\n" );
+ }
+}
+
+void
+ndn_cache_destroy()
+{
+ char *errorbuf = NULL;
+
+ if(!ndn_started){
+ return;
+ }
+ if(lru_lock){
+ PR_DestroyLock(lru_lock);
+ lru_lock = NULL;
+ }
+ if(ndn_cache_lock){
+ slapi_destroy_rwlock(ndn_cache_lock);
+ ndn_cache_lock = NULL;
+ }
+ if(ndn_cache_hashtable){
+ ndn_cache_free();
+ PL_HashTableDestroy(ndn_cache_hashtable);
+ ndn_cache_hashtable = NULL;
+ }
+ config_set_ndn_cache_enabled(CONFIG_NDN_CACHE, "off", errorbuf, 1 );
+ slapi_counter_destroy(&ndn_cache->cache_hits);
+ slapi_counter_destroy(&ndn_cache->cache_tries);
+ slapi_counter_destroy(&ndn_cache->cache_misses);
+ slapi_ch_free((void **)&ndn_cache);
+
+ ndn_started = 0;
+}
+
+int
+ndn_cache_started()
+{
+ return ndn_started;
+}
+
+/*
+ * Look up this dn in the ndn cache
+ */
+static int
+ndn_cache_lookup(char *dn, size_t dn_len, char **result, char **udn, int *rc)
+{
+ struct ndn_hash_val *ndn_ht_val = NULL;
+ char *ndn, *key;
+ int rv = 0;
+
+ if(ndn_started == 0){
+ return rv;
+ }
+ if(dn_len == 0){
+ *result = dn;
+ *rc = 0;
+ return 1;
+ }
+ slapi_counter_increment(ndn_cache->cache_tries);
+ slapi_rwlock_rdlock(ndn_cache_lock);
+ ndn_ht_val = (struct ndn_hash_val *)PL_HashTableLookupConst(ndn_cache_hashtable, dn);
+ if(ndn_ht_val){
+ ndn_cache_update_lru(&ndn_ht_val->lru_node);
+ slapi_counter_increment(ndn_cache->cache_hits);
+ if(ndn_ht_val->len == dn_len ){
+ /* the dn was already normalized, just return the dn as the result */
+ *result = dn;
+ *rc = 0;
+ } else {
+ *rc = 1; /* free result */
+ ndn = slapi_ch_malloc(ndn_ht_val->len + 1);
+ memcpy(ndn, ndn_ht_val->ndn, ndn_ht_val->len);
+ ndn[ndn_ht_val->len] = '\0';
+ *result = ndn;
+ }
+ rv = 1;
+ } else {
+ /* copy/preserve the udn, so we can use it as the key when we add dn's to the hashtable */
+ key = slapi_ch_malloc(dn_len + 1);
+ memcpy(key, dn, dn_len);
+ key[dn_len] = '\0';
+ *udn = key;
+ }
+ slapi_rwlock_unlock(ndn_cache_lock);
+
+ return rv;
+}
+
+/*
+ * Move this lru node to the top of the list
+ */
+static void
+ndn_cache_update_lru(struct ndn_cache_lru **node)
+{
+ struct ndn_cache_lru *prev, *next, *curr_node = *node;
+
+ if(curr_node == NULL){
+ return;
+ }
+ PR_Lock(lru_lock);
+ if(curr_node->prev == NULL){
+ /* already the top node */
+ PR_Unlock(lru_lock);
+ return;
+ }
+ prev = curr_node->prev;
+ next = curr_node->next;
+ if(next){
+ next->prev = prev;
+ prev->next = next;
+ } else {
+ /* this was the tail, so reset the tail */
+ ndn_cache->tail = prev;
+ prev->next = NULL;
+ }
+ curr_node->prev = NULL;
+ curr_node->next = ndn_cache->head;
+ ndn_cache->head->prev = curr_node;
+ ndn_cache->head = curr_node;
+ PR_Unlock(lru_lock);
+}
+
+/*
+ * Add a ndn to the cache. Try and do as much as possible before taking the write lock.
+ */
+static void
+ndn_cache_add(char *dn, size_t dn_len, char *ndn, size_t ndn_len)
+{
+ struct ndn_hash_val *ht_entry;
+ struct ndn_cache_lru *new_node = NULL;
+ PLHashEntry *he;
+ int size;
+
+ if(ndn_started == 0 || dn_len == 0){
+ return;
+ }
+ if(strlen(ndn) > ndn_len){
+ /* we need to null terminate the ndn */
+ *(ndn + ndn_len) = '\0';
+ }
+ /*
+ * Calculate the approximate memory footprint of the hash entry, key, and lru entry.
+ */
+ size = (dn_len * 2) + ndn_len + sizeof(PLHashEntry) + sizeof(struct ndn_hash_val) + sizeof(struct ndn_cache_lru);
+ /*
+ * Create our LRU node
+ */
+ new_node = (struct ndn_cache_lru *)slapi_ch_malloc(sizeof(struct ndn_cache_lru));
+ if(new_node == NULL){
+ slapi_log_error( SLAPI_LOG_FATAL, "ndn_cache_add", "Failed to allocate new lru node.\n");
+ return;
+ }
+ new_node->prev = NULL;
+ new_node->key = dn; /* dn has already been allocated */
+ /*
+ * It's possible this dn was added to the hash by another thread.
+ */
+ slapi_rwlock_wrlock(ndn_cache_lock);
+ ht_entry = (struct ndn_hash_val *)PL_HashTableLookupConst(ndn_cache_hashtable, dn);
+ if(ht_entry){
+ /* already exists, free the node and return */
+ slapi_rwlock_unlock(ndn_cache_lock);
+ slapi_ch_free_string(&new_node->key);
+ slapi_ch_free((void **)&new_node);
+ return;
+ }
+ /*
+ * Create the hash entry
+ */
+ ht_entry = (struct ndn_hash_val *)slapi_ch_malloc(sizeof(struct ndn_hash_val));
+ if(ht_entry == NULL){
+ slapi_rwlock_unlock(ndn_cache_lock);
+ slapi_log_error( SLAPI_LOG_FATAL, "ndn_cache_add", "Failed to allocate new hash entry.\n");
+ slapi_ch_free_string(&new_node->key);
+ slapi_ch_free((void **)&new_node);
+ return;
+ }
+ ht_entry->ndn = slapi_ch_malloc(ndn_len + 1);
+ memcpy(ht_entry->ndn, ndn, ndn_len);
+ ht_entry->ndn[ndn_len] = '\0';
+ ht_entry->len = ndn_len;
+ ht_entry->size = size;
+ ht_entry->lru_node = new_node;
+ /*
+ * Check if our cache is full
+ */
+ PR_Lock(lru_lock); /* grab the lru lock now, as ndn_cache_flush needs it */
+ if(ndn_cache->cache_max_size != 0 && ((ndn_cache->cache_size + size) > ndn_cache->cache_max_size)){
+ ndn_cache_flush();
+ }
+ /*
+ * Set the ndn cache lru nodes
+ */
+ if(ndn_cache->head == NULL && ndn_cache->tail == NULL){
+ /* this is the first node */
+ ndn_cache->head = new_node;
+ ndn_cache->tail = new_node;
+ new_node->next = NULL;
+ } else {
+ new_node->next = ndn_cache->head;
+ ndn_cache->head->prev = new_node;
+ }
+ ndn_cache->head = new_node;
+ PR_Unlock(lru_lock);
+ /*
+ * Add the new object to the hashtable, and update our stats
+ */
+ he = PL_HashTableAdd(ndn_cache_hashtable, new_node->key, (void *)ht_entry);
+ if(he == NULL){
+ slapi_log_error( SLAPI_LOG_FATAL, "ndn_cache_add", "Failed to add new entry to hash(%s)\n",dn);
+ } else {
+ ndn_cache->cache_count++;
+ ndn_cache->cache_size += size;
+ }
+ slapi_rwlock_unlock(ndn_cache_lock);
+}
+
+/*
+ * cache is full, remove the least used dn's. lru_lock/ndn_cache write lock are already taken
+ */
+static void
+ndn_cache_flush()
+{
+ struct ndn_cache_lru *node, *next, *flush_node;
+ int i;
+
+ node = ndn_cache->tail;
+ for(i = 0; node && i < NDN_FLUSH_COUNT && ndn_cache->cache_count > NDN_MIN_COUNT; i++){
+ flush_node = node;
+ /* update the lru */
+ next = node->prev;
+ next->next = NULL;
+ ndn_cache->tail = next;
+ node = next;
+ /* now update the hash */
+ ndn_cache->cache_count--;
+ ndn_cache_delete(flush_node->key);
+ slapi_ch_free_string(&flush_node->key);
+ slapi_ch_free((void **)&flush_node);
+ }
+
+ slapi_log_error( SLAPI_LOG_CACHE, "ndn_cache_flush","Flushed cache.\n");
+}
+
+static void
+ndn_cache_free()
+{
+ struct ndn_cache_lru *node, *next, *flush_node;
+
+ if(!ndn_cache){
+ return;
+ }
+
+ node = ndn_cache->tail;
+ while(node && ndn_cache->cache_count){
+ flush_node = node;
+ /* update the lru */
+ next = node->prev;
+ if(next){
+ next->next = NULL;
+ }
+ ndn_cache->tail = next;
+ node = next;
+ /* now update the hash */
+ ndn_cache->cache_count--;
+ ndn_cache_delete(flush_node->key);
+ slapi_ch_free_string(&flush_node->key);
+ slapi_ch_free((void **)&flush_node);
+ }
+}
+
+/* this is already "write" locked from ndn_cache_add */
+static void
+ndn_cache_delete(char *dn)
+{
+ struct ndn_hash_val *ht_entry;
+
+ ht_entry = (struct ndn_hash_val *)PL_HashTableLookupConst(ndn_cache_hashtable, dn);
+ if(ht_entry){
+ ndn_cache->cache_size -= ht_entry->size;
+ slapi_ch_free_string(&ht_entry->ndn);
+ slapi_ch_free((void **)&ht_entry);
+ PL_HashTableRemove(ndn_cache_hashtable, dn);
+ }
+}
+
+/* stats for monitor */
+void
+ndn_cache_get_stats(PRUint64 *hits, PRUint64 *tries, size_t *size, size_t *max_size, long *count)
+{
+ slapi_rwlock_rdlock(ndn_cache_lock);
+ *hits = slapi_counter_get_value(ndn_cache->cache_hits);
+ *tries = slapi_counter_get_value(ndn_cache->cache_tries);
+ *size = ndn_cache->cache_size;
+ *max_size = ndn_cache->cache_max_size;
+ *count = ndn_cache->cache_count;
+ slapi_rwlock_unlock(ndn_cache_lock);
+}
+
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index 79ca2bd..3c0c9f4 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -1035,7 +1035,15 @@ static struct config_get_and_set {
{CONFIG_IGNORE_TIME_SKEW, config_set_ignore_time_skew,
NULL, 0,
(void**)&global_slapdFrontendConfig.ignore_time_skew,
- CONFIG_ON_OFF, (ConfigGetFunc)config_get_ignore_time_skew, &init_ignore_time_skew}
+ CONFIG_ON_OFF, (ConfigGetFunc)config_get_ignore_time_skew, &init_ignore_time_skew},
+ {CONFIG_NDN_CACHE, config_set_ndn_cache_enabled,
+ NULL, 0,
+ (void**)&global_slapdFrontendConfig.ndn_cache_enabled, CONFIG_INT,
+ (ConfigGetFunc)config_get_ndn_cache_enabled},
+ {CONFIG_NDN_CACHE_SIZE, config_set_ndn_cache_max_size,
+ NULL, 0,
+ (void**)&global_slapdFrontendConfig.ndn_cache_max_size,
+ CONFIG_INT, (ConfigGetFunc)config_get_ndn_cache_size},
#ifdef MEMPOOL_EXPERIMENTAL
,{CONFIG_MEMPOOL_SWITCH_ATTRIBUTE, config_set_mempool_switch,
NULL, 0,
@@ -1053,7 +1061,7 @@ static struct config_get_and_set {
/*
* hashNocaseString - used for case insensitive hash lookups
*/
-static PLHashNumber
+PLHashNumber
hashNocaseString(const void *key)
{
PLHashNumber h = 0;
@@ -1067,7 +1075,7 @@ hashNocaseString(const void *key)
/*
* hashNocaseCompare - used for case insensitive hash key comparisons
*/
-static PRIntn
+PRIntn
hashNocaseCompare(const void *v1, const void *v2)
{
return (strcasecmp((char *)v1, (char *)v2) == 0);
@@ -1463,6 +1471,11 @@ FrontendConfig_init () {
init_malloc_mmap_threshold = cfg->malloc_mmap_threshold = DEFAULT_MALLOC_UNSET;
#endif
+ cfg->disk_logging_critical = LDAP_OFF;
+ cfg->ndn_cache_enabled = LDAP_OFF;
+ cfg->ndn_cache_max_size = NDN_DEFAULT_SIZE;
+
+
#ifdef MEMPOOL_EXPERIMENTAL
init_mempool_switch = cfg->mempool_switch = LDAP_ON;
cfg->mempool_maxfreelist = 1024;
@@ -1694,6 +1707,42 @@ config_set_sasl_maxbufsize(const char *attrname, char *value, char *errorbuf, in
return retVal;
}
+int
+config_set_ndn_cache_enabled(const char *attrname, char *value, char *errorbuf, int apply )
+{
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+ int retVal;
+
+ retVal = config_set_onoff ( attrname, value, &(slapdFrontendConfig->ndn_cache_enabled), errorbuf, apply);
+
+ return retVal;
+}
+
+int
+config_set_ndn_cache_max_size(const char *attrname, char *value, char *errorbuf, int apply )
+{
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+ long size;
+ int retVal = LDAP_SUCCESS;
+
+ size = atol(value);
+ if(size < 0){
+ size = 0; /* same as -1 */
+ }
+ if(size > 0 && size < 1024000){
+ PR_snprintf ( errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "ndn_cache_max_size too low(%d), changing to "
+ "%d bytes.\n",(int)size, NDN_DEFAULT_SIZE);
+ size = NDN_DEFAULT_SIZE;
+ }
+ if(apply){
+ CFG_LOCK_WRITE(slapdFrontendConfig);
+ slapdFrontendConfig->ndn_cache_max_size = size;
+ CFG_UNLOCK_WRITE(slapdFrontendConfig);
+ }
+
+ return retVal;
+}
+
int
config_set_port( const char *attrname, char *port, char *errorbuf, int apply ) {
long nPort;
@@ -5626,6 +5675,27 @@ config_get_max_filter_nest_level()
return retVal;
}
+size_t
+config_get_ndn_cache_size(){
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+ size_t retVal;
+
+ CFG_LOCK_READ(slapdFrontendConfig);
+ retVal = slapdFrontendConfig->ndn_cache_max_size;
+ CFG_UNLOCK_READ(slapdFrontendConfig);
+ return retVal;
+}
+
+int
+config_get_ndn_cache_enabled(){
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+ int retVal;
+
+ CFG_LOCK_READ(slapdFrontendConfig);
+ retVal = slapdFrontendConfig->ndn_cache_enabled;
+ CFG_UNLOCK_READ(slapdFrontendConfig);
+ return retVal;
+}
char *
config_get_basedn() {
diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
index bc07cbb..5d3e7e7 100644
--- a/ldap/servers/slapd/main.c
+++ b/ldap/servers/slapd/main.c
@@ -1046,6 +1046,9 @@ main( int argc, char **argv)
}
}
+ /* initialize the normalized DN cache */
+ ndn_cache_init();
+
/*
* Detach ourselves from the terminal (unless running in debug mode).
* We must detach before we start any threads since detach forks() on
@@ -1267,6 +1270,7 @@ main( int argc, char **argv)
cleanup:
SSL_ShutdownServerSessionIDCache();
SSL_ClearSessionCache();
+ ndn_cache_destroy();
NSS_Shutdown();
PR_Cleanup();
#ifdef _WIN32
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index ce09260..0891608 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -389,6 +389,7 @@ int config_set_disk_threshold( const char *attrname, char *value, char *errorbuf
int config_set_disk_grace_period( const char *attrname, char *value, char *errorbuf, int apply );
int config_set_disk_logging_critical( const char *attrname, char *value, char *errorbuf, int apply );
int config_set_auditlog_unhashed_pw(const char *attrname, char *value, char *errorbuf, int apply);
+
int config_set_sasl_maxbufsize(const char *attrname, char *value, char *errorbuf, int apply );
int config_set_listen_backlog_size(const char *attrname, char *value, char *errorbuf, int apply);
int config_set_ignore_time_skew(const char *attrname, char *value, char *errorbuf, int apply);
@@ -398,6 +399,10 @@ int config_set_malloc_trim_threshold(const char *attrname, char *value, char *er
int config_set_malloc_mmap_threshold(const char *attrname, char *value, char *errorbuf, int apply);
#endif
+int config_set_ndn_cache_enabled(const char *attrname, char *value, char *errorbuf, int apply);
+int config_set_ndn_cache_max_size(const char *attrname, char *value, char *errorbuf, int apply);
+
+
#if !defined(_WIN32) && !defined(AIX)
int config_set_maxdescriptors( const char *attrname, char *value, char *errorbuf, int apply );
#endif /* !_WIN_32 && !AIX */
@@ -562,6 +567,13 @@ int config_get_malloc_trim_threshold();
int config_get_malloc_mmap_threshold();
#endif
+int config_get_ndn_cache_count();
+size_t config_get_ndn_cache_size();
+int config_get_ndn_cache_enabled();
+PLHashNumber hashNocaseString(const void *key);
+PRIntn hashNocaseCompare(const void *v1, const void *v2);
+
+
int is_abspath(const char *);
char* rel2abspath( char * );
char* rel2abspath_ext( char *, char * );
diff --git a/ldap/servers/slapd/schema.c b/ldap/servers/slapd/schema.c
index 28c1ffc..18ae152 100644
--- a/ldap/servers/slapd/schema.c
+++ b/ldap/servers/slapd/schema.c
@@ -250,22 +250,6 @@ dont_allow_that(Slapi_PBlock *pb, Slapi_Entry* entryBefore, Slapi_Entry* e, int
return SLAPI_DSE_CALLBACK_ERROR;
}
-#if 0
-/*
- * hashNocaseString - used for case insensitive hash lookups
- */
-static PLHashNumber
-hashNocaseString(const void *key)
-{
- PLHashNumber h = 0;
- const unsigned char *s;
-
- for (s = key; *s; s++)
- h = (h >> 28) ^ (h << 4) ^ (tolower(*s));
- return h;
-}
-#endif
-
static const char *
skipWS(const char *s)
{
@@ -278,7 +262,6 @@ skipWS(const char *s)
return s;
}
-
/*
* like strchr() but strings within single quotes are skipped.
*/
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 33cfeb4..70e8a51 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -2015,6 +2015,7 @@ typedef struct _slapdEntryPoints {
#define CONFIG_DISK_THRESHOLD "nsslapd-disk-monitoring-threshold"
#define CONFIG_DISK_GRACE_PERIOD "nsslapd-disk-monitoring-grace-period"
#define CONFIG_DISK_LOGGING_CRITICAL "nsslapd-disk-monitoring-logging-critical"
+
#define CONFIG_SASL_MAXBUFSIZE "nsslapd-sasl-max-buffer-size"
#define CONFIG_LISTEN_BACKLOG_SIZE "nsslapd-listen-backlog-size"
#define CONFIG_IGNORE_TIME_SKEW "nsslapd-ignore-time-skew"
@@ -2035,6 +2036,10 @@ typedef struct _slapdEntryPoints {
#define DAEMON_LISTEN_SIZE 128
#endif
+#define CONFIG_NDN_CACHE "nsslapd-ndn-cache-enabled"
+#define CONFIG_NDN_CACHE_SIZE "nsslapd-ndn-cache-max-size"
+
+
#ifdef MEMPOOL_EXPERIMENTAL
#define CONFIG_MEMPOOL_SWITCH_ATTRIBUTE "nsslapd-mempool"
#define CONFIG_MEMPOOL_MAXFREELIST_ATTRIBUTE "nsslapd-mempool-maxfreelist"
@@ -2272,12 +2277,17 @@ typedef struct _slapdFrontendConfig {
PRInt64 disk_threshold;
int disk_grace_period;
int disk_logging_critical;
+
int ignore_time_skew;
#if defined(LINUX)
int malloc_mxfast; /* mallopt M_MXFAST */
int malloc_trim_threshold; /* mallopt M_TRIM_THRESHOLD */
int malloc_mmap_threshold; /* mallopt M_MMAP_THRESHOLD */
#endif
+
+ /* normalized dn cache */
+ int ndn_cache_enabled;
+ size_t ndn_cache_max_size;
} slapdFrontendConfig_t;
/* possible values for slapdFrontendConfig_t.schemareplace */
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index 940260f..8507f47 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -387,12 +387,16 @@ Slapi_DN *slapi_sdn_init_normdn_ndn_passin(Slapi_DN *sdn, const char *dn);
Slapi_DN *slapi_sdn_init_normdn_passin(Slapi_DN *sdn, const char *dn);
char *slapi_dn_normalize_original( char *dn );
char *slapi_dn_normalize_case_original( char *dn );
+void ndn_cache_init();
+void ndn_cache_destroy();
+int ndn_cache_started();
+void ndn_cache_get_stats(PRUint64 *hits, PRUint64 *tries, size_t *size, size_t *max_size, long *count);
+#define NDN_DEFAULT_SIZE 20971520 /* 20mb - size of normalized dn cache */
/* filter.c */
int filter_flag_is_set(const Slapi_Filter *f,unsigned char flag);
char *slapi_filter_to_string(const Slapi_Filter *f, char *buffer, size_t bufsize);
-char *
-slapi_filter_to_string_internal( const struct slapi_filter *f, char *buf, size_t *bufsize );
+char *slapi_filter_to_string_internal( const struct slapi_filter *f, char *buf, size_t *bufsize );
/* operation.c */
8 years, 5 months
Branch '389-ds-base-1.2.11' - ldap/servers
by Mark Reynolds
ldap/servers/slapd/back-ldbm/ldbm_delete.c | 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)
New commits:
commit 83ecd4509b28f6d6b0cd210cd767d6b29026c182
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Thu Dec 18 09:08:52 2014 -0500
Ticket 47750 - During delete operation do not refresh cache entry if it is a tombstone
Bug Description: After calling the betxn postop plugins, do not attempt to refresh the
entry if it was converted to a tombstone. A tombstone entry does
not have its entry mutex allocated, and it will be dereferenced.
Fix Description: If the entry is converted to a tombstone, there is no need to
refresh it in the first place. Skip the cache refresh if it's a
tombstone. If it's not a tombstone, we also need to return the
cache entry if it was not changed in betxn postop, because we
incremented its refcnt while checking if it was updated.
https://fedorahosted.org/389/ticket/47750
Reviewed by: nhosoi(Thanks!)
(cherry picked from commit 4ae67943e807b869aeda213dcd39b59feb5f8259)
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
index 87cc57e..171a9a9 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
@@ -1116,12 +1116,15 @@ ldbm_back_delete( Slapi_PBlock *pb )
/* delete from cache and clean up */
if (e) {
- struct backentry *old_e = e;
-
- e = cache_find_id(&inst->inst_cache,e->ep_id);
- if(e != old_e){
- /* return the old entry, and proceed with the new one */
- CACHE_RETURN(&inst->inst_cache, &old_e);
+ if(!create_tombstone_entry){
+ struct backentry *old_e = e;
+ e = cache_find_id(&inst->inst_cache,e->ep_id);
+ if(e != old_e){
+ cache_unlock_entry(&inst->inst_cache, old_e);
+ CACHE_RETURN(&inst->inst_cache, &old_e);
+ } else {
+ CACHE_RETURN(&inst->inst_cache, &e);
+ }
}
if (cache_is_in_cache(&inst->inst_cache, e)) {
ep_id = e->ep_id;
8 years, 5 months
Branch '389-ds-base-1.3.1' - ldap/servers
by Mark Reynolds
ldap/servers/slapd/back-ldbm/ldbm_delete.c | 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)
New commits:
commit f5458c4a03cc7a3a369a80999cf6643d34bb620b
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Thu Dec 18 09:08:52 2014 -0500
Ticket 47750 - During delete operation do not refresh cache entry if it is a tombstone
Bug Description: After calling the betxn postop plugins, do not attempt to refresh the
entry if it was converted to a tombstone. A tombstone entry does
not have its entry mutex allocated, and it will be dereferenced.
Fix Description: If the entry is converted to a tombstone, there is no need to
refresh it in the first place. Skip the cache refresh if it's a
tombstone. If it's not a tombstone, we also need to return the
cache entry if it was not changed in betxn postop, because we
incremented its refcnt while checking if it was updated.
https://fedorahosted.org/389/ticket/47750
Reviewed by: nhosoi(Thanks!)
(cherry picked from commit 4ae67943e807b869aeda213dcd39b59feb5f8259)
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
index 52f84de..02f2d24 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
@@ -1209,12 +1209,15 @@ ldbm_back_delete( Slapi_PBlock *pb )
/* delete from cache and clean up */
if (e) {
- struct backentry *old_e = e;
-
- e = cache_find_id(&inst->inst_cache,e->ep_id);
- if(e != old_e){
- /* return the old entry, and proceed with the new one */
- CACHE_RETURN(&inst->inst_cache, &old_e);
+ if(!create_tombstone_entry){
+ struct backentry *old_e = e;
+ e = cache_find_id(&inst->inst_cache,e->ep_id);
+ if(e != old_e){
+ cache_unlock_entry(&inst->inst_cache, old_e);
+ CACHE_RETURN(&inst->inst_cache, &old_e);
+ } else {
+ CACHE_RETURN(&inst->inst_cache, &e);
+ }
}
if (cache_is_in_cache(&inst->inst_cache, e)) {
ep_id = e->ep_id; /* Otherwise, e might have been freed. */
8 years, 5 months
Branch '389-ds-base-1.3.2' - ldap/servers
by Mark Reynolds
ldap/servers/slapd/back-ldbm/ldbm_delete.c | 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)
New commits:
commit 6c4ad25107e76dea77785da6128cee91a27e761e
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Thu Dec 18 09:08:52 2014 -0500
Ticket 47750 - During delete operation do not refresh cache entry if it is a tombstone
Bug Description: After calling the betxn postop plugins, do not attempt to refresh the
entry if it was converted to a tombstone. A tombstone entry does
not have its entry mutex allocated, and it will be dereferenced.
Fix Description: If the entry is converted to a tombstone, there is no need to
refresh it in the first place. Skip the cache refresh if it's a
tombstone. If it's not a tombstone, we also need to return the
cache entry if it was not changed in betxn postop, because we
incremented its refcnt while checking if it was updated.
https://fedorahosted.org/389/ticket/47750
Reviewed by: nhosoi(Thanks!)
(cherry picked from commit 4ae67943e807b869aeda213dcd39b59feb5f8259)
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
index 89710d4..928e13e 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
@@ -1199,12 +1199,15 @@ ldbm_back_delete( Slapi_PBlock *pb )
/* delete from cache and clean up */
if (e) {
- struct backentry *old_e = e;
-
- e = cache_find_id(&inst->inst_cache,e->ep_id);
- if(e != old_e){
- /* return the old entry, and proceed with the new one */
- CACHE_RETURN(&inst->inst_cache, &old_e);
+ if(!create_tombstone_entry){
+ struct backentry *old_e = e;
+ e = cache_find_id(&inst->inst_cache,e->ep_id);
+ if(e != old_e){
+ cache_unlock_entry(&inst->inst_cache, old_e);
+ CACHE_RETURN(&inst->inst_cache, &old_e);
+ } else {
+ CACHE_RETURN(&inst->inst_cache, &e);
+ }
}
if (cache_is_in_cache(&inst->inst_cache, e)) {
ep_id = e->ep_id; /* Otherwise, e might have been freed. */
8 years, 5 months
Branch '389-ds-base-1.3.3' - ldap/servers
by Mark Reynolds
ldap/servers/slapd/back-ldbm/ldbm_delete.c | 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)
New commits:
commit f63473228d05c56a096580ec5e66566b35ab4535
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Thu Dec 18 09:08:52 2014 -0500
Ticket 47750 - During delete operation do not refresh cache entry if it is a tombstone
Bug Description: After calling the betxn postop plugins, do not attempt to refresh the
entry if it was converted to a tombstone. A tombstone entry does
not have its entry mutex allocated, and it will be dereferenced.
Fix Description: If the entry is converted to a tombstone, there is no need to
refresh it in the first place. Skip the cache refresh if it's a
tombstone. If it's not a tombstone, we also need to return the
cache entry if it was not changed in betxn postop, because we
incremented its refcnt while checking if it was updated.
https://fedorahosted.org/389/ticket/47750
Reviewed by: nhosoi(Thanks!)
(cherry picked from commit 4ae67943e807b869aeda213dcd39b59feb5f8259)
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
index eaac39d..3de8efa 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
@@ -1261,12 +1261,15 @@ ldbm_back_delete( Slapi_PBlock *pb )
/* delete from cache and clean up */
if (e) {
- struct backentry *old_e = e;
-
- e = cache_find_id(&inst->inst_cache,e->ep_id);
- if(e != old_e){
- /* return the old entry, and proceed with the new one */
- CACHE_RETURN(&inst->inst_cache, &old_e);
+ if(!create_tombstone_entry){
+ struct backentry *old_e = e;
+ e = cache_find_id(&inst->inst_cache,e->ep_id);
+ if(e != old_e){
+ cache_unlock_entry(&inst->inst_cache, old_e);
+ CACHE_RETURN(&inst->inst_cache, &old_e);
+ } else {
+ CACHE_RETURN(&inst->inst_cache, &e);
+ }
}
if (cache_is_in_cache(&inst->inst_cache, e)) {
ep_id = e->ep_id; /* Otherwise, e might have been freed. */
8 years, 5 months