dirsrvtests/tickets/ticket47981_test.py | 345 ++++++++++++++++++++++++++++++++
ldap/servers/plugins/cos/cos_cache.c | 34 +--
2 files changed, 359 insertions(+), 20 deletions(-)
New commits:
commit df7bafae3c069ecf90d951d0c0f1ede26ab521fd
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Wed Jan 7 08:59:06 2015 -0500
Ticket 47981 - COS cache doesn't properly mark vattr cache as
invalid when there are multiple suffixes
Bug Description: When rebuilding the COS cache, we check each suffix for COS entries.
If the last suffix checked does not contain any COS entries, then the
virtual attribute cache is incorrectly not invalidated. This allows
for already cached entries to hold onto the old COS attributes/values.
Fix Description: Only set the vattr_cacheable flag if a suffix contains COS entries, not
if it does not - by default the flag is not set.
https://fedorahosted.org/389/ticket/47981
Reviewed by: nhosoi(Thanks!)
(cherry picked from commit 42e2df3858a4e14706d57b5c907d1d3768f4d970)
diff --git a/dirsrvtests/tickets/ticket47981_test.py b/dirsrvtests/tickets/ticket47981_test.py
new file mode 100644
index 0000000..2a16ce6
--- /dev/null
+++ b/dirsrvtests/tickets/ticket47981_test.py
@@ -0,0 +1,345 @@
+import os
+import sys
+import time
+import ldap
+import ldap.sasl
+import logging
+import socket
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from constants import *
+
+log = logging.getLogger(__name__)
+
+installation_prefix = None
+
+BRANCH = 'ou=people,' + DEFAULT_SUFFIX
+USER_DN = 'uid=user1,%s' % (BRANCH)
+BRANCH_CONTAINER = 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com'
+BRANCH_COS_DEF = 'cn=nsPwPolicy_CoS,ou=people,dc=example,dc=com'
+BRANCH_PWP = 'cn=cn\\3DnsPwPolicyEntry\\2Cou\\3DPeople\\2Cdc\\3Dexample\\2Cdc\\3Dcom,' + \
+ 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com'
+BRANCH_COS_TMPL = 'cn=cn\\3DnsPwTemplateEntry\\2Cou\\3DPeople\\2Cdc\\3Dexample\\2Cdc\\3Dcom,' + \
+ 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com'
+SECOND_SUFFIX = 'o=netscaperoot'
+BE_NAME = 'netscaperoot'
+
+
+class TopologyStandalone(object):
+ def __init__(self, standalone):
+ standalone.open()
+ self.standalone = standalone
+
+
+@pytest.fixture(scope="module")
+def topology(request):
+ '''
+ This fixture is used to standalone topology for the 'module'.
+ At the beginning, It may exists a standalone instance.
+ It may also exists a backup for the standalone instance.
+
+ Principle:
+ If standalone instance exists:
+ restart it
+ If backup of standalone exists:
+ create/rebind to standalone
+
+ restore standalone instance from backup
+ else:
+ Cleanup everything
+ remove instance
+ remove backup
+ Create instance
+ Create backup
+ '''
+ global installation_prefix
+
+ if installation_prefix:
+ args_instance[SER_DEPLOYED_DIR] = installation_prefix
+
+ standalone = DirSrv(verbose=False)
+
+ # Args for the standalone instance
+ args_instance[SER_HOST] = HOST_STANDALONE
+ args_instance[SER_PORT] = PORT_STANDALONE
+ args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+ args_standalone = args_instance.copy()
+ standalone.allocate(args_standalone)
+
+ # Get the status of the backups
+ backup_standalone = standalone.checkBackupFS()
+
+ # Get the status of the instance and restart it if it exists
+ instance_standalone = standalone.exists()
+ if instance_standalone:
+ # assuming the instance is already stopped, just wait 5 sec max
+ standalone.stop(timeout=5)
+ standalone.start(timeout=10)
+
+ if backup_standalone:
+ # The backup exist, assuming it is correct
+ # we just re-init the instance with it
+ if not instance_standalone:
+ standalone.create()
+ # Used to retrieve configuration information (dbdir, confdir...)
+ standalone.open()
+
+ # restore standalone instance from backup
+ standalone.stop(timeout=10)
+ standalone.restoreFS(backup_standalone)
+ standalone.start(timeout=10)
+
+ else:
+ # We should be here only in two conditions
+ # - This is the first time a test involve standalone instance
+ # - Something weird happened (instance/backup destroyed)
+ # so we discard everything and recreate all
+
+ # Remove the backup. So even if we have a specific backup file
+ # (e.g backup_standalone) we clear backup that an instance may have created
+ if backup_standalone:
+ standalone.clearBackupFS()
+
+ # Remove the instance
+ if instance_standalone:
+ standalone.delete()
+
+ # Create the instance
+ standalone.create()
+
+ # Used to retrieve configuration information (dbdir, confdir...)
+ standalone.open()
+
+ # Time to create the backups
+ standalone.stop(timeout=10)
+ standalone.backupfile = standalone.backupFS()
+ standalone.start(timeout=10)
+
+ # clear the tmp directory
+ standalone.clearTmpDir(__file__)
+
+ #
+ # Here we have standalone instance up and running
+ # Either coming from a backup recovery
+ # or from a fresh (re)init
+ # Time to return the topology
+ return TopologyStandalone(standalone)
+
+
+def addSubtreePwPolicy(inst):
+ #
+ # Add subtree policy to the people branch
+ #
+ try:
+ inst.add_s(Entry((BRANCH_CONTAINER, {
+ 'objectclass': 'top nsContainer'.split(),
+ 'cn': 'nsPwPolicyContainer'
+ })))
+ except ldap.LDAPError, e:
+ log.error('Failed to add subtree container for ou=people: error ' + e.message['desc'])
+ assert False
+
+ # Add the password policy subentry
+ try:
+ inst.add_s(Entry((BRANCH_PWP, {
+ 'objectclass': 'top ldapsubentry passwordpolicy'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
+ 'passwordMustChange': 'off',
+ 'passwordExp': 'off',
+ 'passwordHistory': 'off',
+ 'passwordMinAge': '0',
+ 'passwordChange': 'off',
+ 'passwordStorageScheme': 'ssha'
+ })))
+ except ldap.LDAPError, e:
+ log.error('Failed to add passwordpolicy: error ' + e.message['desc'])
+ assert False
+
+ # Add the COS template
+ try:
+ inst.add_s(Entry((BRANCH_COS_TMPL, {
+ 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
+ 'cosPriority': '1',
+ 'cn': 'cn=nsPwTemplateEntry,ou=people,dc=example,dc=com',
+ 'pwdpolicysubentry': BRANCH_PWP
+ })))
+ except ldap.LDAPError, e:
+ log.error('Failed to add COS template: error ' + e.message['desc'])
+ assert False
+
+ # Add the COS definition
+ try:
+ inst.add_s(Entry((BRANCH_COS_DEF, {
+ 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
+ 'costemplatedn': BRANCH_COS_TMPL,
+ 'cosAttribute': 'pwdpolicysubentry default operational-default'
+ })))
+ except ldap.LDAPError, e:
+ log.error('Failed to add COS def: error ' + e.message['desc'])
+ assert False
+ time.sleep(0.5)
+
+
+def delSubtreePwPolicy(inst):
+ try:
+ inst.delete_s(BRANCH_COS_DEF)
+ except ldap.LDAPError, e:
+ log.error('Failed to delete COS def: error ' + e.message['desc'])
+ assert False
+
+ try:
+ inst.delete_s(BRANCH_COS_TMPL)
+ except ldap.LDAPError, e:
+ log.error('Failed to delete COS template: error ' + e.message['desc'])
+ assert False
+
+ try:
+ inst.delete_s(BRANCH_PWP)
+ except ldap.LDAPError, e:
+ log.error('Failed to delete COS password policy: error ' + e.message['desc'])
+ assert False
+
+ try:
+ inst.delete_s(BRANCH_CONTAINER)
+ except ldap.LDAPError, e:
+ log.error('Failed to delete COS container: error ' + e.message['desc'])
+ assert False
+ time.sleep(0.5)
+
+
+def test_ticket47981(topology):
+ """
+ If there are multiple suffixes, and the last suffix checked does not contain any COS entries,
+ while other suffixes do, then the vattr cache is not invalidated as it should be. Then any
+ cached entries will still contain the old COS attributes/values.
+ """
+
+ log.info('Testing Ticket 47981 - Test that COS def changes are correctly reflected in affected users')
+
+ #
+ # Create a second backend that does not have any COS entries
+ #
+ log.info('Adding second suffix that will not contain any COS entries...\n')
+
+ topology.standalone.backend.create(SECOND_SUFFIX, {BACKEND_NAME: BE_NAME})
+ topology.standalone.mappingtree.create(SECOND_SUFFIX, bename=BE_NAME)
+ try:
+ topology.standalone.add_s(Entry((SECOND_SUFFIX, {
+ 'objectclass': 'top organization'.split(),
+ 'o': BE_NAME})))
+ except ldap.ALREADY_EXISTS:
+ pass
+ except ldap.LDAPError, e:
+ log.error('Failed to create suffix entry: error ' + e.message['desc'])
+ assert False
+
+ #
+ # Add People branch, it might already exist
+ #
+ log.info('Add our test entries to the default suffix, and proceed with the test...')
+
+ try:
+ topology.standalone.add_s(Entry((BRANCH, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'ou': 'level4'
+ })))
+ except ldap.ALREADY_EXISTS:
+ pass
+ except ldap.LDAPError, e:
+ log.error('Failed to add ou=people: error ' + e.message['desc'])
+ assert False
+
+ #
+ # Add a user to the branch
+ #
+ try:
+ topology.standalone.add_s(Entry((USER_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user1'
+ })))
+ except ldap.LDAPError, e:
+ log.error('Failed to add user1: error ' + e.message['desc'])
+ assert False
+
+ #
+ # Enable password policy and add the subtree policy
+ #
+ try:
+ topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')])
+ except ldap.LDAPError, e:
+ log.error('Failed to set pwpolicy-local: error ' + e.message['desc'])
+ assert False
+
+ addSubtreePwPolicy(topology.standalone)
+
+ #
+ # Now check the user has its expected passwordPolicy subentry
+ #
+ try:
+ entries = topology.standalone.search_s(USER_DN,
+ ldap.SCOPE_BASE,
+ '(objectclass=top)',
+ ['pwdpolicysubentry', 'dn'])
+ if not entries[0].hasAttr('pwdpolicysubentry'):
+ log.fatal('User does not have expected pwdpolicysubentry!')
+ assert False
+ except ldap.LDAPError, e:
+ log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.message['desc']))
+ assert False
+
+ #
+ # Delete the password policy and make sure it is removed from the same user
+ #
+ delSubtreePwPolicy(topology.standalone)
+ try:
+ entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
+ if entries[0].hasAttr('pwdpolicysubentry'):
+ log.fatal('User unexpectedly does have the pwdpolicysubentry!')
+ assert False
+ except ldap.LDAPError, e:
+ log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.message['desc']))
+ assert False
+
+ #
+ # Add the subtree policvy back and see if the user now has it
+ #
+ addSubtreePwPolicy(topology.standalone)
+ try:
+ entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
+ if not entries[0].hasAttr('pwdpolicysubentry'):
+ log.fatal('User does not have expected pwdpolicysubentry!')
+ assert False
+ except ldap.LDAPError, e:
+ log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.message['desc']))
+ assert False
+
+ # If we got here the test passed
+ log.info('Test PASSED')
+
+
+def test_ticket47981_final(topology):
+ topology.standalone.stop(timeout=10)
+
+
+def run_isolated():
+ '''
+ run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..)
+ To run isolated without py.test, you need to
+ - edit this file and comment '@pytest.fixture' line before 'topology' function.
+ - set the installation prefix
+ - run this program
+ '''
+ global installation_prefix
+ installation_prefix = None
+
+ topo = topology(True)
+ test_ticket47981(topo)
+
+if __name__ == '__main__':
+ run_isolated()
\ No newline at end of file
diff --git a/ldap/servers/plugins/cos/cos_cache.c b/ldap/servers/plugins/cos/cos_cache.c
index 3214f82..d3d97f4 100644
--- a/ldap/servers/plugins/cos/cos_cache.c
+++ b/ldap/servers/plugins/cos/cos_cache.c
@@ -260,7 +260,7 @@ static int cos_cache_add_tmpl(cosTemplates **pTemplates, cosAttrValue *dn, cosAt
/* cosDefinitions manipulation */
static int cos_cache_build_definition_list(cosDefinitions **pDefs, int *vattr_cacheable);
-static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs, int *vattr_cacheable);
+static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs);
static int cos_cache_add_defn(cosDefinitions **pDefs, cosAttrValue **dn, int cosType, cosAttrValue **tree, cosAttrValue **tmpDn, cosAttrValue **spec, cosAttrValue **pAttrs, cosAttrValue **pOverrides, cosAttrValue **pOperational, cosAttrValue **pCosMerge, cosAttrValue **pCosOpDefault);
static int cos_cache_entry_is_cos_related( Slapi_Entry *e);
@@ -619,9 +619,9 @@ static int cos_cache_build_definition_list(cosDefinitions **pDefs, int *vattr_ca
LDAPDebug( LDAP_DEBUG_TRACE, "--> cos_cache_build_definition_list\n",0,0,0);
/*
- the class of service definitions may be anywhere in the DIT,
- so our first task is to find them.
- */
+ * The class of service definitions may be anywhere in the DIT,
+ * so our first task is to find them.
+ */
attrs[0] = "namingcontexts";
attrs[1] = 0;
@@ -629,9 +629,9 @@ static int cos_cache_build_definition_list(cosDefinitions **pDefs, int *vattr_ca
LDAPDebug( LDAP_DEBUG_PLUGIN, "cos: Building class of service cache after status change.\n",0,0,0);
/*
- * XXXrbyrne: this looks really ineficient--should be using
+ * XXXrbyrne: this looks really inefficient--should be using
* slapi_get_next_suffix(), rather than searching for namingcontexts.
- */
+ */
pSuffixSearch = slapi_search_internal("",LDAP_SCOPE_BASE,"(objectclass=*)",NULL,attrs,0);
if(pSuffixSearch)
@@ -671,19 +671,21 @@ static int cos_cache_build_definition_list(cosDefinitions **pDefs, int *vattr_ca
{
/* here's a suffix, lets search it... */
if(suffixVals[valIndex]->bv_val)
- if(!cos_cache_add_dn_defs(suffixVals[valIndex]->bv_val ,pDefs, vattr_cacheable))
+ {
+ if(!cos_cache_add_dn_defs(suffixVals[valIndex]->bv_val ,pDefs))
+ {
+ *vattr_cacheable = -1;
cos_def_available = 1;
-
+ break;
+ }
+ }
valIndex++;
}
-
-
ber_bvecfree( suffixVals );
suffixVals = NULL;
}
}
}
-
} while(!slapi_entry_next_attr(pSuffixList[suffixIndex], suffixAttr, &suffixAttr));
}
suffixIndex++;
@@ -709,7 +711,6 @@ next:
slapi_pblock_destroy(pSuffixSearch);
}
-
LDAPDebug( LDAP_DEBUG_TRACE, "<-- cos_cache_build_definition_list\n",0,0,0);
return ret;
}
@@ -750,10 +751,6 @@ cos_dn_defs_cb (Slapi_Entry* e, void *callback_data)
char *norm_dn = NULL;
info=(struct dn_defs_info *)callback_data;
-
- /* assume cacheable */
- info->vattr_cacheable = -1;
-
cos_cache_add_attrval(&pDn, slapi_entry_get_dn(e));
if(slapi_entry_first_attr(e, &dnAttr)) {
goto bail;
@@ -1076,7 +1073,7 @@ bail:
#define DN_DEF_FILTER "(&(|(objectclass=cosSuperDefinition)(objectclass=cosDefinition))(objectclass=ldapsubentry))"
-static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs, int *vattr_cacheable)
+static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs)
{
Slapi_PBlock *pDnSearch = 0;
struct dn_defs_info info = {NULL, 0, 0};
@@ -1084,7 +1081,6 @@ static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs, int *vattr_ca
if (pDnSearch) {
info.ret=-1; /* assume no good defs */
info.pDefs=pDefs;
- info.vattr_cacheable = 0; /* assume not cacheable */
slapi_search_internal_set_pb(pDnSearch, dn, LDAP_SCOPE_SUBTREE,
DN_DEF_FILTER,NULL,0,
NULL,NULL,cos_get_plugin_identity(),0);
@@ -1096,8 +1092,6 @@ static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs, int *vattr_ca
slapi_pblock_destroy (pDnSearch);
}
- *vattr_cacheable = info.vattr_cacheable;
-
return info.ret;
}
dirsrvtests/tickets/ticket47981_test.py | 345 ++++++++++++++++++++++++++++++++
ldap/servers/plugins/cos/cos_cache.c | 34 +--
2 files changed, 359 insertions(+), 20 deletions(-)
New commits:
commit 6229ad9a3d78cf6d83f09f2893c39dce5bbc71ca
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Wed Jan 7 08:59:06 2015 -0500
Ticket 47981 - COS cache doesn't properly mark vattr cache as
invalid when there are multiple suffixes
Bug Description: When rebuilding the COS cache, we check each suffix for COS entries.
If the last suffix checked does not contain any COS entries, then the
virtual attribute cache is incorrectly not invalidated. This allows
for already cached entries to hold onto the old COS attributes/values.
Fix Description: Only set the vattr_cacheable flag if a suffix contains COS entries, not
if it does not - by default the flag is not set.
https://fedorahosted.org/389/ticket/47981
Reviewed by: nhosoi(Thanks!)
(cherry picked from commit 42e2df3858a4e14706d57b5c907d1d3768f4d970)
diff --git a/dirsrvtests/tickets/ticket47981_test.py b/dirsrvtests/tickets/ticket47981_test.py
new file mode 100644
index 0000000..2a16ce6
--- /dev/null
+++ b/dirsrvtests/tickets/ticket47981_test.py
@@ -0,0 +1,345 @@
+import os
+import sys
+import time
+import ldap
+import ldap.sasl
+import logging
+import socket
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from constants import *
+
+log = logging.getLogger(__name__)
+
+installation_prefix = None
+
+BRANCH = 'ou=people,' + DEFAULT_SUFFIX
+USER_DN = 'uid=user1,%s' % (BRANCH)
+BRANCH_CONTAINER = 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com'
+BRANCH_COS_DEF = 'cn=nsPwPolicy_CoS,ou=people,dc=example,dc=com'
+BRANCH_PWP = 'cn=cn\\3DnsPwPolicyEntry\\2Cou\\3DPeople\\2Cdc\\3Dexample\\2Cdc\\3Dcom,' + \
+ 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com'
+BRANCH_COS_TMPL = 'cn=cn\\3DnsPwTemplateEntry\\2Cou\\3DPeople\\2Cdc\\3Dexample\\2Cdc\\3Dcom,' + \
+ 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com'
+SECOND_SUFFIX = 'o=netscaperoot'
+BE_NAME = 'netscaperoot'
+
+
+class TopologyStandalone(object):
+ def __init__(self, standalone):
+ standalone.open()
+ self.standalone = standalone
+
+
+@pytest.fixture(scope="module")
+def topology(request):
+ '''
+ This fixture is used to standalone topology for the 'module'.
+ At the beginning, It may exists a standalone instance.
+ It may also exists a backup for the standalone instance.
+
+ Principle:
+ If standalone instance exists:
+ restart it
+ If backup of standalone exists:
+ create/rebind to standalone
+
+ restore standalone instance from backup
+ else:
+ Cleanup everything
+ remove instance
+ remove backup
+ Create instance
+ Create backup
+ '''
+ global installation_prefix
+
+ if installation_prefix:
+ args_instance[SER_DEPLOYED_DIR] = installation_prefix
+
+ standalone = DirSrv(verbose=False)
+
+ # Args for the standalone instance
+ args_instance[SER_HOST] = HOST_STANDALONE
+ args_instance[SER_PORT] = PORT_STANDALONE
+ args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+ args_standalone = args_instance.copy()
+ standalone.allocate(args_standalone)
+
+ # Get the status of the backups
+ backup_standalone = standalone.checkBackupFS()
+
+ # Get the status of the instance and restart it if it exists
+ instance_standalone = standalone.exists()
+ if instance_standalone:
+ # assuming the instance is already stopped, just wait 5 sec max
+ standalone.stop(timeout=5)
+ standalone.start(timeout=10)
+
+ if backup_standalone:
+ # The backup exist, assuming it is correct
+ # we just re-init the instance with it
+ if not instance_standalone:
+ standalone.create()
+ # Used to retrieve configuration information (dbdir, confdir...)
+ standalone.open()
+
+ # restore standalone instance from backup
+ standalone.stop(timeout=10)
+ standalone.restoreFS(backup_standalone)
+ standalone.start(timeout=10)
+
+ else:
+ # We should be here only in two conditions
+ # - This is the first time a test involve standalone instance
+ # - Something weird happened (instance/backup destroyed)
+ # so we discard everything and recreate all
+
+ # Remove the backup. So even if we have a specific backup file
+ # (e.g backup_standalone) we clear backup that an instance may have created
+ if backup_standalone:
+ standalone.clearBackupFS()
+
+ # Remove the instance
+ if instance_standalone:
+ standalone.delete()
+
+ # Create the instance
+ standalone.create()
+
+ # Used to retrieve configuration information (dbdir, confdir...)
+ standalone.open()
+
+ # Time to create the backups
+ standalone.stop(timeout=10)
+ standalone.backupfile = standalone.backupFS()
+ standalone.start(timeout=10)
+
+ # clear the tmp directory
+ standalone.clearTmpDir(__file__)
+
+ #
+ # Here we have standalone instance up and running
+ # Either coming from a backup recovery
+ # or from a fresh (re)init
+ # Time to return the topology
+ return TopologyStandalone(standalone)
+
+
+def addSubtreePwPolicy(inst):
+ #
+ # Add subtree policy to the people branch
+ #
+ try:
+ inst.add_s(Entry((BRANCH_CONTAINER, {
+ 'objectclass': 'top nsContainer'.split(),
+ 'cn': 'nsPwPolicyContainer'
+ })))
+ except ldap.LDAPError, e:
+ log.error('Failed to add subtree container for ou=people: error ' + e.message['desc'])
+ assert False
+
+ # Add the password policy subentry
+ try:
+ inst.add_s(Entry((BRANCH_PWP, {
+ 'objectclass': 'top ldapsubentry passwordpolicy'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
+ 'passwordMustChange': 'off',
+ 'passwordExp': 'off',
+ 'passwordHistory': 'off',
+ 'passwordMinAge': '0',
+ 'passwordChange': 'off',
+ 'passwordStorageScheme': 'ssha'
+ })))
+ except ldap.LDAPError, e:
+ log.error('Failed to add passwordpolicy: error ' + e.message['desc'])
+ assert False
+
+ # Add the COS template
+ try:
+ inst.add_s(Entry((BRANCH_COS_TMPL, {
+ 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
+ 'cosPriority': '1',
+ 'cn': 'cn=nsPwTemplateEntry,ou=people,dc=example,dc=com',
+ 'pwdpolicysubentry': BRANCH_PWP
+ })))
+ except ldap.LDAPError, e:
+ log.error('Failed to add COS template: error ' + e.message['desc'])
+ assert False
+
+ # Add the COS definition
+ try:
+ inst.add_s(Entry((BRANCH_COS_DEF, {
+ 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
+ 'costemplatedn': BRANCH_COS_TMPL,
+ 'cosAttribute': 'pwdpolicysubentry default operational-default'
+ })))
+ except ldap.LDAPError, e:
+ log.error('Failed to add COS def: error ' + e.message['desc'])
+ assert False
+ time.sleep(0.5)
+
+
+def delSubtreePwPolicy(inst):
+ try:
+ inst.delete_s(BRANCH_COS_DEF)
+ except ldap.LDAPError, e:
+ log.error('Failed to delete COS def: error ' + e.message['desc'])
+ assert False
+
+ try:
+ inst.delete_s(BRANCH_COS_TMPL)
+ except ldap.LDAPError, e:
+ log.error('Failed to delete COS template: error ' + e.message['desc'])
+ assert False
+
+ try:
+ inst.delete_s(BRANCH_PWP)
+ except ldap.LDAPError, e:
+ log.error('Failed to delete COS password policy: error ' + e.message['desc'])
+ assert False
+
+ try:
+ inst.delete_s(BRANCH_CONTAINER)
+ except ldap.LDAPError, e:
+ log.error('Failed to delete COS container: error ' + e.message['desc'])
+ assert False
+ time.sleep(0.5)
+
+
+def test_ticket47981(topology):
+ """
+ If there are multiple suffixes, and the last suffix checked does not contain any COS entries,
+ while other suffixes do, then the vattr cache is not invalidated as it should be. Then any
+ cached entries will still contain the old COS attributes/values.
+ """
+
+ log.info('Testing Ticket 47981 - Test that COS def changes are correctly reflected in affected users')
+
+ #
+ # Create a second backend that does not have any COS entries
+ #
+ log.info('Adding second suffix that will not contain any COS entries...\n')
+
+ topology.standalone.backend.create(SECOND_SUFFIX, {BACKEND_NAME: BE_NAME})
+ topology.standalone.mappingtree.create(SECOND_SUFFIX, bename=BE_NAME)
+ try:
+ topology.standalone.add_s(Entry((SECOND_SUFFIX, {
+ 'objectclass': 'top organization'.split(),
+ 'o': BE_NAME})))
+ except ldap.ALREADY_EXISTS:
+ pass
+ except ldap.LDAPError, e:
+ log.error('Failed to create suffix entry: error ' + e.message['desc'])
+ assert False
+
+ #
+ # Add People branch, it might already exist
+ #
+ log.info('Add our test entries to the default suffix, and proceed with the test...')
+
+ try:
+ topology.standalone.add_s(Entry((BRANCH, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'ou': 'level4'
+ })))
+ except ldap.ALREADY_EXISTS:
+ pass
+ except ldap.LDAPError, e:
+ log.error('Failed to add ou=people: error ' + e.message['desc'])
+ assert False
+
+ #
+ # Add a user to the branch
+ #
+ try:
+ topology.standalone.add_s(Entry((USER_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user1'
+ })))
+ except ldap.LDAPError, e:
+ log.error('Failed to add user1: error ' + e.message['desc'])
+ assert False
+
+ #
+ # Enable password policy and add the subtree policy
+ #
+ try:
+ topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')])
+ except ldap.LDAPError, e:
+ log.error('Failed to set pwpolicy-local: error ' + e.message['desc'])
+ assert False
+
+ addSubtreePwPolicy(topology.standalone)
+
+ #
+ # Now check the user has its expected passwordPolicy subentry
+ #
+ try:
+ entries = topology.standalone.search_s(USER_DN,
+ ldap.SCOPE_BASE,
+ '(objectclass=top)',
+ ['pwdpolicysubentry', 'dn'])
+ if not entries[0].hasAttr('pwdpolicysubentry'):
+ log.fatal('User does not have expected pwdpolicysubentry!')
+ assert False
+ except ldap.LDAPError, e:
+ log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.message['desc']))
+ assert False
+
+ #
+ # Delete the password policy and make sure it is removed from the same user
+ #
+ delSubtreePwPolicy(topology.standalone)
+ try:
+ entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
+ if entries[0].hasAttr('pwdpolicysubentry'):
+ log.fatal('User unexpectedly does have the pwdpolicysubentry!')
+ assert False
+ except ldap.LDAPError, e:
+ log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.message['desc']))
+ assert False
+
+ #
+ # Add the subtree policvy back and see if the user now has it
+ #
+ addSubtreePwPolicy(topology.standalone)
+ try:
+ entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
+ if not entries[0].hasAttr('pwdpolicysubentry'):
+ log.fatal('User does not have expected pwdpolicysubentry!')
+ assert False
+ except ldap.LDAPError, e:
+ log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.message['desc']))
+ assert False
+
+ # If we got here the test passed
+ log.info('Test PASSED')
+
+
+def test_ticket47981_final(topology):
+ topology.standalone.stop(timeout=10)
+
+
+def run_isolated():
+ '''
+ run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..)
+ To run isolated without py.test, you need to
+ - edit this file and comment '@pytest.fixture' line before 'topology' function.
+ - set the installation prefix
+ - run this program
+ '''
+ global installation_prefix
+ installation_prefix = None
+
+ topo = topology(True)
+ test_ticket47981(topo)
+
+if __name__ == '__main__':
+ run_isolated()
\ No newline at end of file
diff --git a/ldap/servers/plugins/cos/cos_cache.c b/ldap/servers/plugins/cos/cos_cache.c
index 0d9a61e..7d8e877 100644
--- a/ldap/servers/plugins/cos/cos_cache.c
+++ b/ldap/servers/plugins/cos/cos_cache.c
@@ -260,7 +260,7 @@ static int cos_cache_add_tmpl(cosTemplates **pTemplates, cosAttrValue *dn, cosAt
/* cosDefinitions manipulation */
static int cos_cache_build_definition_list(cosDefinitions **pDefs, int *vattr_cacheable);
-static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs, int *vattr_cacheable);
+static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs);
static int cos_cache_add_defn(cosDefinitions **pDefs, cosAttrValue **dn, int cosType, cosAttrValue **tree, cosAttrValue **tmpDn, cosAttrValue **spec, cosAttrValue **pAttrs, cosAttrValue **pOverrides, cosAttrValue **pOperational, cosAttrValue **pCosMerge, cosAttrValue **pCosOpDefault);
static int cos_cache_entry_is_cos_related( Slapi_Entry *e);
@@ -619,9 +619,9 @@ static int cos_cache_build_definition_list(cosDefinitions **pDefs, int *vattr_ca
LDAPDebug( LDAP_DEBUG_TRACE, "--> cos_cache_build_definition_list\n",0,0,0);
/*
- the class of service definitions may be anywhere in the DIT,
- so our first task is to find them.
- */
+ * The class of service definitions may be anywhere in the DIT,
+ * so our first task is to find them.
+ */
attrs[0] = "namingcontexts";
attrs[1] = 0;
@@ -629,9 +629,9 @@ static int cos_cache_build_definition_list(cosDefinitions **pDefs, int *vattr_ca
LDAPDebug( LDAP_DEBUG_PLUGIN, "cos: Building class of service cache after status change.\n",0,0,0);
/*
- * XXXrbyrne: this looks really ineficient--should be using
+ * XXXrbyrne: this looks really inefficient--should be using
* slapi_get_next_suffix(), rather than searching for namingcontexts.
- */
+ */
pSuffixSearch = slapi_search_internal("",LDAP_SCOPE_BASE,"(objectclass=*)",NULL,attrs,0);
if(pSuffixSearch)
@@ -671,19 +671,21 @@ static int cos_cache_build_definition_list(cosDefinitions **pDefs, int *vattr_ca
{
/* here's a suffix, lets search it... */
if(suffixVals[valIndex]->bv_val)
- if(!cos_cache_add_dn_defs(suffixVals[valIndex]->bv_val ,pDefs, vattr_cacheable))
+ {
+ if(!cos_cache_add_dn_defs(suffixVals[valIndex]->bv_val ,pDefs))
+ {
+ *vattr_cacheable = -1;
cos_def_available = 1;
-
+ break;
+ }
+ }
valIndex++;
}
-
-
ber_bvecfree( suffixVals );
suffixVals = NULL;
}
}
}
-
} while(!slapi_entry_next_attr(pSuffixList[suffixIndex], suffixAttr, &suffixAttr));
}
suffixIndex++;
@@ -709,7 +711,6 @@ next:
slapi_pblock_destroy(pSuffixSearch);
}
-
LDAPDebug( LDAP_DEBUG_TRACE, "<-- cos_cache_build_definition_list\n",0,0,0);
return ret;
}
@@ -750,10 +751,6 @@ cos_dn_defs_cb (Slapi_Entry* e, void *callback_data)
char *norm_dn = NULL;
info=(struct dn_defs_info *)callback_data;
-
- /* assume cacheable */
- info->vattr_cacheable = -1;
-
cos_cache_add_attrval(&pDn, slapi_entry_get_dn(e));
if(slapi_entry_first_attr(e, &dnAttr)) {
goto bail;
@@ -1076,7 +1073,7 @@ bail:
#define DN_DEF_FILTER "(&(|(objectclass=cosSuperDefinition)(objectclass=cosDefinition))(objectclass=ldapsubentry))"
-static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs, int *vattr_cacheable)
+static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs)
{
Slapi_PBlock *pDnSearch = 0;
struct dn_defs_info info = {NULL, 0, 0};
@@ -1084,7 +1081,6 @@ static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs, int *vattr_ca
if (pDnSearch) {
info.ret=-1; /* assume no good defs */
info.pDefs=pDefs;
- info.vattr_cacheable = 0; /* assume not cacheable */
slapi_search_internal_set_pb(pDnSearch, dn, LDAP_SCOPE_SUBTREE,
DN_DEF_FILTER,NULL,0,
NULL,NULL,cos_get_plugin_identity(),0);
@@ -1096,8 +1092,6 @@ static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs, int *vattr_ca
slapi_pblock_destroy (pDnSearch);
}
- *vattr_cacheable = info.vattr_cacheable;
-
return info.ret;
}
dirsrvtests/tickets/ticket47981_test.py | 345 ++++++++++++++++++++++++++++++++
ldap/servers/plugins/cos/cos_cache.c | 34 +--
2 files changed, 359 insertions(+), 20 deletions(-)
New commits:
commit 42e2df3858a4e14706d57b5c907d1d3768f4d970
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Wed Jan 7 08:59:06 2015 -0500
Ticket 47981 - COS cache doesn't properly mark vattr cache as
invalid when there are multiple suffixes
Bug Description: When rebuilding the COS cache, we check each suffix for COS entries.
If the last suffix checked does not contain any COS entries, then the
virtual attribute cache is incorrectly not invalidated. This allows
for already cached entries to hold onto the old COS attributes/values.
Fix Description: Only set the vattr_cacheable flag if a suffix contains COS entries, not
if it does not - by default the flag is not set.
https://fedorahosted.org/389/ticket/47981
Reviewed by: nhosoi(Thanks!)
diff --git a/dirsrvtests/tickets/ticket47981_test.py b/dirsrvtests/tickets/ticket47981_test.py
new file mode 100644
index 0000000..2a16ce6
--- /dev/null
+++ b/dirsrvtests/tickets/ticket47981_test.py
@@ -0,0 +1,345 @@
+import os
+import sys
+import time
+import ldap
+import ldap.sasl
+import logging
+import socket
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from constants import *
+
+log = logging.getLogger(__name__)
+
+installation_prefix = None
+
+BRANCH = 'ou=people,' + DEFAULT_SUFFIX
+USER_DN = 'uid=user1,%s' % (BRANCH)
+BRANCH_CONTAINER = 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com'
+BRANCH_COS_DEF = 'cn=nsPwPolicy_CoS,ou=people,dc=example,dc=com'
+BRANCH_PWP = 'cn=cn\\3DnsPwPolicyEntry\\2Cou\\3DPeople\\2Cdc\\3Dexample\\2Cdc\\3Dcom,' + \
+ 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com'
+BRANCH_COS_TMPL = 'cn=cn\\3DnsPwTemplateEntry\\2Cou\\3DPeople\\2Cdc\\3Dexample\\2Cdc\\3Dcom,' + \
+ 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com'
+SECOND_SUFFIX = 'o=netscaperoot'
+BE_NAME = 'netscaperoot'
+
+
+class TopologyStandalone(object):
+ def __init__(self, standalone):
+ standalone.open()
+ self.standalone = standalone
+
+
+@pytest.fixture(scope="module")
+def topology(request):
+ '''
+    This fixture is used to create a standalone topology for the 'module'.
+    At the beginning, a standalone instance may already exist.
+    A backup of the standalone instance may also exist.
+
+ Principle:
+ If standalone instance exists:
+ restart it
+ If backup of standalone exists:
+ create/rebind to standalone
+
+ restore standalone instance from backup
+ else:
+ Cleanup everything
+ remove instance
+ remove backup
+ Create instance
+ Create backup
+ '''
+ global installation_prefix
+
+ if installation_prefix:
+ args_instance[SER_DEPLOYED_DIR] = installation_prefix
+
+ standalone = DirSrv(verbose=False)
+
+ # Args for the standalone instance
+ args_instance[SER_HOST] = HOST_STANDALONE
+ args_instance[SER_PORT] = PORT_STANDALONE
+ args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+ args_standalone = args_instance.copy()
+ standalone.allocate(args_standalone)
+
+ # Get the status of the backups
+ backup_standalone = standalone.checkBackupFS()
+
+ # Get the status of the instance and restart it if it exists
+ instance_standalone = standalone.exists()
+ if instance_standalone:
+ # assuming the instance is already stopped, just wait 5 sec max
+ standalone.stop(timeout=5)
+ standalone.start(timeout=10)
+
+ if backup_standalone:
+ # The backup exist, assuming it is correct
+ # we just re-init the instance with it
+ if not instance_standalone:
+ standalone.create()
+ # Used to retrieve configuration information (dbdir, confdir...)
+ standalone.open()
+
+ # restore standalone instance from backup
+ standalone.stop(timeout=10)
+ standalone.restoreFS(backup_standalone)
+ standalone.start(timeout=10)
+
+ else:
+ # We should be here only in two conditions
+ # - This is the first time a test involve standalone instance
+ # - Something weird happened (instance/backup destroyed)
+ # so we discard everything and recreate all
+
+ # Remove the backup. So even if we have a specific backup file
+        # (e.g. backup_standalone) we clear any backup that an instance may have created
+ if backup_standalone:
+ standalone.clearBackupFS()
+
+ # Remove the instance
+ if instance_standalone:
+ standalone.delete()
+
+ # Create the instance
+ standalone.create()
+
+ # Used to retrieve configuration information (dbdir, confdir...)
+ standalone.open()
+
+ # Time to create the backups
+ standalone.stop(timeout=10)
+ standalone.backupfile = standalone.backupFS()
+ standalone.start(timeout=10)
+
+ # clear the tmp directory
+ standalone.clearTmpDir(__file__)
+
+ #
+ # Here we have standalone instance up and running
+ # Either coming from a backup recovery
+ # or from a fresh (re)init
+ # Time to return the topology
+ return TopologyStandalone(standalone)
+
+
+def addSubtreePwPolicy(inst):
+ #
+ # Add subtree policy to the people branch
+ #
+ try:
+ inst.add_s(Entry((BRANCH_CONTAINER, {
+ 'objectclass': 'top nsContainer'.split(),
+ 'cn': 'nsPwPolicyContainer'
+ })))
+ except ldap.LDAPError, e:
+ log.error('Failed to add subtree container for ou=people: error ' + e.message['desc'])
+ assert False
+
+ # Add the password policy subentry
+ try:
+ inst.add_s(Entry((BRANCH_PWP, {
+ 'objectclass': 'top ldapsubentry passwordpolicy'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
+ 'passwordMustChange': 'off',
+ 'passwordExp': 'off',
+ 'passwordHistory': 'off',
+ 'passwordMinAge': '0',
+ 'passwordChange': 'off',
+ 'passwordStorageScheme': 'ssha'
+ })))
+ except ldap.LDAPError, e:
+ log.error('Failed to add passwordpolicy: error ' + e.message['desc'])
+ assert False
+
+ # Add the COS template
+ try:
+ inst.add_s(Entry((BRANCH_COS_TMPL, {
+ 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
+ 'cosPriority': '1',
+ 'cn': 'cn=nsPwTemplateEntry,ou=people,dc=example,dc=com',
+ 'pwdpolicysubentry': BRANCH_PWP
+ })))
+ except ldap.LDAPError, e:
+ log.error('Failed to add COS template: error ' + e.message['desc'])
+ assert False
+
+ # Add the COS definition
+ try:
+ inst.add_s(Entry((BRANCH_COS_DEF, {
+ 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
+ 'costemplatedn': BRANCH_COS_TMPL,
+ 'cosAttribute': 'pwdpolicysubentry default operational-default'
+ })))
+ except ldap.LDAPError, e:
+ log.error('Failed to add COS def: error ' + e.message['desc'])
+ assert False
+ time.sleep(0.5)
+
+
+def delSubtreePwPolicy(inst):
+ try:
+ inst.delete_s(BRANCH_COS_DEF)
+ except ldap.LDAPError, e:
+ log.error('Failed to delete COS def: error ' + e.message['desc'])
+ assert False
+
+ try:
+ inst.delete_s(BRANCH_COS_TMPL)
+ except ldap.LDAPError, e:
+ log.error('Failed to delete COS template: error ' + e.message['desc'])
+ assert False
+
+ try:
+ inst.delete_s(BRANCH_PWP)
+ except ldap.LDAPError, e:
+ log.error('Failed to delete COS password policy: error ' + e.message['desc'])
+ assert False
+
+ try:
+ inst.delete_s(BRANCH_CONTAINER)
+ except ldap.LDAPError, e:
+ log.error('Failed to delete COS container: error ' + e.message['desc'])
+ assert False
+ time.sleep(0.5)
+
+
+def test_ticket47981(topology):
+ """
+ If there are multiple suffixes, and the last suffix checked does not contain any COS entries,
+ while other suffixes do, then the vattr cache is not invalidated as it should be. Then any
+ cached entries will still contain the old COS attributes/values.
+ """
+
+ log.info('Testing Ticket 47981 - Test that COS def changes are correctly reflected in affected users')
+
+ #
+ # Create a second backend that does not have any COS entries
+ #
+ log.info('Adding second suffix that will not contain any COS entries...\n')
+
+ topology.standalone.backend.create(SECOND_SUFFIX, {BACKEND_NAME: BE_NAME})
+ topology.standalone.mappingtree.create(SECOND_SUFFIX, bename=BE_NAME)
+ try:
+ topology.standalone.add_s(Entry((SECOND_SUFFIX, {
+ 'objectclass': 'top organization'.split(),
+ 'o': BE_NAME})))
+ except ldap.ALREADY_EXISTS:
+ pass
+ except ldap.LDAPError, e:
+ log.error('Failed to create suffix entry: error ' + e.message['desc'])
+ assert False
+
+ #
+ # Add People branch, it might already exist
+ #
+ log.info('Add our test entries to the default suffix, and proceed with the test...')
+
+ try:
+ topology.standalone.add_s(Entry((BRANCH, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'ou': 'level4'
+ })))
+ except ldap.ALREADY_EXISTS:
+ pass
+ except ldap.LDAPError, e:
+ log.error('Failed to add ou=people: error ' + e.message['desc'])
+ assert False
+
+ #
+ # Add a user to the branch
+ #
+ try:
+ topology.standalone.add_s(Entry((USER_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user1'
+ })))
+ except ldap.LDAPError, e:
+ log.error('Failed to add user1: error ' + e.message['desc'])
+ assert False
+
+ #
+ # Enable password policy and add the subtree policy
+ #
+ try:
+ topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')])
+ except ldap.LDAPError, e:
+ log.error('Failed to set pwpolicy-local: error ' + e.message['desc'])
+ assert False
+
+ addSubtreePwPolicy(topology.standalone)
+
+ #
+ # Now check the user has its expected passwordPolicy subentry
+ #
+ try:
+ entries = topology.standalone.search_s(USER_DN,
+ ldap.SCOPE_BASE,
+ '(objectclass=top)',
+ ['pwdpolicysubentry', 'dn'])
+ if not entries[0].hasAttr('pwdpolicysubentry'):
+ log.fatal('User does not have expected pwdpolicysubentry!')
+ assert False
+ except ldap.LDAPError, e:
+ log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.message['desc']))
+ assert False
+
+ #
+ # Delete the password policy and make sure it is removed from the same user
+ #
+ delSubtreePwPolicy(topology.standalone)
+ try:
+ entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
+ if entries[0].hasAttr('pwdpolicysubentry'):
+ log.fatal('User unexpectedly does have the pwdpolicysubentry!')
+ assert False
+ except ldap.LDAPError, e:
+ log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.message['desc']))
+ assert False
+
+ #
+    # Add the subtree policy back and see if the user now has it
+ #
+ addSubtreePwPolicy(topology.standalone)
+ try:
+ entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
+ if not entries[0].hasAttr('pwdpolicysubentry'):
+ log.fatal('User does not have expected pwdpolicysubentry!')
+ assert False
+ except ldap.LDAPError, e:
+ log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.message['desc']))
+ assert False
+
+ # If we got here the test passed
+ log.info('Test PASSED')
+
+
+def test_ticket47981_final(topology):
+ topology.standalone.stop(timeout=10)
+
+
+def run_isolated():
+ '''
+ run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..)
+ To run isolated without py.test, you need to
+ - edit this file and comment '@pytest.fixture' line before 'topology' function.
+ - set the installation prefix
+ - run this program
+ '''
+ global installation_prefix
+ installation_prefix = None
+
+ topo = topology(True)
+ test_ticket47981(topo)
+
+if __name__ == '__main__':
+ run_isolated()
\ No newline at end of file
diff --git a/ldap/servers/plugins/cos/cos_cache.c b/ldap/servers/plugins/cos/cos_cache.c
index 0d9a61e..7d8e877 100644
--- a/ldap/servers/plugins/cos/cos_cache.c
+++ b/ldap/servers/plugins/cos/cos_cache.c
@@ -260,7 +260,7 @@ static int cos_cache_add_tmpl(cosTemplates **pTemplates, cosAttrValue *dn, cosAt
/* cosDefinitions manipulation */
static int cos_cache_build_definition_list(cosDefinitions **pDefs, int *vattr_cacheable);
-static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs, int *vattr_cacheable);
+static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs);
static int cos_cache_add_defn(cosDefinitions **pDefs, cosAttrValue **dn, int cosType, cosAttrValue **tree, cosAttrValue **tmpDn, cosAttrValue **spec, cosAttrValue **pAttrs, cosAttrValue **pOverrides, cosAttrValue **pOperational, cosAttrValue **pCosMerge, cosAttrValue **pCosOpDefault);
static int cos_cache_entry_is_cos_related( Slapi_Entry *e);
@@ -619,9 +619,9 @@ static int cos_cache_build_definition_list(cosDefinitions **pDefs, int *vattr_ca
LDAPDebug( LDAP_DEBUG_TRACE, "--> cos_cache_build_definition_list\n",0,0,0);
/*
- the class of service definitions may be anywhere in the DIT,
- so our first task is to find them.
- */
+ * The class of service definitions may be anywhere in the DIT,
+ * so our first task is to find them.
+ */
attrs[0] = "namingcontexts";
attrs[1] = 0;
@@ -629,9 +629,9 @@ static int cos_cache_build_definition_list(cosDefinitions **pDefs, int *vattr_ca
LDAPDebug( LDAP_DEBUG_PLUGIN, "cos: Building class of service cache after status change.\n",0,0,0);
/*
- * XXXrbyrne: this looks really ineficient--should be using
+ * XXXrbyrne: this looks really inefficient--should be using
* slapi_get_next_suffix(), rather than searching for namingcontexts.
- */
+ */
pSuffixSearch = slapi_search_internal("",LDAP_SCOPE_BASE,"(objectclass=*)",NULL,attrs,0);
if(pSuffixSearch)
@@ -671,19 +671,21 @@ static int cos_cache_build_definition_list(cosDefinitions **pDefs, int *vattr_ca
{
/* here's a suffix, lets search it... */
if(suffixVals[valIndex]->bv_val)
- if(!cos_cache_add_dn_defs(suffixVals[valIndex]->bv_val ,pDefs, vattr_cacheable))
+ {
+ if(!cos_cache_add_dn_defs(suffixVals[valIndex]->bv_val ,pDefs))
+ {
+ *vattr_cacheable = -1;
cos_def_available = 1;
-
+ break;
+ }
+ }
valIndex++;
}
-
-
ber_bvecfree( suffixVals );
suffixVals = NULL;
}
}
}
-
} while(!slapi_entry_next_attr(pSuffixList[suffixIndex], suffixAttr, &suffixAttr));
}
suffixIndex++;
@@ -709,7 +711,6 @@ next:
slapi_pblock_destroy(pSuffixSearch);
}
-
LDAPDebug( LDAP_DEBUG_TRACE, "<-- cos_cache_build_definition_list\n",0,0,0);
return ret;
}
@@ -750,10 +751,6 @@ cos_dn_defs_cb (Slapi_Entry* e, void *callback_data)
char *norm_dn = NULL;
info=(struct dn_defs_info *)callback_data;
-
- /* assume cacheable */
- info->vattr_cacheable = -1;
-
cos_cache_add_attrval(&pDn, slapi_entry_get_dn(e));
if(slapi_entry_first_attr(e, &dnAttr)) {
goto bail;
@@ -1076,7 +1073,7 @@ bail:
#define DN_DEF_FILTER "(&(|(objectclass=cosSuperDefinition)(objectclass=cosDefinition))(objectclass=ldapsubentry))"
-static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs, int *vattr_cacheable)
+static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs)
{
Slapi_PBlock *pDnSearch = 0;
struct dn_defs_info info = {NULL, 0, 0};
@@ -1084,7 +1081,6 @@ static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs, int *vattr_ca
if (pDnSearch) {
info.ret=-1; /* assume no good defs */
info.pDefs=pDefs;
- info.vattr_cacheable = 0; /* assume not cacheable */
slapi_search_internal_set_pb(pDnSearch, dn, LDAP_SCOPE_SUBTREE,
DN_DEF_FILTER,NULL,0,
NULL,NULL,cos_get_plugin_identity(),0);
@@ -1096,8 +1092,6 @@ static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs, int *vattr_ca
slapi_pblock_destroy (pDnSearch);
}
- *vattr_cacheable = info.vattr_cacheable;
-
return info.ret;
}
ldap/servers/slapd/back-ldbm/back-ldbm.h | 4 -
ldap/servers/slapd/back-ldbm/dblayer.c | 71 +++++++++++++++++--------------
ldap/servers/slapd/back-ldbm/vlv.c | 24 ++++++++--
ldap/servers/slapd/back-ldbm/vlv_srch.c | 17 +++----
4 files changed, 70 insertions(+), 46 deletions(-)
New commits:
commit ebf24ffd5ac79adcb288960377605feb60c52d7f
Author: Noriko Hosoi <nhosoi(a)redhat.com>
Date: Mon Jan 5 13:07:07 2015 -0800
Ticket #47966 - slapd crashes during Dogtag clone reinstallation
Bug description: There were 2 VLV index issues.
1) When a VLV index is removed, it did not call dblayer_erase_index_file_
nolock, which removes the physical index file as well as a back pointer
set to the dblayer handle. The back pointer is scanned at the backend
close time where the garbage address is used to close the already removed
vlv index.
2) VLV plugin callbacks are registered in vlv_init. The function could be
called multiple times without unregster the VLV plugin callbacks, e.g.,
from bulk import, which reopens the backend instance. If create an
instance and a vlv index, then initialize the database with bulk import,
2 VLV index callbacks are registered. The first set points the old instance
address, which is already freed.
Note: the unregister callback functions are called only when the instance
is deleted in vlv_remove_callbacks via ldbm_instance_unregister_callbacks.
The callback is set as the post delete plugin with "(objectclass=nsBackendInstance)".
Fix description:
1) When a VLV index is removed, it calls dblayer_erase_index_file_nolock.
2) Before registering VLV plugin callbacks, calling unregister callbacks to
make sure cleaning up the existing callbacks.
https://fedorahosted.org/389/ticket/47966
Reviewed by rmeggins(a)redhat.com and mreynolds(a)redhat.com (Thank you, Rich and Mark!!)
diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h
index fd17453..b3badbc 100644
--- a/ldap/servers/slapd/back-ldbm/back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h
@@ -718,15 +718,15 @@ typedef struct _modify_context modify_context;
/* This structure was moved here from dblayer.c because the ldbm_instance
* structure uses the dblayer_handle structure. */
-struct tag_dblayer_handle; typedef struct tag_dblayer_handle dblayer_handle;
struct tag_dblayer_handle
{
DB* dblayer_dbp;
PRLock *dblayer_lock; /* used when anyone wants exclusive access to a file */
- dblayer_handle *dblayer_handle_next;
+ struct tag_dblayer_handle *dblayer_handle_next;
void **dblayer_handle_ai_backpointer; /* Voodo magic pointer to the place where we store a
pointer to this handle in the attrinfo structure */
};
+typedef struct tag_dblayer_handle dblayer_handle;
/* This structure was moved here from perfctrs.c so the ldbm_instance structure
* could use it. */
diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c
index 2524355..e7339fb 100644
--- a/ldap/servers/slapd/back-ldbm/dblayer.c
+++ b/ldap/servers/slapd/back-ldbm/dblayer.c
@@ -2740,7 +2740,7 @@ int dblayer_close_indexes(backend *be)
pDB = handle->dblayer_dbp;
return_value |= pDB->close(pDB,0);
next = handle->dblayer_handle_next;
- *(handle->dblayer_handle_ai_backpointer) = NULL;
+ *((dblayer_handle **)handle->dblayer_handle_ai_backpointer) = NULL;
slapi_ch_free((void**)&handle);
}
@@ -3341,39 +3341,32 @@ int dblayer_get_index_file(backend *be, struct attrinfo *a, DB** ppDB, int open_
a, &pDB);
if (0 == return_value) {
/* Opened it OK */
- dblayer_handle *handle = (dblayer_handle *)
- slapi_ch_calloc(1, sizeof(dblayer_handle));
-
- if (NULL == handle) {
- /* Memory allocation failed */
- return_value = -1;
- } else {
- dblayer_handle *prev_handle = inst->inst_handle_tail;
+ dblayer_handle *handle = (dblayer_handle *)slapi_ch_calloc(1, sizeof(dblayer_handle));
+ dblayer_handle *prev_handle = inst->inst_handle_tail;
- PR_ASSERT(NULL != pDB);
- /* Store the returned DB* in our own private list of
- * open files */
- if (NULL == prev_handle) {
+ PR_ASSERT(NULL != pDB);
+ /* Store the returned DB* in our own private list of
+ * open files */
+ if (NULL == prev_handle) {
/* List was empty */
inst->inst_handle_tail = handle;
inst->inst_handle_head = handle;
- } else {
+ } else {
/* Chain the handle onto the last structure in the
* list */
inst->inst_handle_tail = handle;
prev_handle->dblayer_handle_next = handle;
- }
- /* Stash a pointer to our wrapper structure in the
- * attrinfo structure */
- handle->dblayer_dbp = pDB;
- /* And, most importantly, return something to the caller!*/
- *ppDB = pDB;
- /* and save the hande in the attrinfo structure for
- * next time */
- a->ai_dblayer = handle;
- /* don't need to update count -- we incr'd it already */
- handle->dblayer_handle_ai_backpointer = &(a->ai_dblayer);
}
+ /* Stash a pointer to our wrapper structure in the
+ * attrinfo structure */
+ handle->dblayer_dbp = pDB;
+ /* And, most importantly, return something to the caller!*/
+ *ppDB = pDB;
+    /* and save the handle in the attrinfo structure for
+ * next time */
+ a->ai_dblayer = handle;
+ /* don't need to update count -- we incr'd it already */
+ handle->dblayer_handle_ai_backpointer = &(a->ai_dblayer);
} else {
/* Did not open it OK ! */
/* Do nothing, because return value and fact that we didn't
@@ -3445,10 +3438,10 @@ dblayer_db_remove(dblayer_private_env * env, char const path[], char const dbNam
int dblayer_erase_index_file_ex(backend *be, struct attrinfo *a,
PRBool use_lock, int no_force_checkpoint)
{
- struct ldbminfo *li = (struct ldbminfo *) be->be_database->plg_private;
- dblayer_private *priv = (dblayer_private*) li->li_dblayer_private;
- struct dblayer_private_env *pEnv = priv->dblayer_env;
- ldbm_instance *inst = (ldbm_instance *) be->be_instance_info;
+ struct ldbminfo *li = NULL;
+ dblayer_private *priv = NULL;
+ struct dblayer_private_env *pEnv = NULL;
+ ldbm_instance *inst = NULL;
dblayer_handle *handle = NULL;
char dbName[MAXPATHLEN];
char *dbNamep;
@@ -3457,9 +3450,25 @@ int dblayer_erase_index_file_ex(backend *be, struct attrinfo *a,
int rc = 0;
DB *db = 0;
- if (NULL == pEnv) /* db does not exist */
+ if ((NULL == be) || (NULL == be->be_database)) {
return rc;
-
+ }
+ inst = (ldbm_instance *)be->be_instance_info;
+ if (NULL == inst) {
+ return rc;
+ }
+ li = (struct ldbminfo *)be->be_database->plg_private;
+ if (NULL == li) {
+ return rc;
+ }
+ priv = (dblayer_private*)li->li_dblayer_private;
+ if (NULL == priv) {
+ return rc;
+ }
+ pEnv = priv->dblayer_env;
+ if (NULL == pEnv) { /* db does not exist */
+ return rc;
+ }
/* Added for bug 600401. Somehow the checkpoint thread deadlocked on
index file with this function, index file couldn't be removed on win2k.
Force a checkpoint here to break deadlock.
diff --git a/ldap/servers/slapd/back-ldbm/vlv.c b/ldap/servers/slapd/back-ldbm/vlv.c
index d061bf2..384fd46 100644
--- a/ldap/servers/slapd/back-ldbm/vlv.c
+++ b/ldap/servers/slapd/back-ldbm/vlv.c
@@ -74,7 +74,10 @@ int vlv_AddSearchEntry(Slapi_PBlock *pb, Slapi_Entry* entryBefore, Slapi_Entry*
{
ldbm_instance *inst = (ldbm_instance *)arg;
struct vlvSearch* newVlvSearch= vlvSearch_new();
- backend *be = inst->inst_be;
+ backend *be = NULL;
+ if (inst) {
+ be = inst->inst_be;
+ }
if (NULL == be) { /* backend is not associated */
vlvSearch_delete(&newVlvSearch);
@@ -427,8 +430,19 @@ vlv_init(ldbm_instance *inst)
}
/* Only need to register these callbacks for SLAPD mode... */
- if(basedn!=NULL)
+ if(basedn)
{
+ /* In case the vlv indexes are already registered, clean them up before register them. */
+ slapi_config_remove_callback(SLAPI_OPERATION_SEARCH,DSE_FLAG_PREOP,basedn,scope,indexfilter,vlv_SearchIndexEntry);
+ slapi_config_remove_callback(SLAPI_OPERATION_ADD,DSE_FLAG_PREOP,basedn,scope,searchfilter,vlv_AddSearchEntry);
+ slapi_config_remove_callback(SLAPI_OPERATION_ADD,DSE_FLAG_PREOP,basedn,scope,indexfilter,vlv_AddIndexEntry);
+ slapi_config_remove_callback(SLAPI_OPERATION_MODIFY,DSE_FLAG_PREOP,basedn,scope,searchfilter,vlv_ModifySearchEntry);
+ slapi_config_remove_callback(SLAPI_OPERATION_MODIFY,DSE_FLAG_PREOP,basedn,scope,indexfilter,vlv_ModifyIndexEntry);
+ slapi_config_remove_callback(SLAPI_OPERATION_DELETE,DSE_FLAG_PREOP,basedn,scope,searchfilter,vlv_DeleteSearchEntry);
+ slapi_config_remove_callback(SLAPI_OPERATION_DELETE,DSE_FLAG_PREOP,basedn,scope,indexfilter,vlv_DeleteIndexEntry);
+ slapi_config_remove_callback(SLAPI_OPERATION_MODRDN,DSE_FLAG_PREOP,basedn,scope,searchfilter,vlv_ModifyRDNSearchEntry);
+ slapi_config_remove_callback(SLAPI_OPERATION_MODRDN,DSE_FLAG_PREOP,basedn,scope,indexfilter,vlv_ModifyRDNIndexEntry);
+
slapi_config_register_callback(SLAPI_OPERATION_SEARCH,DSE_FLAG_PREOP,basedn,scope,indexfilter,vlv_SearchIndexEntry,(void*)inst);
slapi_config_register_callback(SLAPI_OPERATION_ADD,DSE_FLAG_PREOP,basedn,scope,searchfilter,vlv_AddSearchEntry,(void*)inst);
slapi_config_register_callback(SLAPI_OPERATION_ADD,DSE_FLAG_PREOP,basedn,scope,indexfilter,vlv_AddIndexEntry,(void*)inst);
@@ -438,7 +452,7 @@ vlv_init(ldbm_instance *inst)
slapi_config_register_callback(SLAPI_OPERATION_DELETE,DSE_FLAG_PREOP,basedn,scope,indexfilter,vlv_DeleteIndexEntry,(void*)inst);
slapi_config_register_callback(SLAPI_OPERATION_MODRDN,DSE_FLAG_PREOP,basedn,scope,searchfilter,vlv_ModifyRDNSearchEntry,(void*)inst);
slapi_config_register_callback(SLAPI_OPERATION_MODRDN,DSE_FLAG_PREOP,basedn,scope,indexfilter,vlv_ModifyRDNIndexEntry,(void*)inst);
- slapi_ch_free_string(&basedn);
+ slapi_ch_free_string(&basedn);
}
out:
@@ -448,8 +462,8 @@ out:
/* Removes callbacks from above when instance is removed. */
int
-vlv_remove_callbacks(ldbm_instance *inst) {
-
+vlv_remove_callbacks(ldbm_instance *inst)
+{
int return_value= LDAP_SUCCESS;
int scope= LDAP_SCOPE_SUBTREE;
char *basedn = NULL;
diff --git a/ldap/servers/slapd/back-ldbm/vlv_srch.c b/ldap/servers/slapd/back-ldbm/vlv_srch.c
index d7e28c1..bd94865 100644
--- a/ldap/servers/slapd/back-ldbm/vlv_srch.c
+++ b/ldap/servers/slapd/back-ldbm/vlv_srch.c
@@ -572,6 +572,7 @@ vlvIndex_delete(struct vlvIndex** ppvs)
}
}
internal_ldap_free_sort_keylist((*ppvs)->vlv_sortkey);
+ dblayer_erase_index_file_nolock((*ppvs)->vlv_be, (*ppvs)->vlv_attrinfo, 1 /* chkpt if not busy */);
attrinfo_delete(&((*ppvs)->vlv_attrinfo));
slapi_ch_free((void**)&((*ppvs)->vlv_name));
slapi_ch_free((void**)&((*ppvs)->vlv_filename));
@@ -618,7 +619,7 @@ vlvIndex_init(struct vlvIndex* p, backend *be, struct vlvSearch* pSearch, const
{
if(p->vlv_sortkey[n]->sk_matchruleoid!=NULL)
{
- create_matchrule_indexer(&p->vlv_mrpb[n],p->vlv_sortkey[n]->sk_matchruleoid,p->vlv_sortkey[n]->sk_attrtype);
+ create_matchrule_indexer(&p->vlv_mrpb[n],p->vlv_sortkey[n]->sk_matchruleoid,p->vlv_sortkey[n]->sk_attrtype);
}
}
@@ -632,7 +633,7 @@ vlvIndex_init(struct vlvIndex* p, backend *be, struct vlvSearch* pSearch, const
/* Create an attrinfo structure */
p->vlv_attrinfo->ai_type= slapi_ch_smprintf("%s%s",file_prefix,filename);
- p->vlv_attrinfo->ai_indexmask= INDEX_VLV;
+ p->vlv_attrinfo->ai_indexmask= INDEX_VLV;
/* Check if the index file actually exists */
if(li!=NULL)
@@ -657,14 +658,14 @@ vlvIndex_get_indexlength(struct vlvIndex* p, DB *db, back_txn *txn)
if(!p->vlv_indexlength_cached)
{
- DBC *dbc = NULL;
- DB_TXN *db_txn = NULL;
+ DBC *dbc = NULL;
+ DB_TXN *db_txn = NULL;
int err= 0;
- if (NULL != txn)
+ if (NULL != txn)
{
- db_txn = txn->back_txn_txn;
+ db_txn = txn->back_txn_txn;
}
- err = db->cursor(db, db_txn, &dbc, 0);
+ err = db->cursor(db, db_txn, &dbc, 0);
if(err==0)
{
DBT key= {0};
@@ -924,7 +925,7 @@ vlvIndex_createfilename(struct vlvIndex* pIndex, char **ppc)
*p= '\0';
if(strlen(filename)==0)
{
- LDAPDebug( LDAP_DEBUG_ANY, "Couldn't generate valid filename from Virtual List View Index Name (%s). Need some alphabetical characters.\n", pIndex->vlv_name, 0, 0);
+ LDAPDebug( LDAP_DEBUG_ANY, "Couldn't generate valid filename from Virtual List View Index Name (%s). Need some alphabetical characters.\n", pIndex->vlv_name, 0, 0);
filenameValid= 0;
}
/* JCM: Check if this file clashes with another VLV Index filename */