[389-commits] dirsrvtests/create_test.py dirsrvtests/data dirsrvtests/suites dirsrvtests/tickets

Mark Reynolds mreynolds at fedoraproject.org
Wed Feb 18 00:06:23 UTC 2015


 dirsrvtests/create_test.py                                 |  577 +++++
 dirsrvtests/data/basic/dse.ldif.broken                     |   95 
 dirsrvtests/data/ticket47953.ldif                          |   27 
 dirsrvtests/data/ticket47953/ticket47953.ldif              |   27 
 dirsrvtests/suites/basic/basic_test.py                     |  695 ++++++
 dirsrvtests/suites/betxns/betxn_test.py                    |  187 +
 dirsrvtests/suites/clu/clu_test.py                         |  107 
 dirsrvtests/suites/clu/db2ldif_test.py                     |   84 
 dirsrvtests/suites/config/config_test.py                   |  189 +
 dirsrvtests/suites/dynamic-plugins/plugin_tests.py         |  295 +-
 dirsrvtests/suites/dynamic-plugins/stress_tests.py         |   14 
 dirsrvtests/suites/dynamic-plugins/test_dynamic_plugins.py |    2 
 dirsrvtests/suites/filter/filter_test.py                   |  144 +
 dirsrvtests/suites/memory_leaks/range_search_test.py       |  145 +
 dirsrvtests/suites/password/password_test.py               |  135 +
 dirsrvtests/suites/password/pwdAdmin_test.py               |  439 +++
 dirsrvtests/suites/password/pwdPolicy_test.py              |   74 
 dirsrvtests/suites/replication/cleanallruv_test.py         | 1486 +++++++++++++
 dirsrvtests/suites/rootdn_plugin/rootdn_plugin_test.py     |  770 ++++++
 dirsrvtests/suites/schema/test_schema.py                   |   14 
 dirsrvtests/tickets/create_testcase.py                     |  530 ----
 dirsrvtests/tickets/ticket365_test.py                      |  161 +
 dirsrvtests/tickets/ticket47384_test.py                    |  159 +
 dirsrvtests/tickets/ticket47953_test.py                    |    2 
 24 files changed, 5640 insertions(+), 718 deletions(-)

New commits:
commit d33676f840672f6377ccdcce93c0db38039028ff
Author: Mark Reynolds <mreynolds at redhat.com>
Date:   Tue Feb 17 12:18:50 2015 -0500

    Ticket 48003 - build "suite" framework
    
    Description:  Started building the lib389 test suite structure in 389 DS.
                  The "basic" test suite from TET has been ported.
    
                  The cleanallruv test suite was written from scratch, as well as
                  the rootdn plugin test suite.
    
    https://fedorahosted.org/389/ticket/48003
    
    Reviewed by: nhosoi(Thanks!!)
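
For reference, generating a suite skeleton with the new script looks roughly like this
(the suite name "referint" and output filename are illustrative; the options correspond
to the optparse definitions in the diff below):

    python create_test.py -s referint -o referint_test.py

This emits a template containing the topology() fixture, test_referint_init(), a
placeholder test_referint_#####(), test_referint_final(), and run_isolated().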

diff --git a/dirsrvtests/create_test.py b/dirsrvtests/create_test.py
new file mode 100755
index 0000000..1f2971c
--- /dev/null
+++ b/dirsrvtests/create_test.py
@@ -0,0 +1,577 @@
+#!/usr/bin/python
+
+import sys
+import optparse
+
+'''
+    This script generates a template test script that handles the
+    non-interesting parts of a test script:
+        topology,
+        test (to be completed by the user),
+        final,
+        and run-isolated functions
+'''
+
+
+def displayUsage():
+    print ('\nUsage:\ncreate_test.py -t|--ticket <ticket number> -s|--suite <suite name> ' +
+           '[ -i|--instances <number of standalone instances> [ -m|--masters <number of masters> ' +
+           '-h|--hubs <number of hubs> -c|--consumers <number of consumers> ] ' +
+           '-o|--outputfile ]\n')
+    print ('If only "-t" is provided then a single standalone instance is created.  ' +
+           'Or you can create a test suite script using "-s|--suite" instead of using "-t|--ticket".  ' +
+           'The "-i" option can add multiple standalone instances (maximum 10).  ' +
+           'However, you can not mix "-i" with the replication options (-m, -h, -c).  ' +
+           'There is a maximum of 10 masters, 10 hubs, and 10 consumers.')
+    exit(1)
+
+desc = 'Script to generate an initial lib389 test script.  ' + \
+       'This generates the topology, test, final, and run-isolated functions.'
+
+if len(sys.argv) > 0:
+    parser = optparse.OptionParser(description=desc, add_help_option=False)
+
+    # Script options
+    parser.add_option('-t', '--ticket', dest='ticket', default=None)
+    parser.add_option('-s', '--suite', dest='suite', default=None)
+    parser.add_option('-i', '--instances', dest='inst', default=None)
+    parser.add_option('-m', '--masters', dest='masters', default='0')
+    parser.add_option('-h', '--hubs', dest='hubs', default='0')
+    parser.add_option('-c', '--consumers', dest='consumers', default='0')
+    parser.add_option('-o', '--outputfile', dest='filename', default=None)
+
+    # Validate the options
+    try:
+        (args, opts) = parser.parse_args()
+    except:
+        displayUsage()
+
+    if args.ticket is None and args.suite is None:
+        print 'Missing required ticket number/suite name'
+        displayUsage()
+
+    if args.ticket and args.suite:
+        print 'You must choose either "-t|--ticket" or "-s|--suite", but not both.'
+        displayUsage()
+
+    if int(args.masters) == 0:
+        if int(args.hubs) > 0 or int(args.consumers) > 0:
+            print 'You must use "-m|--masters" if you want to have hubs and/or consumers'
+            displayUsage()
+
+    if not args.masters.isdigit() or int(args.masters) > 10 or int(args.masters) < 0:
+        print 'Invalid value for "--masters", it must be a number and it can not be greater than 10'
+        displayUsage()
+
+    if not args.hubs.isdigit() or int(args.hubs) > 10 or int(args.hubs) < 0:
+        print 'Invalid value for "--hubs", it must be a number and it can not be greater than 10'
+        displayUsage()
+
+    if not args.consumers.isdigit() or int(args.consumers) > 10 or int(args.consumers) < 0:
+        print 'Invalid value for "--consumers", it must be a number and it can not be greater than 10'
+        displayUsage()
+
+    if args.inst:
+        if not args.inst.isdigit() or int(args.inst) > 10 or int(args.inst) < 1:
+            print ('Invalid value for "--instances", it must be a number greater than 0 ' +
+                   'and not greater than 10')
+            displayUsage()
+        if int(args.inst) > 0:
+            if int(args.masters) > 0 or int(args.hubs) > 0 or int(args.consumers) > 0:
+                print 'You can not mix "--instances" with replication.'
+                displayUsage()
+
+    # Extract usable values
+    masters = int(args.masters)
+    hubs = int(args.hubs)
+    consumers = int(args.consumers)
+    ticket = args.ticket
+    suite = args.suite
+    if not args.inst:
+        instances = 1
+    else:
+        instances = int(args.inst)
+    filename = args.filename
+
+    #
+    # Create/open the new test script file
+    #
+    if not filename:
+        if ticket:
+            filename = 'ticket' + ticket + '_test.py'
+        else:
+            # suite
+            filename = suite + '_test.py'
+
+    try:
+        TEST = open(filename, "w")
+    except IOError:
+        print "Can\'t open file:", filename
+        exit(1)
+
+    #
+    # Write the imports
+    #
+    TEST.write('import os\nimport sys\nimport time\nimport ldap\nimport logging\nimport pytest\n')
+    TEST.write('from lib389 import DirSrv, Entry, tools, tasks\nfrom lib389.tools import DirSrvTools\n' +
+               'from lib389._constants import *\nfrom lib389.properties import *\n' +
+               'from lib389.tasks import *\nfrom lib389.utils import *\n\n')
+
+    #
+    # Set the logger and installation prefix
+    #
+    TEST.write('logging.getLogger(__name__).setLevel(logging.DEBUG)\n')
+    TEST.write('log = logging.getLogger(__name__)\n\n')
+    TEST.write('installation1_prefix = None\n\n\n')
+
+    #
+    # Write the replication or standalone classes
+    #
+    repl_deployment = False
+    if masters + hubs + consumers > 0:
+        #
+        # Write the replication class
+        #
+        repl_deployment = True
+
+        TEST.write('class TopologyReplication(object):\n')
+        TEST.write('    def __init__(self')
+        for idx in range(masters):
+            TEST.write(', master' + str(idx + 1))
+        for idx in range(hubs):
+            TEST.write(', hub' + str(idx + 1))
+        for idx in range(consumers):
+            TEST.write(', consumer' + str(idx + 1))
+        TEST.write('):\n')
+
+        for idx in range(masters):
+            TEST.write('        master' + str(idx + 1) + '.open()\n')
+            TEST.write('        self.master' + str(idx + 1) + ' = master' + str(idx + 1) + '\n')
+        for idx in range(hubs):
+            TEST.write('        hub' + str(idx + 1) + '.open()\n')
+            TEST.write('        self.hub' + str(idx + 1) + ' = hub' + str(idx + 1) + '\n')
+        for idx in range(consumers):
+            TEST.write('        consumer' + str(idx + 1) + '.open()\n')
+            TEST.write('        self.consumer' + str(idx + 1) + ' = consumer' + str(idx + 1) + '\n')
+        TEST.write('\n\n')
+    else:
+        #
+        # Write the standalone class
+        #
+        TEST.write('class TopologyStandalone(object):\n')
+        TEST.write('    def __init__(self')
+        for idx in range(instances):
+            idx += 1
+            if idx == 1:
+                idx = ''
+            else:
+                idx = str(idx)
+            TEST.write(', standalone' + idx)
+        TEST.write('):\n')
+
+        for idx in range(instances):
+            idx += 1
+            if idx == 1:
+                idx = ''
+            else:
+                idx = str(idx)
+            TEST.write('        standalone' + idx + '.open()\n')
+            TEST.write('        self.standalone' + idx + ' = standalone' + idx + '\n')
+        TEST.write('\n\n')
+
+    #
+    # Write the 'topology function'
+    #
+    TEST.write('@pytest.fixture(scope="module")\n')
+    TEST.write('def topology(request):\n')
+    TEST.write('    global installation1_prefix\n')
+    TEST.write('    if installation1_prefix:\n')
+    TEST.write('        args_instance[SER_DEPLOYED_DIR] = installation1_prefix\n\n')
+
+    if repl_deployment:
+        #
+        # Create the replication instances
+        #
+        for idx in range(masters):
+            idx = str(idx + 1)
+            TEST.write('    # Creating master ' + idx + '...\n')
+            TEST.write('    master' + idx + ' = DirSrv(verbose=False)\n')
+            TEST.write('    args_instance[SER_HOST] = HOST_MASTER_' + idx + '\n')
+            TEST.write('    args_instance[SER_PORT] = PORT_MASTER_' + idx + '\n')
+            TEST.write('    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_' + idx + '\n')
+            TEST.write('    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX\n')
+            TEST.write('    args_master = args_instance.copy()\n')
+            TEST.write('    master' + idx + '.allocate(args_master)\n')
+            TEST.write('    instance_master' + idx + ' = master' + idx + '.exists()\n')
+            TEST.write('    if instance_master' + idx + ':\n')
+            TEST.write('        master' + idx + '.delete()\n')
+            TEST.write('    master' + idx + '.create()\n')
+            TEST.write('    master' + idx + '.open()\n')
+            TEST.write('    master' + idx + '.replica.enableReplication(suffix=SUFFIX, ' +
+                                            'role=REPLICAROLE_MASTER, ' +
+                                            'replicaId=REPLICAID_MASTER_' + idx + ')\n\n')
+
+        for idx in range(hubs):
+            idx = str(idx + 1)
+            TEST.write('    # Creating hub ' + idx + '...\n')
+            TEST.write('    hub' + idx + ' = DirSrv(verbose=False)\n')
+            TEST.write('    args_instance[SER_HOST] = HOST_HUB_' + idx + '\n')
+            TEST.write('    args_instance[SER_PORT] = PORT_HUB_' + idx + '\n')
+            TEST.write('    args_instance[SER_SERVERID_PROP] = SERVERID_HUB_' + idx + '\n')
+            TEST.write('    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX\n')
+            TEST.write('    args_hub = args_instance.copy()\n')
+            TEST.write('    hub' + idx + '.allocate(args_hub)\n')
+            TEST.write('    instance_hub' + idx + ' = hub' + idx + '.exists()\n')
+            TEST.write('    if instance_hub' + idx + ':\n')
+            TEST.write('        hub' + idx + '.delete()\n')
+            TEST.write('    hub' + idx + '.create()\n')
+            TEST.write('    hub' + idx + '.open()\n')
+            TEST.write('    hub' + idx + '.replica.enableReplication(suffix=SUFFIX, ' +
+                                            'role=REPLICAROLE_HUB, ' +
+                                            'replicaId=REPLICAID_HUB_' + idx + ')\n\n')
+
+        for idx in range(consumers):
+            idx = str(idx + 1)
+            TEST.write('    # Creating consumer ' + idx + '...\n')
+            TEST.write('    consumer' + idx + ' = DirSrv(verbose=False)\n')
+            TEST.write('    args_instance[SER_HOST] = HOST_CONSUMER_' + idx + '\n')
+            TEST.write('    args_instance[SER_PORT] = PORT_CONSUMER_' + idx + '\n')
+            TEST.write('    args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_' + idx + '\n')
+            TEST.write('    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX\n')
+            TEST.write('    args_consumer = args_instance.copy()\n')
+            TEST.write('    consumer' + idx + '.allocate(args_consumer)\n')
+            TEST.write('    instance_consumer' + idx + ' = consumer' + idx + '.exists()\n')
+            TEST.write('    if instance_consumer' + idx + ':\n')
+            TEST.write('        consumer' + idx + '.delete()\n')
+            TEST.write('    consumer' + idx + '.create()\n')
+            TEST.write('    consumer' + idx + '.open()\n')
+            TEST.write('    consumer' + idx + '.replica.enableReplication(suffix=SUFFIX, ' +
+                                            'role=REPLICAROLE_CONSUMER, ' +
+                                            'replicaId=CONSUMER_REPLICAID)\n\n')
+
+        #
+        # Create the master agreements
+        #
+        TEST.write('    #\n')
+        TEST.write('    # Create all the agreements\n')
+        TEST.write('    #\n')
+        agmt_count = 0
+        for idx in range(masters):
+            master_idx = idx + 1
+            for idx in range(masters):
+                #
+                # Create agreements with the other masters (master -> master)
+                #
+                idx += 1
+                if master_idx == idx:
+                    # skip ourselves
+                    continue
+                TEST.write('    # Creating agreement from master ' + str(master_idx) + ' to master ' + str(idx) + '\n')
+                TEST.write("    properties = {RA_NAME:      r'meTo_$host:$port',\n")
+                TEST.write("                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],\n")
+                TEST.write("                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],\n")
+                TEST.write("                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],\n")
+                TEST.write("                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}\n")
+                TEST.write('    m' + str(master_idx) + '_m' + str(idx) + '_agmt = master' + str(master_idx) +
+                            '.agreement.create(suffix=SUFFIX, host=master' +
+                            str(idx) + '.host, port=master' + str(idx) + '.port, properties=properties)\n')
+                TEST.write('    if not m' + str(master_idx) + '_m' + str(idx) + '_agmt:\n')
+                TEST.write('        log.fatal("Fail to create a master -> master replica agreement")\n')
+                TEST.write('        sys.exit(1)\n')
+                TEST.write('    log.debug("%s created" % m' + str(master_idx) + '_m' + str(idx) + '_agmt)\n\n')
+                agmt_count += 1
+
+            for idx in range(hubs):
+                idx += 1
+                #
+                # Create agreements from each master to each hub (master -> hub)
+                #
+                TEST.write('    # Creating agreement from master ' + str(master_idx) + ' to hub ' + str(idx) + '\n')
+                TEST.write("    properties = {RA_NAME:      r'meTo_$host:$port',\n")
+                TEST.write("                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],\n")
+                TEST.write("                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],\n")
+                TEST.write("                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],\n")
+                TEST.write("                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}\n")
+                TEST.write('    m' + str(master_idx) + '_h' + str(idx) + '_agmt = master' + str(master_idx) +
+                            '.agreement.create(suffix=SUFFIX, host=hub' +
+                            str(idx) + '.host, port=hub' + str(idx) + '.port, properties=properties)\n')
+                TEST.write('    if not m' + str(master_idx) + '_h' + str(idx) + '_agmt:\n')
+                TEST.write('        log.fatal("Fail to create a master -> hub replica agreement")\n')
+                TEST.write('        sys.exit(1)\n')
+                TEST.write('    log.debug("%s created" % m' + str(master_idx) + '_h' + str(idx) + '_agmt)\n\n')
+                agmt_count += 1
+
+        #
+        # Create the hub agreements
+        #
+        for idx in range(hubs):
+            hub_idx = idx + 1
+            #
+            # Add agreements from each hub to each consumer (hub -> consumer)
+            #
+            for idx in range(consumers):
+                idx += 1
+                #
+                # Create agreements from each hub to each consumer
+                #
+                TEST.write('    # Creating agreement from hub ' + str(hub_idx) + ' to consumer ' + str(idx) + '\n')
+                TEST.write("    properties = {RA_NAME:      r'meTo_$host:$port',\n")
+                TEST.write("                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],\n")
+                TEST.write("                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],\n")
+                TEST.write("                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],\n")
+                TEST.write("                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}\n")
+                TEST.write('    h' + str(hub_idx) + '_c' + str(idx) + '_agmt = hub' +
+                            str(hub_idx) + '.agreement.create(suffix=SUFFIX, host=consumer' +
+                            str(idx) + '.host, port=consumer' + str(idx) + '.port, properties=properties)\n')
+                TEST.write('    if not h' + str(hub_idx) + '_c' + str(idx) + '_agmt:\n')
+                TEST.write('        log.fatal("Fail to create a hub -> consumer replica agreement")\n')
+                TEST.write('        sys.exit(1)\n')
+                TEST.write('    log.debug("%s created" % h' + str(hub_idx) + '_c' + str(idx) + '_agmt)\n\n')
+                agmt_count += 1
+
+        if hubs == 0:
+            #
+            # No Hubs, see if there are any consumers to create agreements to...
+            #
+            for idx in range(masters):
+                master_idx = idx + 1
+                #
+                # Create agreements with the consumers (master -> consumer)
+                #
+                for idx in range(consumers):
+                    idx += 1
+                    #
+                    # Create agreements from each master to each consumer
+                    #
+                    TEST.write('    # Creating agreement from master ' + str(master_idx) +
+                               ' to consumer ' + str(idx) + '\n')
+                    TEST.write("    properties = {RA_NAME:      r'meTo_$host:$port',\n")
+                    TEST.write("                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],\n")
+                    TEST.write("                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],\n")
+                    TEST.write("                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],\n")
+                    TEST.write("                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}\n")
+                    TEST.write('    m' + str(master_idx) + '_c' + str(idx) + '_agmt = master' + str(master_idx) +
+                                '.agreement.create(suffix=SUFFIX, host=consumer' +
+                                str(idx) + '.host, port=consumer' + str(idx) +
+                                '.port, properties=properties)\n')
+                    TEST.write('    if not m' + str(master_idx) + '_c' + str(idx) + '_agmt:\n')
+                    TEST.write('        log.fatal("Fail to create a master -> consumer replica agreement")\n')
+                    TEST.write('        sys.exit(1)\n')
+                    TEST.write('    log.debug("%s created" % m' + str(master_idx) + '_c' + str(idx) + '_agmt)\n\n')
+                    agmt_count += 1
+
+        #
+        # Add a sleep that allows all the agreements to get situated
+        #
+        TEST.write('    # Allow the replicas to get situated with the new agreements...\n')
+        TEST.write('    time.sleep(5)\n\n')
+
+        #
+        # Write the replication initializations
+        #
+        TEST.write('    #\n')
+        TEST.write('    # Initialize all the agreements\n')
+        TEST.write('    #\n')
+
+        # Masters
+        for idx in range(masters):
+            idx += 1
+            if idx == 1:
+                continue
+            TEST.write('    master1.agreement.init(SUFFIX, HOST_MASTER_' +
+                       str(idx) + ', PORT_MASTER_' + str(idx) + ')\n')
+            TEST.write('    master1.waitForReplInit(m1_m' + str(idx) + '_agmt)\n')
+
+        # Hubs
+        consumers_inited = False
+        for idx in range(hubs):
+            idx += 1
+            TEST.write('    master1.agreement.init(SUFFIX, HOST_HUB_' +
+                   str(idx) + ', PORT_HUB_' + str(idx) + ')\n')
+            TEST.write('    master1.waitForReplInit(m1_h' + str(idx) + '_agmt)\n')
+            for idx in range(consumers):
+                if consumers_inited:
+                    continue
+                idx += 1
+                TEST.write('    hub1.agreement.init(SUFFIX, HOST_CONSUMER_' +
+                           str(idx) + ', PORT_CONSUMER_' + str(idx) + ')\n')
+                TEST.write('    hub1.waitForReplInit(h1_c' + str(idx) + '_agmt)\n')
+            consumers_inited = True
+
+        # Consumers (master -> consumer)
+        if hubs == 0:
+            for idx in range(consumers):
+                idx += 1
+                TEST.write('    master1.agreement.init(SUFFIX, HOST_CONSUMER_' +
+                           str(idx) + ', PORT_CONSUMER_' + str(idx) + ')\n')
+                TEST.write('    master1.waitForReplInit(m1_c' + str(idx) + '_agmt)\n')
+
+        TEST.write('\n')
+
+        #
+        # Write the replication check
+        #
+        if agmt_count > 0:
+            # Find the lowest replica type in the deployment(consumer -> master)
+            if consumers > 0:
+                replica = 'consumer1'
+            elif hubs > 0:
+                replica = 'hub1'
+            else:
+                replica = 'master2'
+            TEST.write('    # Check replication is working...\n')
+            TEST.write('    if master1.testReplication(DEFAULT_SUFFIX, ' + replica + '):\n')
+            TEST.write("        log.info('Replication is working.')\n")
+            TEST.write('    else:\n')
+            TEST.write("        log.fatal('Replication is not working.')\n")
+            TEST.write('        assert False\n')
+            TEST.write('\n')
+
+        #
+        # Write the final steps for replication
+        #
+        TEST.write('    # Clear out the tmp dir\n')
+        TEST.write('    master1.clearTmpDir(__file__)\n\n')
+        TEST.write('    return TopologyReplication(master1')
+        for idx in range(masters):
+            idx += 1
+            if idx == 1:
+                continue
+            TEST.write(', master' + str(idx))
+        for idx in range(hubs):
+            TEST.write(', hub' + str(idx + 1))
+        for idx in range(consumers):
+            TEST.write(', consumer' + str(idx + 1))
+        TEST.write(')\n')
+    else:
+        #
+        # Standalone servers
+        #
+
+        # Args for the standalone instance
+        for idx in range(instances):
+            idx += 1
+            if idx == 1:
+                idx = ''
+            else:
+                idx = str(idx)
+            TEST.write('    # Creating standalone instance ' + idx + '...\n')
+            TEST.write('    standalone' + idx + ' = DirSrv(verbose=False)\n')
+            TEST.write('    args_instance[SER_HOST] = HOST_STANDALONE' + idx + '\n')
+            TEST.write('    args_instance[SER_PORT] = PORT_STANDALONE' + idx + '\n')
+            TEST.write('    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE' + idx + '\n')
+            TEST.write('    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX\n')
+            TEST.write('    args_standalone' + idx + ' = args_instance.copy()\n')
+            TEST.write('    standalone' + idx + '.allocate(args_standalone' + idx + ')\n')
+
+            # Get the status of the instance and restart it if it exists
+            TEST.write('    instance_standalone' + idx + ' = standalone' + idx + '.exists()\n')
+
+            # Remove the instance
+            TEST.write('    if instance_standalone' + idx + ':\n')
+            TEST.write('        standalone' + idx + '.delete()\n')
+
+            # Create and open the instance
+            TEST.write('    standalone' + idx + '.create()\n')
+            TEST.write('    standalone' + idx + '.open()\n\n')
+
+        TEST.write('    # Clear out the tmp dir\n')
+        TEST.write('    standalone.clearTmpDir(__file__)\n')
+        TEST.write('\n')
+        TEST.write('    return TopologyStandalone(standalone')
+        for idx in range(instances):
+            idx += 1
+            if idx == 1:
+                continue
+            TEST.write(', standalone' + str(idx))
+        TEST.write(')\n')
+
+    TEST.write('\n\n')
+
+    #
+    # Write the test function
+    #
+    if ticket:
+        TEST.write('def test_ticket' + ticket + '(topology):\n')
+        TEST.write("    '''\n")
+        if repl_deployment:
+            TEST.write('    Write your replication testcase here.\n\n')
+            TEST.write('    To access each DirSrv instance use:  topology.master1, topology.master2,\n' +
+                       '        ..., topology.hub1, ..., topology.consumer1, ...\n')
+        else:
+            TEST.write('    Write your testcase here...\n')
+        TEST.write("    '''\n\n")
+        TEST.write("    log.info('Test complete')\n")
+        TEST.write("\n\n")
+    else:
+        # For suite we start with an init function
+        TEST.write('def test_' + suite + '_init(topology):\n')
+        TEST.write("    '''\n")
+        TEST.write('    Write any test suite initialization here(if needed)\n')
+        TEST.write("    '''\n\n    return\n\n\n")
+
+        # Write the first initial empty test function
+        TEST.write('def test_' + suite + '_#####(topology):\n')
+        TEST.write("    '''\n")
+        TEST.write('    Write a single test here...\n')
+        TEST.write("    '''\n\n    return\n\n\n")
+
+    #
+    # Write the final function here - delete each instance
+    #
+    if ticket:
+        TEST.write('def test_ticket' + ticket + '_final(topology):\n')
+    else:
+        # suite
+        TEST.write('def test_' + suite + '_final(topology):\n')
+    if repl_deployment:
+        for idx in range(masters):
+            idx += 1
+            TEST.write('    topology.master' + str(idx) + '.delete()\n')
+        for idx in range(hubs):
+            idx += 1
+            TEST.write('    topology.hub' + str(idx) + '.delete()\n')
+        for idx in range(consumers):
+            idx += 1
+            TEST.write('    topology.consumer' + str(idx) + '.delete()\n')
+    else:
+        for idx in range(instances):
+            idx += 1
+            if idx == 1:
+                idx = ''
+            else:
+                idx = str(idx)
+            TEST.write('    topology.standalone' + idx + '.delete()\n')
+
+    if ticket:
+        TEST.write("    log.info('Testcase PASSED')\n")
+    else:
+        # suite
+        TEST.write("    log.info('" + suite + " test suite PASSED')\n")
+    TEST.write('\n\n')
+
+    #
+    # Write the main function
+    #
+    TEST.write('def run_isolated():\n')
+    TEST.write('    global installation1_prefix\n')
+    TEST.write('    installation1_prefix = None\n\n')
+    TEST.write('    topo = topology(True)\n')
+    if ticket:
+        TEST.write('    test_ticket' + ticket + '(topo)\n')
+        TEST.write('    test_ticket' + ticket + '_final(topo)\n')
+    else:
+        # suite
+        TEST.write('    test_' + suite + '_init(topo)\n')
+        TEST.write('    test_' + suite + '_#####(topo)\n')
+        TEST.write('    test_' + suite + '_final(topo)\n')
+    TEST.write('\n\n')
+
+    TEST.write("if __name__ == '__main__':\n")
+    TEST.write('    run_isolated()\n\n')
+
+    #
+    # Done, close things up
+    #
+    TEST.close()
+    print('Created: ' + filename)
diff --git a/dirsrvtests/data/basic/dse.ldif.broken b/dirsrvtests/data/basic/dse.ldif.broken
new file mode 100644
index 0000000..489b443
--- /dev/null
+++ b/dirsrvtests/data/basic/dse.ldif.broken
@@ -0,0 +1,95 @@
+dn:
+objectClass: top
+aci: (targetattr != "aci")(version 3.0; aci "rootdse anon read access"; allow(
+ read,search,compare) userdn="ldap:///anyone";)
+creatorsName: cn=server,cn=plugins,cn=config
+modifiersName: cn=server,cn=plugins,cn=config
+createTimestamp: 20150204165610Z
+modifyTimestamp: 20150204165610Z
+
+dn: cn=config
+cn: config
+objectClass: top
+objectClass: extensibleObject
+objectClass: nsslapdConfig
+nsslapd-schemadir: /etc/dirsrv/slapd-localhost/schema
+nsslapd-lockdir: /var/lock/dirsrv/slapd-localhost
+nsslapd-tmpdir: /tmp
+nsslapd-certdir: /etc/dirsrv/slapd-localhost
+nsslapd-ldifdir: /var/lib/dirsrv/slapd-localhost/ldif
+nsslapd-bakdir: /var/lib/dirsrv/slapd-localhost/bak
+nsslapd-rundir: /var/run/dirsrv
+nsslapd-instancedir: /usr/lib64/dirsrv/slapd-localhost
+nsslapd-accesslog-logging-enabled: on
+nsslapd-accesslog-maxlogsperdir: 10
+nsslapd-accesslog-mode: 600
+nsslapd-accesslog-maxlogsize: 100
+nsslapd-accesslog-logrotationtime: 1
+nsslapd-accesslog-logrotationtimeunit: day
+nsslapd-accesslog-logrotationsync-enabled: off
+nsslapd-accesslog-logrotationsynchour: 0
+nsslapd-accesslog-logrotationsyncmin: 0
+nsslapd-accesslog: /var/log/dirsrv/slapd-localhost/access
+nsslapd-enquote-sup-oc: off
+nsslapd-localhost: localhost.localdomain
+nsslapd-schemacheck: on
+nsslapd-syntaxcheck: on
+nsslapd-dn-validate-strict: off
+nsslapd-rewrite-rfc1274: off
+nsslapd-return-exact-case: on
+nsslapd-ssl-check-hostname: on
+nsslapd-validate-cert: warn
+nsslapd-allow-unauthenticated-binds: off
+nsslapd-require-secure-binds: off
+nsslapd-allow-anonymous####-access: on
+nsslapd-localssf: 71
+nsslapd-minssf: 0
+nsslapd-port: 389
+nsslapd-localuser: nobody
+nsslapd-errorlog-logging-enabled: on
+nsslapd-errorlog-mode: 600
+nsslapd-errorlog-maxlogsperdir: 2
+nsslapd-errorlog-maxlogsize: 100
+nsslapd-errorlog-logrotationtime: 1
+nsslapd-errorlog-logrotationtimeunit: week
+nsslapd-errorlog-logrotationsync-enabled: off
+nsslapd-errorlog-logrotationsynchour: 0
+nsslapd-errorlog-logrotationsyncmin: 0
+nsslapd-errorlog: /var/log/dirsrv/slapd-localhost/errors
+nsslapd-auditlog: /var/log/dirsrv/slapd-localhost/audit
+nsslapd-auditlog-mode: 600
+nsslapd-auditlog-maxlogsize: 100
+nsslapd-auditlog-logrotationtime: 1
+nsslapd-auditlog-logrotationtimeunit: day
+nsslapd-rootdn: cn=dm
+nsslapd-maxdescriptors: 1024
+nsslapd-max-filter-nest-level: 40
+nsslapd-ndn-cache-enabled: on
+nsslapd-sasl-mapping-fallback: off
+nsslapd-dynamic-plugins: off
+nsslapd-allow-hashed-passwords: off
+nsslapd-ldapifilepath: /var/run/slapd-localhost.socket
+nsslapd-ldapilisten: off
+nsslapd-ldapiautobind: off
+nsslapd-ldapimaprootdn: cn=dm
+nsslapd-ldapimaptoentries: off
+nsslapd-ldapiuidnumbertype: uidNumber
+nsslapd-ldapigidnumbertype: gidNumber
+nsslapd-ldapientrysearchbase: dc=example,dc=com
+nsslapd-defaultnamingcontext: dc=example,dc=com
+aci: (targetattr="*")(version 3.0; acl "Configuration Administrators Group"; a
+ llow (all) groupdn="ldap:///cn=Configuration Administrators,ou=Groups,ou=Topo
+ logyManagement,o=NetscapeRoot";)
+aci: (targetattr="*")(version 3.0; acl "Configuration Administrator"; allow (a
+ ll) userdn="ldap:///uid=admin,ou=Administrators,ou=TopologyManagement,o=Netsc
+ apeRoot";)
+aci: (targetattr = "*")(version 3.0; acl "SIE Group"; allow (all) groupdn = "l
+ dap:///cn=slapd-localhost,cn=389 Directory Server,cn=Server Group,cn=localhos
+ t.localdomain,ou=example.com,o=NetscapeRoot";)
+modifiersName: cn=dm
+modifyTimestamp: 20150205195242Z
+nsslapd-auditlog-logging-enabled: on
+nsslapd-auditlog-logging-hide-unhashed-pw: off
+nsslapd-rootpw: {SSHA}AQH9bTYZW4kfkfyHg1k+lG88H2dFOuwakzFEpw==
+numSubordinates: 10
+
diff --git a/dirsrvtests/data/ticket47953.ldif b/dirsrvtests/data/ticket47953.ldif
deleted file mode 100644
index e59977e..0000000
--- a/dirsrvtests/data/ticket47953.ldif
+++ /dev/null
@@ -1,27 +0,0 @@
-dn: dc=example,dc=com
-objectClass: top
-objectClass: domain
-dc: example
-aci: (targetattr!="userPassword")(version 3.0; acl "Enable anonymous access";
- allow (read, search, compare) userdn="ldap:///anyone";)
-aci: (targetattr="carLicense || description || displayName || facsimileTelepho
- neNumber || homePhone || homePostalAddress || initials || jpegPhoto || labele
- dURI || mail || mobile || pager || photo || postOfficeBox || postalAddress ||
-  postalCode || preferredDeliveryMethod || preferredLanguage || registeredAddr
- ess || roomNumber || secretary || seeAlso || st || street || telephoneNumber
- || telexNumber || title || userCertificate || userPassword || userSMIMECertif
- icate || x500UniqueIdentifier")(version 3.0; acl "Enable self write for commo
- n attributes"; allow (write) userdn="ldap:///self";)
-aci: (targetattr ="fffff")(version 3.0;acl "Directory Administrators Group";al
- low (all) (groupdn = "ldap:///cn=Directory Administrators, dc=example,dc=com"
- );)
-aci: (targetattr="*")(version 3.0; acl "Configuration Administrators Group"; a
- llow (all) groupdn="ldap:///cn=Configuration Administrators,ou=Groups,ou=Topo
- logyManagement,o=NetscapeRoot";)
-aci: (targetattr="*")(version 3.0; acl "Configuration Administrator"; allow (a
- ll) userdn="ldap:///uid=admin,ou=Administrators,ou=TopologyManagement,o=Netsc
- apeRoot";)
-aci: (targetattr = "*")(version 3.0; acl "TEST ACI"; allow (writ
- e) groupdn = "ldap:///cn=slapd-localhost,cn=389 Directory Server,cn=Server Gr
- oup,cn=localhost.localdomain,ou=example.com,o=NetscapeRoot";)
-
diff --git a/dirsrvtests/data/ticket47953/ticket47953.ldif b/dirsrvtests/data/ticket47953/ticket47953.ldif
new file mode 100644
index 0000000..e59977e
--- /dev/null
+++ b/dirsrvtests/data/ticket47953/ticket47953.ldif
@@ -0,0 +1,27 @@
+dn: dc=example,dc=com
+objectClass: top
+objectClass: domain
+dc: example
+aci: (targetattr!="userPassword")(version 3.0; acl "Enable anonymous access";
+ allow (read, search, compare) userdn="ldap:///anyone";)
+aci: (targetattr="carLicense || description || displayName || facsimileTelepho
+ neNumber || homePhone || homePostalAddress || initials || jpegPhoto || labele
+ dURI || mail || mobile || pager || photo || postOfficeBox || postalAddress ||
+  postalCode || preferredDeliveryMethod || preferredLanguage || registeredAddr
+ ess || roomNumber || secretary || seeAlso || st || street || telephoneNumber
+ || telexNumber || title || userCertificate || userPassword || userSMIMECertif
+ icate || x500UniqueIdentifier")(version 3.0; acl "Enable self write for commo
+ n attributes"; allow (write) userdn="ldap:///self";)
+aci: (targetattr ="fffff")(version 3.0;acl "Directory Administrators Group";al
+ low (all) (groupdn = "ldap:///cn=Directory Administrators, dc=example,dc=com"
+ );)
+aci: (targetattr="*")(version 3.0; acl "Configuration Administrators Group"; a
+ llow (all) groupdn="ldap:///cn=Configuration Administrators,ou=Groups,ou=Topo
+ logyManagement,o=NetscapeRoot";)
+aci: (targetattr="*")(version 3.0; acl "Configuration Administrator"; allow (a
+ ll) userdn="ldap:///uid=admin,ou=Administrators,ou=TopologyManagement,o=Netsc
+ apeRoot";)
+aci: (targetattr = "*")(version 3.0; acl "TEST ACI"; allow (writ
+ e) groupdn = "ldap:///cn=slapd-localhost,cn=389 Directory Server,cn=Server Gr
+ oup,cn=localhost.localdomain,ou=example.com,o=NetscapeRoot";)
+
diff --git a/dirsrvtests/suites/basic/basic_test.py b/dirsrvtests/suites/basic/basic_test.py
new file mode 100644
index 0000000..18d079d
--- /dev/null
+++ b/dirsrvtests/suites/basic/basic_test.py
@@ -0,0 +1,695 @@
+import os
+import sys
+import time
+import ldap
+import ldap.sasl
+import logging
+import pytest
+import shutil
+from subprocess import check_output
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from lib389.utils import *
+
+log = logging.getLogger(__name__)
+
+installation_prefix = None
+
+# Globals
+USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX
+USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX
+USER3_DN = 'uid=user3,' + DEFAULT_SUFFIX
+
+
+class TopologyStandalone(object):
+    def __init__(self, standalone):
+        standalone.open()
+        self.standalone = standalone
+
+
+@pytest.fixture(scope="module")
+def topology(request):
+    '''
+        This fixture is used to create a standalone topology for the 'module'.
+    '''
+    global installation_prefix
+
+    if installation_prefix:
+        args_instance[SER_DEPLOYED_DIR] = installation_prefix
+
+    standalone = DirSrv(verbose=False)
+
+    # Args for the standalone instance
+    args_instance[SER_HOST] = HOST_STANDALONE
+    args_instance[SER_PORT] = PORT_STANDALONE
+    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+    args_standalone = args_instance.copy()
+    standalone.allocate(args_standalone)
+
+    # Get the status of the instance and restart it if it exists
+    instance_standalone = standalone.exists()
+
+    # Remove the instance
+    if instance_standalone:
+        standalone.delete()
+
+    # Create the instance
+    standalone.create()
+
+    # Used to retrieve configuration information (dbdir, confdir...)
+    standalone.open()
+
+    # clear the tmp directory
+    standalone.clearTmpDir(__file__)
+
+    # Here we have standalone instance up and running
+    return TopologyStandalone(standalone)
+
+
+def test_basic_init(topology):
+    '''
+    Initialize our setup
+    '''
+    #
+    # Import the Example LDIF for the tests in this suite
+    #
+    log.info('Initializing the "basic" test suite')
+
+    import_ldif = '%s/Example.ldif' % get_data_dir(topology.standalone.prefix)
+    try:
+        topology.standalone.tasks.importLDIF(suffix=DEFAULT_SUFFIX, input_file=import_ldif, args={TASK_WAIT: True})
+    except ValueError:
+        log.error('Online import failed')
+        assert False
+
+
+def test_basic_ops(topology):
+    '''
+    Test doing adds, mods, modrdns, and deletes
+    '''
+
+    log.info('Running test_basic_ops...')
+
+    USER1_NEWDN = 'cn=user1'
+    USER2_NEWDN = 'cn=user2'
+    USER3_NEWDN = 'cn=user3'
+    NEW_SUPERIOR = 'ou=people,' + DEFAULT_SUFFIX
+    USER1_RDN_DN = 'cn=user1,' + DEFAULT_SUFFIX
+    USER2_RDN_DN = 'cn=user2,' + DEFAULT_SUFFIX
+    USER3_RDN_DN = 'cn=user3,' + NEW_SUPERIOR  # New superior test
+
+    #
+    # Adds
+    #
+    try:
+        topology.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(),
+                                 'sn': '1',
+                                 'cn': 'user1',
+                                 'uid': 'user1',
+                                 'userpassword': 'password'})))
+    except ldap.LDAPError, e:
+        log.error('Failed to add test user' + USER1_DN + ': error ' + e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
+                                 'sn': '2',
+                                 'cn': 'user2',
+                                 'uid': 'user2',
+                                 'userpassword': 'password'})))
+    except ldap.LDAPError, e:
+        log.error('Failed to add test user' + USER2_DN + ': error ' + e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.add_s(Entry((USER3_DN, {'objectclass': "top extensibleObject".split(),
+                                 'sn': '3',
+                                 'cn': 'user3',
+                                 'uid': 'user3',
+                                 'userpassword': 'password'})))
+    except ldap.LDAPError, e:
+        log.error('Failed to add test user' + USER3_DN + ': error ' + e.message['desc'])
+        assert False
+
+    #
+    # Mods
+    #
+    try:
+        topology.standalone.modify_s(USER1_DN, [(ldap.MOD_ADD, 'description', 'New description')])
+    except ldap.LDAPError, e:
+        log.error('Failed to add description: error ' + e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'description', 'Modified description')])
+    except ldap.LDAPError, e:
+        log.error('Failed to modify description: error ' + e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.modify_s(USER1_DN, [(ldap.MOD_DELETE, 'description', None)])
+    except ldap.LDAPError, e:
+        log.error('Failed to delete description: error ' + e.message['desc'])
+        assert False
+
+    #
+    # Modrdns
+    #
+    try:
+        topology.standalone.rename_s(USER1_DN, USER1_NEWDN, delold=1)
+    except ldap.LDAPError, e:
+        log.error('Failed to modrdn user1: error ' + e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.rename_s(USER2_DN, USER2_NEWDN, delold=0)
+    except ldap.LDAPError, e:
+        log.error('Failed to modrdn user2: error ' + e.message['desc'])
+        assert False
+
+    # Modrdn - New superior
+    try:
+        topology.standalone.rename_s(USER3_DN, USER3_NEWDN, newsuperior=NEW_SUPERIOR, delold=1)
+    except ldap.LDAPError, e:
+        log.error('Failed to modrdn(new superior) user3: error ' + e.message['desc'])
+        assert False
+
+    #
+    # Deletes
+    #
+    try:
+        topology.standalone.delete_s(USER1_RDN_DN)
+    except ldap.LDAPError, e:
+        log.error('Failed to delete test entry1: ' + e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.delete_s(USER2_RDN_DN)
+    except ldap.LDAPError, e:
+        log.error('Failed to delete test entry2: ' + e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.delete_s(USER3_RDN_DN)
+    except ldap.LDAPError, e:
+        log.error('Failed to delete test entry3: ' + e.message['desc'])
+        assert False
+
+    log.info('test_basic_ops: PASSED')
+
+
+def test_basic_import_export(topology):
+    '''
+    Test online and offline LDIF imports & exports
+    '''
+
+    log.info('Running test_basic_import_export...')
+
+    tmp_dir = topology.standalone.getDir(__file__, TMP_DIR)
+
+    #
+    # Test online/offline LDIF imports
+    #
+
+    # Generate a test ldif (50k entries)
+    import_ldif = tmp_dir + '/basic_import.ldif'
+    try:
+        os.system('dbgen.pl -n 50000 -o ' + import_ldif)
+    except OSError, e:
+        log.fatal('test_basic_import_export: failed to create test ldif, error: %s - %s' % (e.errno, e.strerror))
+        assert False
+
+    # Online
+    try:
+        topology.standalone.tasks.importLDIF(suffix=DEFAULT_SUFFIX, input_file=import_ldif,
+                                             args={TASK_WAIT: True})
+    except ValueError:
+        log.fatal('test_basic_import_export: Online import failed')
+        assert False
+
+    # Offline
+    if not topology.standalone.ldif2db(DEFAULT_BENAME, None, None, None, import_ldif):
+        log.fatal('test_basic_import_export: Offline import failed')
+        assert False
+
+    #
+    # Test online and offline LDIF export
+    #
+
+    # Online export
+    export_ldif = tmp_dir + 'export.ldif'
+    exportTask = Tasks(topology.standalone)
+    try:
+        args = {TASK_WAIT: True}
+        exportTask.exportLDIF(DEFAULT_SUFFIX, None, export_ldif, args)
+    except ValueError:
+        log.fatal('test_basic_import_export: Online export failed')
+        assert False
+
+    # Offline export
+    if not topology.standalone.db2ldif(DEFAULT_BENAME, (DEFAULT_SUFFIX,), None, None, None, export_ldif):
+        log.fatal('test_basic_import_export: Failed to run offline db2ldif')
+        assert False
+
+    #
+    # Cleanup - Import the Example LDIF for the other tests in this suite
+    #
+    import_ldif = '%s/Example.ldif' % get_data_dir(topology.standalone.prefix)
+    try:
+        topology.standalone.tasks.importLDIF(suffix=DEFAULT_SUFFIX, input_file=import_ldif, args={TASK_WAIT: True})
+    except ValueError:
+        log.fatal('test_basic_import_export: Online import failed')
+        assert False
+
+    log.info('test_basic_import_export: PASSED')
+
+
+def test_basic_backup(topology):
+    '''
+    Test online and offline backup and restore
+    '''
+
+    log.info('Running test_basic_backup...')
+
+    backup_dir = '%sbasic_backup/' % topology.standalone.getDir(__file__, TMP_DIR)
+
+    # Test online backup
+    try:
+        topology.standalone.tasks.db2bak(backup_dir=backup_dir, args={TASK_WAIT: True})
+    except ValueError:
+        log.fatal('test_basic_backup: Online backup failed')
+        assert False
+
+    # Test online restore
+    try:
+        topology.standalone.tasks.bak2db(backup_dir=backup_dir, args={TASK_WAIT: True})
+    except ValueError:
+        log.fatal('test_basic_backup: Online restore failed')
+        assert False
+
+    # Test offline backup
+    if not topology.standalone.db2bak(backup_dir):
+        log.fatal('test_basic_backup: Offline backup failed')
+        assert False
+
+    # Test offline restore
+    if not topology.standalone.bak2db(backup_dir):
+        log.fatal('test_basic_backup: Offline restore failed')
+        assert False
+
+    log.info('test_basic_backup: PASSED')
+
+
+def test_basic_acl(topology):
+    '''
+    Run some basic access control(ACL) tests
+    '''
+
+    log.info('Running test_basic_acl...')
+
+    DENY_ACI = ('(targetattr = "*") (version 3.0;acl "deny user";deny (all)' +
+                '(userdn = "ldap:///' + USER1_DN + '");)')
+
+    #
+    # Add two users
+    #
+    try:
+        topology.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(),
+                                 'sn': '1',
+                                 'cn': 'user 1',
+                                 'uid': 'user1',
+                                 'userpassword': PASSWORD})))
+    except ldap.LDAPError, e:
+        log.fatal('test_basic_acl: Failed to add test user ' + USER1_DN + ': error ' + e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
+                                 'sn': '2',
+                                 'cn': 'user 2',
+                                 'uid': 'user2',
+                                 'userpassword': PASSWORD})))
+    except ldap.LDAPError, e:
+        log.fatal('test_basic_acl: Failed to add test user ' + USER2_DN + ': error ' + e.message['desc'])
+        assert False
+
+    #
+    # Add an aci that denies USER1 from doing anything, and also set the default anonymous access
+    #
+    try:
+        topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', DENY_ACI)])
+    except ldap.LDAPError, e:
+        log.fatal('test_basic_acl: Failed to add DENY ACI: error ' + e.message['desc'])
+        assert False
+
+    #
+    # Make sure USER1_DN cannot search anything, but USER2_DN can...
+    #
+    try:
+        topology.standalone.simple_bind_s(USER1_DN, PASSWORD)
+    except ldap.LDAPError, e:
+        log.fatal('test_basic_acl: Failed to bind as user1, error: ' + e.message['desc'])
+        assert False
+
+    try:
+        entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
+        if entries:
+            log.fatal('test_basic_acl: User1 was incorrectly able to search the suffix!')
+            assert False
+    except ldap.LDAPError, e:
+        log.fatal('test_basic_acl: Search suffix failed(as user1): ' + e.message['desc'])
+        assert False
+
+    # Now try user2...  Also check that userpassword is stripped out
+    try:
+        topology.standalone.simple_bind_s(USER2_DN, PASSWORD)
+    except ldap.LDAPError, e:
+        log.fatal('test_basic_acl: Failed to bind as user2, error: ' + e.message['desc'])
+        assert False
+
+    try:
+        entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=user1)')
+        if not entries:
+            log.fatal('test_basic_acl: User1 incorrectly not able to search the suffix')
+            assert False
+        if entries[0].hasAttr('userpassword'):
+            # The default anonymous access aci should have stripped out userpassword
+            log.fatal('test_basic_acl: User2 was incorrectly able to see userpassword')
+            assert False
+    except ldap.LDAPError, e:
+        log.fatal('test_basic_acl: Search for user1 failed(as user2): ' + e.message['desc'])
+        assert False
+
+    # Make sure Root DN can also search (this also resets the bind dn to the
+    # Root DN for future operations)
+    try:
+        topology.standalone.simple_bind_s(DN_DM, PW_DM)
+    except ldap.LDAPError, e:
+        log.fatal('test_basic_acl: Failed to bind as Root DN, error: ' + e.message['desc'])
+        assert False
+
+    try:
+        entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
+        if not entries:
+            log.fatal('test_basic_acl: Root DN incorrectly not able to search the suffix')
+            assert False
+    except ldap.LDAPError, e:
+        log.fatal('test_basic_acl: Search of the suffix failed (as Root DN): ' + e.message['desc'])
+        assert False
+
+    #
+    # Cleanup
+    #
+    try:
+        topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', DENY_ACI)])
+    except ldap.LDAPError, e:
+        log.fatal('test_basic_acl: Failed to delete DENY ACI: error ' + e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.delete_s(USER1_DN)
+    except ldap.LDAPError, e:
+        log.fatal('test_basic_acl: Failed to delete test entry1: ' + e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.delete_s(USER2_DN)
+    except ldap.LDAPError, e:
+        log.fatal('test_basic_acl: Failed to delete test entry2: ' + e.message['desc'])
+        assert False
+
+    log.info('test_basic_acl: PASSED')
+
+
+def test_basic_searches(topology):
+    '''
+        The search results are gathered from testing with Example.ldif
+    '''
+
+    log.info('Running test_basic_searches...')
+
+    filters = (('(uid=scarter)', 1),
+               ('(uid=tmorris*)', 1),
+               ('(uid=*hunt*)', 4),
+               ('(uid=*cope)', 2),
+               ('(mail=*)', 150),
+               ('(roomnumber>=4000)', 35),
+               ('(roomnumber<=4000)', 115),
+               ('(&(roomnumber>=4000)(roomnumber<=4500))', 18),
+               ('(!(l=sunnyvale))', 120),
+               ('(&(uid=t*)(l=santa clara))', 7),
+               ('(|(uid=k*)(uid=r*))', 18),
+               ('(|(uid=t*)(l=sunnyvale))', 50),
+               ('(&(!(uid=r*))(ou=people))', 139),
+               ('(&(uid=m*)(l=sunnyvale)(ou=people)(mail=*example*)(roomNumber=*))', 3),
+               ('(&(|(uid=m*)(l=santa clara))(roomNumber=22*))', 5),
+               ('(&(|(uid=m*)(l=santa clara))(roomNumber=22*)(!(roomnumber=2254)))', 4))
+
+    for (search_filter, search_result) in filters:
+        try:
+            entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_filter)
+            if len(entries) != search_result:
+                log.fatal('test_basic_searches: An incorrect number of entries was returned from filter (%s): (%d) expected (%d)' %
+                          (search_filter, len(entries), search_result))
+                assert False
+        except ldap.LDAPError, e:
+            log.fatal('Search failed: ' + e.message['desc'])
+            assert False
+
+    log.info('test_basic_searches: PASSED')
+
+
+def test_basic_referrals(topology):
+    '''
+        Set the server to referral mode, and make sure we receive the referral error (10)
+    '''
+
+    log.info('Running test_basic_referrals...')
+
+    SUFFIX_CONFIG = 'cn="dc=example,dc=com",cn=mapping tree,cn=config'
+
+    #
+    # Set the referral, and the backend state
+    #
+    try:
+        topology.standalone.modify_s(SUFFIX_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-referral',
+                      'ldap://localhost.localdomain:389/o%3dnetscaperoot')])
+    except ldap.LDAPError, e:
+        log.fatal('test_basic_referrals: Failed to set referral: error ' + e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.modify_s(SUFFIX_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-state', 'Referral')])
+    except ldap.LDAPError, e:
+        log.fatal('test_basic_referrals: Failed to set backend state: error ' + e.message['desc'])
+        assert False
+
+    #
+    # Test that a referral error is returned
+    #
+    topology.standalone.set_option(ldap.OPT_REFERRALS, 0)  # Do not follow referral
+    try:
+        topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=top')
+    except ldap.REFERRAL:
+        pass
+    except ldap.LDAPError, e:
+        log.fatal('test_basic_referrals: Search failed: ' + e.message['desc'])
+        assert False
+
+    #
+    # Make sure server can restart in referral mode
+    #
+    topology.standalone.restart(timeout=10)
+
+    #
+    # Cleanup
+    #
+    try:
+        topology.standalone.modify_s(SUFFIX_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-state', 'Backend')])
+    except ldap.LDAPError, e:
+        log.fatal('test_basic_referrals: Failed to set backend state: error ' + e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.modify_s(SUFFIX_CONFIG, [(ldap.MOD_DELETE, 'nsslapd-referral', None)])
+    except ldap.LDAPError, e:
+        log.fatal('test_basic_referrals: Failed to delete referral: error ' + e.message['desc'])
+        assert False
+    topology.standalone.set_option(ldap.OPT_REFERRALS, 1)
+
+    log.info('test_basic_referrals: PASSED')
+
+
+def test_basic_systemctl(topology):
+    '''
+    Test systemctl can stop and start the server.  Also test that start reports an
+    error when the instance does not start.  Only for RPM builds
+    '''
+
+    log.info('Running test_basic_systemctl...')
+
+    # We can only use systemctl on RPM installations
+    if topology.standalone.prefix and topology.standalone.prefix != '/':
+        return
+
+    data_dir = topology.standalone.getDir(__file__, DATA_DIR)
+    tmp_dir = topology.standalone.getDir(__file__, TMP_DIR)
+    config_dir = topology.standalone.confdir
+    start_ds = 'sudo systemctl start dirsrv@' + topology.standalone.serverid + '.service'
+    stop_ds = 'sudo systemctl stop dirsrv@' + topology.standalone.serverid + '.service'
+    is_running = 'sudo systemctl is-active dirsrv@' + topology.standalone.serverid + '.service'
+
+    #
+    # Stop the server
+    #
+    log.info('Stopping the server...')
+    rc = os.system(stop_ds)
+    log.info('Check the status...')
+    if rc != 0 or os.system(is_running) == 0:
+        log.fatal('test_basic_systemctl: Failed to stop the server')
+        assert False
+    log.info('Stopped the server.')
+
+    #
+    # Start the server
+    #
+    log.info('Starting the server...')
+    rc = os.system(start_ds)
+    log.info('Check the status...')
+    if rc != 0 or os.system(is_running) != 0:
+        log.fatal('test_basic_systemctl: Failed to start the server')
+        assert False
+    log.info('Started the server.')
+
+    #
+    # Stop the server, break the dse.ldif so a start fails,
+    # and verify that systemctl detects the failed start
+    #
+    log.info('Stopping the server...')
+    rc = os.system(stop_ds)
+    log.info('Check the status...')
+    if rc != 0 or os.system(is_running) == 0:
+        log.fatal('test_basic_systemctl: Failed to stop the server')
+        assert False
+    log.info('Stopped the server before breaking the dse.ldif.')
+
+    shutil.copy(config_dir + '/dse.ldif', tmp_dir)
+    shutil.copy(data_dir + 'basic/dse.ldif.broken', config_dir + '/dse.ldif')
+
+    log.info('Attempting to start the server with broken dse.ldif...')
+    rc = os.system(start_ds)
+    log.info('Check the status...')
+    if rc == 0 or os.system(is_running) == 0:
+        log.fatal('test_basic_systemctl: The server incorrectly started')
+        assert False
+    log.info('Server failed to start as expected')
+
+    #
+    # Fix the dse.ldif, and make sure the server starts up,
+    # and systemctl correctly identifies the successful start
+    #
+    shutil.copy(tmp_dir + 'dse.ldif', config_dir)
+    log.info('Starting the server...')
+    rc = os.system(start_ds)
+    log.info('Check the status...')
+    if rc != 0 or os.system(is_running) != 0:
+        log.fatal('test_basic_systemctl: Failed to start the server')
+        assert False
+    log.info('Server started after fixing dse.ldif.')
+    time.sleep(1)
+
+    log.info('test_basic_systemctl: PASSED')
+
+
+def test_basic_ldapagent(topology):
+    '''
+    Test that the ldap agent starts
+    '''
+
+    log.info('Running test_basic_ldapagent...')
+
+    tmp_dir = topology.standalone.getDir(__file__, TMP_DIR)
+    var_dir = topology.standalone.prefix + '/var'
+    config_file = tmp_dir + '/agent.conf'
+    cmd = 'sudo %s/ldap-agent %s' % (get_sbin_dir(prefix=topology.standalone.prefix), config_file)
+
+    agent_config_file = open(config_file, 'w')
+    agent_config_file.write('agentx-master ' + var_dir + '/agentx/master\n')
+    agent_config_file.write('agent-logdir ' + var_dir + '/log/dirsrv\n')
+    agent_config_file.write('server slapd-' + topology.standalone.serverid + '\n')
+    agent_config_file.close()
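+    # The minimal agent.conf written above points the subagent at the AgentX master
+    # socket, sets its log directory, and names the local instance to monitor.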
+
+    rc = os.system(cmd)
+    if rc != 0:
+        log.fatal('test_basic_ldapagent: Failed to start snmp ldap agent: error %d' % rc)
+        assert False
+
+    log.info('snmp ldap agent started')
+
+    #
+    # Cleanup - kill the agent
+    #
+    pid = check_output(['pidof', '-s', 'ldap-agent-bin'])
+    log.info('Cleanup - killing agent: ' + pid)
+    rc = os.system('sudo kill -9 ' + pid)
+
+    log.info('test_basic_ldapagent: PASSED')
+
+
+def test_basic_dse(topology):
+    '''
+    Test that the dse.ldif is not wiped out after the process is killed (bug 910581)
+    '''
+
+    log.info('Running test_basic_dse...')
+
+    dse_file = topology.standalone.confdir + '/dse.ldif'
+    pid = check_output(['pidof', '-s', 'ns-slapd'])
+    os.system('sudo kill -9 ' + pid)
+    if os.path.getsize(dse_file) == 0:
+        log.fatal('test_basic_dse: dse.ldif\'s content was incorrectly removed!')
+        assert False
+
+    topology.standalone.start(timeout=10)
+    log.info('dse.ldif was not corrupted, and the server was restarted')
+
+    log.info('test_basic_dse: PASSED')
+
+
+def test_basic_final(topology):
+    topology.standalone.delete()
+    log.info('Basic test suite PASSED')
+
+
+def run_isolated():
+    '''
+        run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test, etc.)
+        To run isolated without py.test, you need to
+            - edit this file and comment '@pytest.fixture' line before 'topology' function.
+            - set the installation prefix
+            - run this program
+    '''
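+    # For example (a sketch - the exact path and privileges depend on your setup):
+    #   sudo python dirsrvtests/suites/basic/basic_test.py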
+    global installation_prefix
+    installation_prefix = None
+
+    topo = topology(True)
+
+    test_basic_init(topo)
+    test_basic_ops(topo)
+    test_basic_import_export(topo)
+    test_basic_backup(topo)
+    test_basic_acl(topo)
+    test_basic_searches(topo)
+    test_basic_referrals(topo)
+    test_basic_systemctl(topo)
+    test_basic_ldapagent(topo)
+    test_basic_dse(topo)
+
+    test_basic_final(topo)
+
+
+if __name__ == '__main__':
+    run_isolated()
diff --git a/dirsrvtests/suites/betxns/betxn_test.py b/dirsrvtests/suites/betxns/betxn_test.py
new file mode 100644
index 0000000..cee3d59
--- /dev/null
+++ b/dirsrvtests/suites/betxns/betxn_test.py
@@ -0,0 +1,187 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from lib389.utils import *
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+installation1_prefix = None
+
+
+class TopologyStandalone(object):
+    def __init__(self, standalone):
+        standalone.open()
+        self.standalone = standalone
+
+
+@pytest.fixture(scope="module")
+def topology(request):
+    global installation1_prefix
+    if installation1_prefix:
+        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+
+    # Creating standalone instance ...
+    standalone = DirSrv(verbose=False)
+    args_instance[SER_HOST] = HOST_STANDALONE
+    args_instance[SER_PORT] = PORT_STANDALONE
+    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+    args_standalone = args_instance.copy()
+    standalone.allocate(args_standalone)
+    instance_standalone = standalone.exists()
+    if instance_standalone:
+        standalone.delete()
+    standalone.create()
+    standalone.open()
+
+    # Clear out the tmp dir
+    standalone.clearTmpDir(__file__)
+
+    return TopologyStandalone(standalone)
+
+
+def test_betxn_init(topology):
+    # First enable dynamic plugins - makes plugin testing much easier
+    try:
+        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
+    except ldap.LDAPError, e:
+        log.error('Failed to enable dynamic plugins: error ' + e.message['desc'])
+        assert False
+
+
+def test_betxt_7bit(topology):
+    '''
+    Test that the 7-bit plugin correctly rejects an invalid update
+    '''
+
+    log.info('Running test_betxt_7bit...')
+
+    USER_DN = 'uid=test_entry,' + DEFAULT_SUFFIX
+    eight_bit_rdn = u'uid=Fu\u00c4\u00e8'
+    BAD_RDN = eight_bit_rdn.encode('utf-8')
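+    # The RDN above contains non-ASCII (8-bit) characters, so the 7-bit check
+    # plugin is expected to reject the modrdn operation below.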
+
+    # This plugin should be on by default, but enable it just in case...
+    topology.standalone.plugins.enable(name=PLUGIN_7_BIT_CHECK)
+
+    # Add our test user
+    try:
+        topology.standalone.add_s(Entry((USER_DN, {'objectclass': "top extensibleObject".split(),
+                                 'sn': '1',
+                                 'cn': 'test 1',
+                                 'uid': 'test_entry',
+                                 'userpassword': 'password'})))
+    except ldap.LDAPError, e:
+        log.error('Failed to add test user' + USER_DN + ': error ' + e.message['desc'])
+        assert False
+
+    # Attempt a modrdn, this should fail
+    try:
+        topology.standalone.rename_s(USER_DN, BAD_RDN, delold=0)
+        log.fatal('test_betxt_7bit: Modrdn operation incorrectly succeeded')
+        assert False
+    except ldap.LDAPError, e:
+        log.info('Modrdn failed as expected: error ' + e.message['desc'])
+
+    # Make sure the operation did not succeed, attempt to search for the new RDN
+    try:
+        entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, BAD_RDN)
+        if entries:
+            log.fatal('test_betxt_7bit: Incorrectly found the entry using the invalid RDN')
+            assert False
+    except ldap.LDAPError, e:
+        log.fatal('Error while searching for test entry: ' + e.message['desc'])
+        assert False
+
+    #
+    # Cleanup - remove the user
+    #
+    try:
+        topology.standalone.delete_s(USER_DN)
+    except ldap.LDAPError, e:
+        log.fatal('Failed to delete test entry: ' + e.message['desc'])
+        assert False
+
+    log.info('test_betxt_7bit: PASSED')
+
+
+def test_betxn_attr_uniqueness(topology):
+    '''
+    Test that we can not add two entries that have the same attr value that is
+    defined by the plugin.
+    '''
+
+    log.info('Running test_betxn_attr_uniqueness...')
+
+    USER1_DN = 'uid=test_entry1,' + DEFAULT_SUFFIX
+    USER2_DN = 'uid=test_entry2,' + DEFAULT_SUFFIX
+
+    topology.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS)
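+    # Assumption: the default Attribute Uniqueness plugin configuration enforces
+    # uniqueness on the 'uid' attribute, which is what this test relies on.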
+
+    # Add the first entry
+    try:
+        topology.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(),
+                                     'sn': '1',
+                                     'cn': 'test 1',
+                                     'uid': 'test_entry1',
+                                     'userpassword': 'password1'})))
+    except ldap.LDAPError, e:
+        log.fatal('test_betxn_attr_uniqueness: Failed to add test user: ' +
+                  USER1_DN + ', error ' + e.message['desc'])
+        assert False
+
+    # Add the second entry with a duplicate uid
+    try:
+        topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
+                                     'sn': '2',
+                                     'cn': 'test 2',
+                                     'uid': ['test_entry2', 'test_entry1'],  # Second value duplicates user1's uid
+                                     'userpassword': 'password2'})))
+        log.fatal('test_betxn_attr_uniqueness: The second entry was incorrectly added.')
+        assert False
+    except ldap.LDAPError, e:
+        log.error('test_betxn_attr_uniqueness: Add of the second entry failed as expected: ' +
+                  USER2_DN + ', error ' + e.message['desc'])
+
+    #
+    # Cleanup - disable plugin, remove test entry
+    #
+    topology.standalone.plugins.disable(name=PLUGIN_ATTR_UNIQUENESS)
+
+    try:
+        topology.standalone.delete_s(USER1_DN)
+    except ldap.LDAPError, e:
+        log.fatal('test_betxn_attr_uniqueness: Failed to delete test entry1: ' +
+                  e.message['desc'])
+        assert False
+
+    log.info('test_betxn_attr_uniqueness: PASSED')
+
+
+def test_betxn_final(topology):
+    topology.standalone.delete()
+    log.info('betxn test suite PASSED')
+
+
+def run_isolated():
+    global installation1_prefix
+    installation1_prefix = None
+
+    topo = topology(True)
+    test_betxn_init(topo)
+    test_betxt_7bit(topo)
+    test_betxn_attr_uniqueness(topo)
+    test_betxn_final(topo)
+
+
+if __name__ == '__main__':
+    run_isolated()
+
diff --git a/dirsrvtests/suites/clu/clu_test.py b/dirsrvtests/suites/clu/clu_test.py
new file mode 100644
index 0000000..2d7fc04
--- /dev/null
+++ b/dirsrvtests/suites/clu/clu_test.py
@@ -0,0 +1,107 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from lib389.utils import *
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+installation1_prefix = None
+
+
+class TopologyStandalone(object):
+    def __init__(self, standalone):
+        standalone.open()
+        self.standalone = standalone
+
+
+@pytest.fixture(scope="module")
+def topology(request):
+    global installation1_prefix
+    if installation1_prefix:
+        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+
+    # Creating standalone instance ...
+    standalone = DirSrv(verbose=False)
+    args_instance[SER_HOST] = HOST_STANDALONE
+    args_instance[SER_PORT] = PORT_STANDALONE
+    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+    args_standalone = args_instance.copy()
+    standalone.allocate(args_standalone)
+    instance_standalone = standalone.exists()
+    if instance_standalone:
+        standalone.delete()
+    standalone.create()
+    standalone.open()
+
+    # Clear out the tmp dir
+    standalone.clearTmpDir(__file__)
+
+    return TopologyStandalone(standalone)
+
+
+def test_clu_init(topology):
+    '''
+    Write any test suite initialization here (if needed)
+    '''
+
+    return
+
+
+def test_clu_pwdhash(topology):
+    '''
+    Test the pwdhash script
+    '''
+
+    log.info('Running test_clu_pwdhash...')
+
+    cmd = 'pwdhash -s ssha testpassword'
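+    # pwdhash prints the hashed password on a single line; for the ssha scheme the
+    # output is expected to look something like "{SSHA}<base64 data>".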
+
+    p = os.popen(cmd)
+    result = p.readline()
+    p.close()
+
+    if not result:
+        log.fatal('test_clu_pwdhash: Failed to run pwdhash')
+        assert False
+
+    if len(result) < 20:
+        log.fatal('test_clu_pwdhash: Hashed password is too short')
+        assert False
+
+    log.info('pwdhash generated: ' + result)
+    log.info('test_clu_pwdhash: PASSED')
+
+
+def test_clu_final(topology):
+    topology.standalone.delete()
+    log.info('clu test suite PASSED')
+
+
+def run_isolated():
+    '''
+    This test is for the simple scripts that don't have a lot of options or
+    points of failure.  Scripts that do should have their own individual tests.
+    '''
+    global installation1_prefix
+    installation1_prefix = None
+
+    topo = topology(True)
+    test_clu_init(topo)
+
+    test_clu_pwdhash(topo)
+
+    test_clu_final(topo)
+
+
+if __name__ == '__main__':
+    run_isolated()
+
diff --git a/dirsrvtests/suites/clu/db2ldif_test.py b/dirsrvtests/suites/clu/db2ldif_test.py
new file mode 100644
index 0000000..1fc27f8
--- /dev/null
+++ b/dirsrvtests/suites/clu/db2ldif_test.py
@@ -0,0 +1,84 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from lib389.utils import *
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+installation1_prefix = None
+
+
+class TopologyStandalone(object):
+    def __init__(self, standalone):
+        standalone.open()
+        self.standalone = standalone
+
+
+@pytest.fixture(scope="module")
+def topology(request):
+    global installation1_prefix
+    if installation1_prefix:
+        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+
+    # Creating standalone instance ...
+    standalone = DirSrv(verbose=False)
+    args_instance[SER_HOST] = HOST_STANDALONE
+    args_instance[SER_PORT] = PORT_STANDALONE
+    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+    args_standalone = args_instance.copy()
+    standalone.allocate(args_standalone)
+    instance_standalone = standalone.exists()
+    if instance_standalone:
+        standalone.delete()
+    standalone.create()
+    standalone.open()
+
+    # Clear out the tmp dir
+    standalone.clearTmpDir(__file__)
+
+    return TopologyStandalone(standalone)
+
+
+def test_db2ldif_init(topology):
+    '''
+    Write any test suite initialization here (if needed)
+    '''
+
+    return
+
+
+def test_db2ldif_final(topology):
+    topology.standalone.delete()
+    log.info('db2ldif test suite PASSED')
+
+
+def run_isolated():
+    '''
+    Test db2ldif/db2ldif.pl - test/stress functionality, all the command line options,
+    valid/invalid option combinations, etc.
+    '''
+    global installation1_prefix
+    installation1_prefix = None
+
+    topo = topology(True)
+    test_db2ldif_init(topo)
+
+    # test 1 function...
+    # test 2 function...
+    # ...
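+    # For example, a future test might export the default suffix and sanity check the
+    # result (a sketch, assuming the lib389 Tasks.exportLDIF() helper):
+    #   ldif_file = topo.standalone.getDir(__file__, TMP_DIR) + '/export.ldif'
+    #   topo.standalone.tasks.exportLDIF(suffix=DEFAULT_SUFFIX, output_file=ldif_file,
+    #                                    args={TASK_WAIT: True})
+    #   assert os.path.getsize(ldif_file) > 0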
+
+    test_db2ldif_final(topo)
+
+
+if __name__ == '__main__':
+    run_isolated()
+
diff --git a/dirsrvtests/suites/config/config_test.py b/dirsrvtests/suites/config/config_test.py
new file mode 100644
index 0000000..cc72b8b
--- /dev/null
+++ b/dirsrvtests/suites/config/config_test.py
@@ -0,0 +1,189 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+installation1_prefix = None
+
+
+class TopologyStandalone(object):
+    def __init__(self, standalone):
+        standalone.open()
+        self.standalone = standalone
+
+
+@pytest.fixture(scope="module")
+def topology(request):
+    global installation1_prefix
+    if installation1_prefix:
+        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+
+    # Creating standalone instance ...
+    standalone = DirSrv(verbose=False)
+    args_instance[SER_HOST] = HOST_STANDALONE
+    args_instance[SER_PORT] = PORT_STANDALONE
+    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+    args_standalone = args_instance.copy()
+    standalone.allocate(args_standalone)
+    instance_standalone = standalone.exists()
+    if instance_standalone:
+        standalone.delete()
+    standalone.create()
+    standalone.open()
+
+    # Clear out the tmp dir
+    standalone.clearTmpDir(__file__)
+
+    return TopologyStandalone(standalone)
+
+
+def test_config_init(topology):
+    '''
+    Initialization function
+    '''
+    return
+
+
+def test_config_listen_backport_size(topology):
+    '''
+    We need to check that we can search on nsslapd-listen-backlog-size,
+    and change its value to a positive number and a negative number.
+    Verify that an invalid value is rejected.
+    '''
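+    # nsslapd-listen-backlog-size controls the listen(2) backlog of the server's
+    # listening sockets; this test only verifies that numeric values (positive or
+    # negative) are accepted and that a non-numeric value is rejected.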
+
+    log.info('Running test_config_listen_backport_size...')
+
+    try:
+        entry = topology.standalone.search_s(DN_CONFIG, ldap.SCOPE_BASE, 'objectclass=top',
+                                             ['nsslapd-listen-backlog-size'])
+        default_val = entry[0].getValue('nsslapd-listen-backlog-size')
+        if not default_val:
+            log.fatal('test_config_listen_backport_size: Failed to get nsslapd-listen-backlog-size from config')
+            assert False
+    except ldap.LDAPError, e:
+        log.fatal('test_config_listen_backport_size: Failed to search config, error: ' + e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-listen-backlog-size', '256')])
+    except ldap.LDAPError, e:
+        log.fatal('test_config_listen_backport_size: Failed to modify config, error: ' + e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-listen-backlog-size', '-1')])
+    except ldap.LDAPError, e:
+        log.fatal('test_config_listen_backport_size: Failed to modify config (negative value), error: ' + e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-listen-backlog-size', 'ZZ')])
+        log.fatal('test_config_listen_backport_size: Invalid value was successfully added')
+        assert False
+    except ldap.LDAPError, e:
+        pass
+
+    #
+    # Cleanup - undo what we've done
+    #
+    try:
+        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-listen-backlog-size', default_val)])
+    except ldap.LDAPError, e:
+        log.fatal('test_config_listen_backport_size: Failed to reset config, error: ' + e.message['desc'])
+        assert False
+
+    log.info('test_config_listen_backport_size: PASSED')
+
+
+def test_config_deadlock_policy(topology):
+    '''
+    We need to check that nsslapd-db-deadlock-policy exists, that we can
+    change the value, and invalid values are rejected
+    '''
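+    # The accepted values 0-9 are assumed to map to the Berkeley DB deadlock
+    # resolution policies (DB_LOCK_*); '9' is the server default checked below.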
+
+    log.info('Running test_config_deadlock_policy...')
+
+    LDBM_DN = 'cn=config,cn=ldbm database,cn=plugins,cn=config'
+    default_val = '9'
+
+    try:
+        entry = topology.standalone.search_s(LDBM_DN, ldap.SCOPE_BASE, 'objectclass=top',
+                                             ['nsslapd-db-deadlock-policy'])
+        val = entry[0].getValue('nsslapd-db-deadlock-policy')
+        if not val:
+            log.fatal('test_config_deadlock_policy: Failed to get nsslapd-db-deadlock-policy from config')
+            assert False
+        if val != default_val:
+            log.fatal('test_config_deadlock_policy: The wrong default value was present: (%s), expected (%s)' %
+                      (val, default_val))
+            assert False
+    except ldap.LDAPError, e:
+        log.fatal('test_config_deadlock_policy: Failed to search config, error: ' + e.message['desc'])
+        assert False
+
+    # Try a range of valid values
+    for val in ('0', '5', '9'):
+        try:
+            topology.standalone.modify_s(LDBM_DN, [(ldap.MOD_REPLACE, 'nsslapd-db-deadlock-policy', val)])
+        except ldap.LDAPError, e:
+            log.fatal('test_config_deadlock_policy: Failed to modify config: nsslapd-db-deadlock-policy to (%s), error: %s' %
+                      (val, e.message['desc']))
+            assert False
+
+    # Try a range of invalid values
+    for val in ('-1', '10'):
+        try:
+            topology.standalone.modify_s(LDBM_DN, [(ldap.MOD_REPLACE, 'nsslapd-db-deadlock-policy', val)])
+            log.fatal('test_config_deadlock_policy: Able to add invalid value to nsslapd-db-deadlock-policy(%s)' % (val))
+            assert False
+        except ldap.LDAPError, e:
+            pass
+    #
+    # Cleanup - undo what we've done
+    #
+    try:
+        topology.standalone.modify_s(LDBM_DN, [(ldap.MOD_REPLACE, 'nsslapd-db-deadlock-policy', default_val)])
+    except ldap.LDAPError, e:
+        log.fatal('test_config_deadlock_policy: Failed to reset nsslapd-db-deadlock-policy to the default value (%s), error: %s' %
+                  (default_val, e.message['desc']))
+
+    log.info('test_config_deadlock_policy: PASSED')
+
+
+def test_config_final(topology):
+    topology.standalone.delete()
+    log.info('config test suite PASSED')
+
+
+def run_isolated():
+    '''
+    This test suite is designed to test all things cn=config, like the core cn=config settings,
+    the ldbm database settings, etc.  This suite should not test individual plugins - there
+    should be individual suites for each plugin.
+    '''
+    global installation1_prefix
+    installation1_prefix = None
+
+    topo = topology(True)
+    test_config_init(topo)
+
+    test_config_listen_backport_size(topo)
+    test_config_deadlock_policy(topo)
+
+    test_config_final(topo)
+
+
+if __name__ == '__main__':
+    run_isolated()
+
diff --git a/dirsrvtests/suites/dynamic-plugins/plugin_tests.py b/dirsrvtests/suites/dynamic-plugins/plugin_tests.py
index a552142..43d0244 100644
--- a/dirsrvtests/suites/dynamic-plugins/plugin_tests.py
+++ b/dirsrvtests/suites/dynamic-plugins/plugin_tests.py
@@ -57,7 +57,7 @@ def test_dependency(inst, plugin):
                       [(ldap.MOD_REPLACE, 'nsslapd-plugin-depends-on-named', plugin)])
 
     except ldap.LDAPError, e:
-        log.error('test_dependency: Failed to modify ' + PLUGIN_ACCT_USABILITY + ': error ' + e.message['desc'])
+        log.fatal('test_dependency: Failed to modify ' + PLUGIN_ACCT_USABILITY + ': error ' + e.message['desc'])
         assert False
 
     try:
@@ -69,7 +69,7 @@ def test_dependency(inst, plugin):
         pass
     else:
         # Incorrectly succeeded
-        log.error('test_dependency: Plugin dependency check failed (%s)' % plugin)
+        log.fatal('test_dependency: Plugin dependency check failed (%s)' % plugin)
         assert False
 
     # Now undo the change
@@ -77,7 +77,7 @@ def test_dependency(inst, plugin):
         inst.modify_s('cn=' + PLUGIN_ACCT_USABILITY + ',cn=plugins,cn=config',
                       [(ldap.MOD_DELETE, 'nsslapd-plugin-depends-on-named', None)])
     except ldap.LDAPError, e:
-        log.error('test_dependency: Failed to reset ' + plugin + ': error ' + e.message['desc'])
+        log.fatal('test_dependency: Failed to reset ' + plugin + ': error ' + e.message['desc'])
         assert False
 
 
@@ -106,7 +106,7 @@ def wait_for_task(conn, task_dn):
         time.sleep(1)
         count += 1
     if not finished:
-        log.error('wait_for_task: Task (%s) did not complete!' % task_dn)
+        log.fatal('wait_for_task: Task (%s) did not complete!' % task_dn)
         assert False
 
 
@@ -145,10 +145,10 @@ def test_acctpolicy(inst, args=None):
                       [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes'),
                        (ldap.MOD_REPLACE, 'stateattrname', 'lastLoginTime')])
         except ldap.LDAPError, e:
-            log.error('test_acctpolicy: Failed to modify config entry: error ' + e.message['desc'])
+            log.fatal('test_acctpolicy: Failed to modify config entry: error ' + e.message['desc'])
             assert False
     except ldap.LDAPError, e:
-        log.error('test_acctpolicy: Failed to add config entry: error ' + e.message['desc'])
+        log.fatal('test_acctpolicy: Failed to add config entry: error ' + e.message['desc'])
         assert False
 
     ############################################################################
@@ -164,14 +164,14 @@ def test_acctpolicy(inst, args=None):
                                  'uid': 'user1',
                                  'userpassword': 'password'})))
     except ldap.LDAPError, e:
-        log.error('test_acctpolicy: Failed to add test user' + USER1_DN + ': error ' + e.message['desc'])
+        log.fatal('test_acctpolicy: Failed to add test user' + USER1_DN + ': error ' + e.message['desc'])
         assert False
 
     # bind as user
     try:
         inst.simple_bind_s(USER1_DN, "password")
     except ldap.LDAPError, e:
-        log.error('test_acctpolicy: Failed to bind as user1: ' + e.message['desc'])
+        log.fatal('test_acctpolicy: Failed to bind as user1: ' + e.message['desc'])
         assert False
 
     # Bind as Root DN
@@ -179,7 +179,7 @@ def test_acctpolicy(inst, args=None):
     try:
         inst.simple_bind_s(DN_DM, PASSWORD)
     except ldap.LDAPError, e:
-        log.error('test_acctpolicy: Failed to bind as rootDN: ' + e.message['desc'])
+        log.fatal('test_acctpolicy: Failed to bind as rootDN: ' + e.message['desc'])
         assert False
 
     # Check lastLoginTime of USER1
@@ -200,7 +200,7 @@ def test_acctpolicy(inst, args=None):
         inst.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'stateattrname', 'testLastLoginTime')])
 
     except ldap.LDAPError, e:
-        log.error('test_acctpolicy: Failed to modify config entry: error ' + e.message['desc'])
+        log.fatal('test_acctpolicy: Failed to modify config entry: error ' + e.message['desc'])
         assert False
 
     ############################################################################
@@ -212,7 +212,7 @@ def test_acctpolicy(inst, args=None):
     try:
         inst.simple_bind_s(USER1_DN, "password")
     except ldap.LDAPError, e:
-        log.error('test_acctpolicy: Failed to bind(2nd) as user1: ' + e.message['desc'])
+        log.fatal('test_acctpolicy: Failed to bind(2nd) as user1: ' + e.message['desc'])
         assert False
 
     time.sleep(1)
@@ -220,7 +220,7 @@ def test_acctpolicy(inst, args=None):
     try:
         inst.simple_bind_s(DN_DM, PASSWORD)
     except ldap.LDAPError, e:
-        log.error('test_acctpolicy: Failed to bind as rootDN: ' + e.message['desc'])
+        log.fatal('test_acctpolicy: Failed to bind as rootDN: ' + e.message['desc'])
         assert False
 
     # Check testLastLoginTime was added to USER1
@@ -246,7 +246,7 @@ def test_acctpolicy(inst, args=None):
     try:
         inst.delete_s(USER1_DN)
     except ldap.LDAPError, e:
-        log.error('test_acctpolicy: Failed to delete test entry: ' + e.message['desc'])
+        log.fatal('test_acctpolicy: Failed to delete test entry: ' + e.message['desc'])
         assert False
 
     ############################################################################
@@ -282,7 +282,7 @@ def test_attruniq(inst, args=None):
                       [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'uid')])
 
     except ldap.LDAPError, e:
-        log.error('test_attruniq: Failed to configure plugin for "uid": error ' + e.message['desc'])
+        log.fatal('test_attruniq: Failed to configure plugin for "uid": error ' + e.message['desc'])
         assert False
 
     ############################################################################
@@ -298,7 +298,7 @@ def test_attruniq(inst, args=None):
                                      'mail': 'user1 at example.com',
                                      'userpassword': 'password'})))
     except ldap.LDAPError, e:
-        log.error('test_attruniq: Failed to add test user' + USER1_DN + ': error ' + e.message['desc'])
+        log.fatal('test_attruniq: Failed to add test user' + USER1_DN + ': error ' + e.message['desc'])
         assert False
 
     # Add an entry with a duplicate "uid"
@@ -313,7 +313,7 @@ def test_attruniq(inst, args=None):
     except ldap.CONSTRAINT_VIOLATION:
         pass
     else:
-        log.error('test_attruniq: Adding of 2nd entry(uid) incorrectly succeeded')
+        log.fatal('test_attruniq: Adding of 2nd entry(uid) incorrectly succeeded')
         assert False
 
     ############################################################################
@@ -325,7 +325,7 @@ def test_attruniq(inst, args=None):
                       [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'mail')])
 
     except ldap.LDAPError, e:
-        log.error('test_attruniq: Failed to configure plugin for "mail": error ' + e.message['desc'])
+        log.fatal('test_attruniq: Failed to configure plugin for "mail": error ' + e.message['desc'])
         assert False
 
     ############################################################################
@@ -342,7 +342,7 @@ def test_attruniq(inst, args=None):
     except ldap.CONSTRAINT_VIOLATION:
         pass
     else:
-        log.error('test_attruniq: Adding of 2nd entry(mail) incorrectly succeeded')
+        log.fatal('test_attruniq: Adding of 2nd entry(mail) incorrectly succeeded')
         assert False
 
     ############################################################################
@@ -358,7 +358,7 @@ def test_attruniq(inst, args=None):
     try:
         inst.delete_s(USER1_DN)
     except ldap.LDAPError, e:
-        log.error('test_attruniq: Failed to delete test entry: ' + e.message['desc'])
+        log.fatal('test_attruniq: Failed to delete test entry: ' + e.message['desc'])
         assert False
 
     ############################################################################
@@ -397,7 +397,7 @@ def test_automember(inst, args=None):
                           'cn': 'group'
                           })))
     except ldap.LDAPError, e:
-        log.error('test_automember: Failed to add group: error ' + e.message['desc'])
+        log.fatal('test_automember: Failed to add group: error ' + e.message['desc'])
         assert False
 
     # Add ou=branch1
@@ -407,7 +407,7 @@ def test_automember(inst, args=None):
                           'ou': 'branch1'
                           })))
     except ldap.LDAPError, e:
-        log.error('test_automember: Failed to add branch1: error ' + e.message['desc'])
+        log.fatal('test_automember: Failed to add branch1: error ' + e.message['desc'])
         assert False
 
     # Add ou=branch2
@@ -417,7 +417,7 @@ def test_automember(inst, args=None):
                           'ou': 'branch2'
                           })))
     except ldap.LDAPError, e:
-        log.error('test_automember: Failed to add branch2: error ' + e.message['desc'])
+        log.fatal('test_automember: Failed to add branch2: error ' + e.message['desc'])
         assert False
 
     # Add the automember config entry
@@ -431,7 +431,7 @@ def test_automember(inst, args=None):
                           'autoMemberGroupingAttr': 'member:dn'
                           })))
     except ldap.LDAPError, e:
-        log.error('test_automember: Failed to add config entry: error ' + e.message['desc'])
+        log.fatal('test_automember: Failed to add config entry: error ' + e.message['desc'])
         assert False
 
     ############################################################################
@@ -445,7 +445,7 @@ def test_automember(inst, args=None):
                           'uid': 'user1'
                           })))
     except ldap.LDAPError, e:
-        log.error('test_automember: Failed to add user: error ' + e.message['desc'])
+        log.fatal('test_automember: Failed to add user: error ' + e.message['desc'])
         assert False
 
     # Check the group
@@ -469,7 +469,7 @@ def test_automember(inst, args=None):
                        (ldap.MOD_REPLACE, 'autoMemberScope', 'ou=branch2,' + DEFAULT_SUFFIX)])
 
     except ldap.LDAPError, e:
-        log.error('test_automember: Failed to modify config entry: error ' + e.message['desc'])
+        log.fatal('test_automember: Failed to modify config entry: error ' + e.message['desc'])
         assert False
 
     ############################################################################
@@ -483,7 +483,7 @@ def test_automember(inst, args=None):
                           'uid': 'user2'
                           })))
     except ldap.LDAPError, e:
-        log.error('test_automember: Failed to user to branch2: error ' + e.message['desc'])
+        log.fatal('test_automember: Failed to user to branch2: error ' + e.message['desc'])
         assert False
 
     # Check the group
@@ -511,7 +511,7 @@ def test_automember(inst, args=None):
                           'uid': 'user3'
                           })))
     except ldap.LDAPError, e:
-        log.error('test_automember: Failed to user3 to branch2: error ' + e.message['desc'])
+        log.fatal('test_automember: Failed to user3 to branch2: error ' + e.message['desc'])
         assert False
 
     # Check the group - uniquemember should not exist
@@ -536,7 +536,7 @@ def test_automember(inst, args=None):
                           'basedn': 'ou=branch2,' + DEFAULT_SUFFIX,
                           'filter': 'objectclass=top'})))
     except ldap.LDAPError, e:
-        log.error('test_automember: Failed to add task: error ' + e.message['desc'])
+        log.fatal('test_automember: Failed to add task: error ' + e.message['desc'])
         assert False
 
     wait_for_task(inst, TASK_DN)
@@ -565,43 +565,43 @@ def test_automember(inst, args=None):
     try:
         inst.delete_s(BUSER1_DN)
     except ldap.LDAPError, e:
-        log.error('test_automember: Failed to delete test entry1: ' + e.message['desc'])
+        log.fatal('test_automember: Failed to delete test entry1: ' + e.message['desc'])
         assert False
 
     try:
         inst.delete_s(BUSER2_DN)
     except ldap.LDAPError, e:
-        log.error('test_automember: Failed to delete test entry2: ' + e.message['desc'])
+        log.fatal('test_automember: Failed to delete test entry2: ' + e.message['desc'])
         assert False
 
     try:
         inst.delete_s(BUSER3_DN)
     except ldap.LDAPError, e:
-        log.error('test_automember: Failed to delete test entry3: ' + e.message['desc'])
+        log.fatal('test_automember: Failed to delete test entry3: ' + e.message['desc'])
         assert False
 
     try:
         inst.delete_s(BRANCH1_DN)
     except ldap.LDAPError, e:
-        log.error('test_automember: Failed to delete branch1: ' + e.message['desc'])
+        log.fatal('test_automember: Failed to delete branch1: ' + e.message['desc'])
         assert False
 
     try:
         inst.delete_s(BRANCH2_DN)
     except ldap.LDAPError, e:
-        log.error('test_automember: Failed to delete test branch2: ' + e.message['desc'])
+        log.fatal('test_automember: Failed to delete test branch2: ' + e.message['desc'])
         assert False
 
     try:
         inst.delete_s(GROUP_DN)
     except ldap.LDAPError, e:
-        log.error('test_automember: Failed to delete test group: ' + e.message['desc'])
+        log.fatal('test_automember: Failed to delete test group: ' + e.message['desc'])
         assert False
 
     try:
         inst.delete_s(CONFIG_DN)
     except ldap.LDAPError, e:
-        log.error('test_automember: Failed to delete plugin config entry: ' + e.message['desc'])
+        log.fatal('test_automember: Failed to delete plugin config entry: ' + e.message['desc'])
         assert False
 
     ############################################################################
@@ -649,10 +649,10 @@ def test_dna(inst, args=None):
             inst.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'dnaNextValue', '1'),
                                       (ldap.MOD_REPLACE, 'dnaMagicRegen', '-1')])
         except ldap.LDAPError, e:
-            log.error('test_dna: Failed to set the DNA plugin: error ' + e.message['desc'])
+            log.fatal('test_dna: Failed to set the DNA plugin: error ' + e.message['desc'])
             assert False
     except ldap.LDAPError, e:
-        log.error('test_dna: Failed to add config entry: error ' + e.message['desc'])
+        log.fatal('test_dna: Failed to add config entry: error ' + e.message['desc'])
         assert False
 
     ############################################################################
@@ -665,7 +665,7 @@ def test_dna(inst, args=None):
                           'uid': 'user1'
                           })))
     except ldap.LDAPError, e:
-        log.error('test_dna: Failed to user1: error ' + e.message['desc'])
+        log.fatal('test_dna: Failed to user1: error ' + e.message['desc'])
         assert False
 
     # See if the entry now has the new uidNumber assignment - uidNumber=1
@@ -682,7 +682,7 @@ def test_dna(inst, args=None):
     try:
         inst.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'uidNumber', '-1')])
     except ldap.LDAPError, e:
-        log.error('test_dna: Failed to set the magic reg value: error ' + e.message['desc'])
+        log.fatal('test_dna: Failed to set the magic reg value: error ' + e.message['desc'])
         assert False
 
     # See if the entry now has the new uidNumber assignment - uidNumber=2
@@ -702,7 +702,7 @@ def test_dna(inst, args=None):
     try:
         inst.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'dnaMagicRegen', '-2')])
     except ldap.LDAPError, e:
-        log.error('test_dna: Failed to set the magic reg value to -2: error ' + e.message['desc'])
+        log.fatal('test_dna: Failed to set the magic reg value to -2: error ' + e.message['desc'])
         assert False
 
     ################################################################################
@@ -713,7 +713,7 @@ def test_dna(inst, args=None):
     try:
         inst.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'uidNumber', '-2')])
     except ldap.LDAPError, e:
-        log.error('test_dna: Failed to set the magic reg value: error ' + e.message['desc'])
+        log.fatal('test_dna: Failed to set the magic reg value: error ' + e.message['desc'])
         assert False
 
     # See if the entry now has the new uidNumber assignment - uidNumber=3
@@ -739,7 +739,7 @@ def test_dna(inst, args=None):
     try:
         inst.delete_s(USER1_DN)
     except ldap.LDAPError, e:
-        log.error('test_dna: Failed to delete test entry1: ' + e.message['desc'])
+        log.fatal('test_dna: Failed to delete test entry1: ' + e.message['desc'])
         assert False
 
     inst.plugins.disable(name=PLUGIN_DNA)
@@ -781,7 +781,7 @@ def test_linkedattrs(inst, args=None):
                           'uid': 'user1'
                           })))
     except ldap.LDAPError, e:
-        log.error('test_linkedattrs: Failed to user1: error ' + e.message['desc'])
+        log.fatal('test_linkedattrs: Failed to user1: error ' + e.message['desc'])
         assert False
 
     try:
@@ -790,7 +790,7 @@ def test_linkedattrs(inst, args=None):
                           'uid': 'user2'
                           })))
     except ldap.LDAPError, e:
-        log.error('test_linkedattrs: Failed to user1: error ' + e.message['desc'])
+        log.fatal('test_linkedattrs: Failed to user1: error ' + e.message['desc'])
         assert False
 
     # Add the linked attrs config entry
@@ -802,7 +802,7 @@ def test_linkedattrs(inst, args=None):
                           'managedType': 'manager'
                           })))
     except ldap.LDAPError, e:
-        log.error('test_linkedattrs: Failed to add config entry: error ' + e.message['desc'])
+        log.fatal('test_linkedattrs: Failed to add config entry: error ' + e.message['desc'])
         assert False
 
     ############################################################################
@@ -813,7 +813,7 @@ def test_linkedattrs(inst, args=None):
     try:
         inst.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'directReport', USER2_DN)])
     except ldap.LDAPError, e:
-        log.error('test_linkedattrs: Failed to add "directReport" to user1: error ' + e.message['desc'])
+        log.fatal('test_linkedattrs: Failed to add "directReport" to user1: error ' + e.message['desc'])
         assert False
 
     # See if manager was added to the other entry
@@ -830,7 +830,7 @@ def test_linkedattrs(inst, args=None):
     try:
         inst.modify_s(USER1_DN, [(ldap.MOD_DELETE, 'directReport', None)])
     except ldap.LDAPError, e:
-        log.error('test_linkedattrs: Failed to delete directReport: error ' + e.message['desc'])
+        log.fatal('test_linkedattrs: Failed to delete directReport: error ' + e.message['desc'])
         assert False
 
     # See if manager was removed
@@ -861,7 +861,7 @@ def test_linkedattrs(inst, args=None):
     try:
         inst.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'directReport', USER2_DN)])
     except ldap.LDAPError, e:
-        log.error('test_linkedattrs: Failed to add "directReport" to user1: error ' + e.message['desc'])
+        log.fatal('test_linkedattrs: Failed to add "directReport" to user1: error ' + e.message['desc'])
         assert False
 
     # See if manager was added to the other entry, better not be...
@@ -878,7 +878,7 @@ def test_linkedattrs(inst, args=None):
     try:
         inst.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'indirectReport', USER2_DN)])
     except ldap.LDAPError, e:
-        log.error('test_linkedattrs: Failed to add "indirectReport" to user1: error ' + e.message['desc'])
+        log.fatal('test_linkedattrs: Failed to add "indirectReport" to user1: error ' + e.message['desc'])
         assert False
 
     # See if manager was added to the other entry, better not be
@@ -895,7 +895,7 @@ def test_linkedattrs(inst, args=None):
     try:
         inst.modify_s(USER1_DN, [(ldap.MOD_DELETE, 'indirectReport', None)])
     except ldap.LDAPError, e:
-        log.error('test_linkedattrs: Failed to delete directReport: error ' + e.message['desc'])
+        log.fatal('test_linkedattrs: Failed to delete directReport: error ' + e.message['desc'])
         assert False
 
     # See if manager was removed
@@ -918,7 +918,7 @@ def test_linkedattrs(inst, args=None):
     try:
         inst.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'indirectReport', USER2_DN)])
     except ldap.LDAPError, e:
-        log.error('test_linkedattrs: Failed to add "indirectReport" to user1: error ' + e.message['desc'])
+        log.fatal('test_linkedattrs: Failed to add "indirectReport" to user1: error ' + e.message['desc'])
         assert False
 
     # The entry should not have a manager attribute
@@ -942,7 +942,7 @@ def test_linkedattrs(inst, args=None):
                           'basedn': DEFAULT_SUFFIX,
                           'filter': 'objectclass=top'})))
     except ldap.LDAPError, e:
-        log.error('test_linkedattrs: Failed to add task: error ' + e.message['desc'])
+        log.fatal('test_linkedattrs: Failed to add task: error ' + e.message['desc'])
         assert False
 
     wait_for_task(inst, TASK_DN)
@@ -970,19 +970,19 @@ def test_linkedattrs(inst, args=None):
     try:
         inst.delete_s(USER1_DN)
     except ldap.LDAPError, e:
-        log.error('test_linkedattrs: Failed to delete test entry1: ' + e.message['desc'])
+        log.fatal('test_linkedattrs: Failed to delete test entry1: ' + e.message['desc'])
         assert False
 
     try:
         inst.delete_s(USER2_DN)
     except ldap.LDAPError, e:
-        log.error('test_linkedattrs: Failed to delete test entry2: ' + e.message['desc'])
+        log.fatal('test_linkedattrs: Failed to delete test entry2: ' + e.message['desc'])
         assert False
 
     try:
         inst.delete_s(CONFIG_DN)
     except ldap.LDAPError, e:
-        log.error('test_linkedattrs: Failed to delete plugin config entry: ' + e.message['desc'])
+        log.fatal('test_linkedattrs: Failed to delete plugin config entry: ' + e.message['desc'])
         assert False
 
     ############################################################################
@@ -1018,7 +1018,7 @@ def test_memberof(inst, args=None):
     try:
         inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'member')])
     except ldap.LDAPError, e:
-        log.error('test_memberof: Failed to update config(member): error ' + e.message['desc'])
+        log.fatal('test_memberof: Failed to update config(member): error ' + e.message['desc'])
         assert False
 
     ############################################################################
@@ -1032,7 +1032,7 @@ def test_memberof(inst, args=None):
                           'uid': 'user1'
                           })))
     except ldap.LDAPError, e:
-        log.error('test_memberof: Failed to add user1: error ' + e.message['desc'])
+        log.fatal('test_memberof: Failed to add user1: error ' + e.message['desc'])
         assert False
 
     try:
@@ -1042,7 +1042,7 @@ def test_memberof(inst, args=None):
                           'member': USER1_DN
                           })))
     except ldap.LDAPError, e:
-        log.error('test_memberof: Failed to add group: error ' + e.message['desc'])
+        log.fatal('test_memberof: Failed to add group: error ' + e.message['desc'])
         assert False
 
     try:
@@ -1052,7 +1052,7 @@ def test_memberof(inst, args=None):
                           'memberofattr': 'memberof'
                           })))
     except ldap.LDAPError, e:
-        log.error('test_memberof: Failed to shared config entry: error ' + e.message['desc'])
+        log.fatal('test_memberof: Failed to shared config entry: error ' + e.message['desc'])
         assert False
 
     # Check if the user now has a "memberOf" attribute
@@ -1069,7 +1069,7 @@ def test_memberof(inst, args=None):
     try:
         inst.modify_s(GROUP_DN, [(ldap.MOD_DELETE, 'member', None)])
     except ldap.LDAPError, e:
-        log.error('test_memberof: Failed to delete member: error ' + e.message['desc'])
+        log.fatal('test_memberof: Failed to delete member: error ' + e.message['desc'])
         assert False
 
     # Check that "memberOf" was removed
@@ -1089,7 +1089,7 @@ def test_memberof(inst, args=None):
     try:
         inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'uniquemember')])
     except ldap.LDAPError, e:
-        log.error('test_memberof: Failed to update config(uniquemember): error ' + e.message['desc'])
+        log.fatal('test_memberof: Failed to update config(uniquemember): error ' + e.message['desc'])
         assert False
 
     ############################################################################
@@ -1099,7 +1099,7 @@ def test_memberof(inst, args=None):
     try:
         inst.modify_s(GROUP_DN, [(ldap.MOD_REPLACE, 'uniquemember', USER1_DN)])
     except ldap.LDAPError, e:
-        log.error('test_memberof: Failed to add uniquemember: error ' + e.message['desc'])
+        log.fatal('test_memberof: Failed to add uniquemember: error ' + e.message['desc'])
         assert False
 
     # Check if the user now has a "memberOf" attribute
@@ -1116,7 +1116,7 @@ def test_memberof(inst, args=None):
     try:
         inst.modify_s(GROUP_DN, [(ldap.MOD_DELETE, 'uniquemember', None)])
     except ldap.LDAPError, e:
-        log.error('test_memberof: Failed to delete member: error ' + e.message['desc'])
+        log.fatal('test_memberof: Failed to delete member: error ' + e.message['desc'])
         assert False
 
     # Check that "memberOf" was removed
@@ -1137,20 +1137,20 @@ def test_memberof(inst, args=None):
     try:
         inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, CONFIG_AREA, SHARED_CONFIG_DN)])
     except ldap.LDAPError, e:
-        log.error('test_memberof: Failed to set plugin area: error ' + e.message['desc'])
+        log.fatal('test_memberof: Failed to set plugin area: error ' + e.message['desc'])
         assert False
 
     # Delete the test entries then readd them to start with a clean slate
     try:
         inst.delete_s(USER1_DN)
     except ldap.LDAPError, e:
-        log.error('test_memberof: Failed to delete test entry1: ' + e.message['desc'])
+        log.fatal('test_memberof: Failed to delete test entry1: ' + e.message['desc'])
         assert False
 
     try:
         inst.delete_s(GROUP_DN)
     except ldap.LDAPError, e:
-        log.error('test_memberof: Failed to delete test group: ' + e.message['desc'])
+        log.fatal('test_memberof: Failed to delete test group: ' + e.message['desc'])
         assert False
 
     try:
@@ -1159,7 +1159,7 @@ def test_memberof(inst, args=None):
                           'uid': 'user1'
                           })))
     except ldap.LDAPError, e:
-        log.error('test_memberof: Failed to add user1: error ' + e.message['desc'])
+        log.fatal('test_memberof: Failed to add user1: error ' + e.message['desc'])
         assert False
 
     try:
@@ -1169,7 +1169,7 @@ def test_memberof(inst, args=None):
                           'member': USER1_DN
                           })))
     except ldap.LDAPError, e:
-        log.error('test_memberof: Failed to add group: error ' + e.message['desc'])
+        log.fatal('test_memberof: Failed to add group: error ' + e.message['desc'])
         assert False
 
     # Test the shared config
@@ -1187,7 +1187,7 @@ def test_memberof(inst, args=None):
     try:
         inst.modify_s(GROUP_DN, [(ldap.MOD_DELETE, 'member', None)])
     except ldap.LDAPError, e:
-        log.error('test_memberof: Failed to delete member: error ' + e.message['desc'])
+        log.fatal('test_memberof: Failed to delete member: error ' + e.message['desc'])
         assert False
 
     # Check that "memberOf" was removed
@@ -1207,14 +1207,14 @@ def test_memberof(inst, args=None):
     try:
         inst.modify_s(SHARED_CONFIG_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'uniquemember')])
     except ldap.LDAPError, e:
-        log.error('test_memberof: Failed to set shared plugin entry(uniquemember): error '
+        log.fatal('test_memberof: Failed to set shared plugin entry(uniquemember): error '
             + e.message['desc'])
         assert False
 
     try:
         inst.modify_s(GROUP_DN, [(ldap.MOD_REPLACE, 'uniquemember', USER1_DN)])
     except ldap.LDAPError, e:
-        log.error('test_memberof: Failed to add uniquemember: error ' + e.message['desc'])
+        log.fatal('test_memberof: Failed to add uniquemember: error ' + e.message['desc'])
         assert False
 
     # Check if the user now has a "memberOf" attribute
@@ -1231,7 +1231,7 @@ def test_memberof(inst, args=None):
     try:
         inst.modify_s(GROUP_DN, [(ldap.MOD_DELETE, 'uniquemember', None)])
     except ldap.LDAPError, e:
-        log.error('test_memberof: Failed to delete member: error ' + e.message['desc'])
+        log.fatal('test_memberof: Failed to delete member: error ' + e.message['desc'])
         assert False
 
     # Check that "memberOf" was removed
@@ -1252,20 +1252,20 @@ def test_memberof(inst, args=None):
     try:
         inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'member')])
     except ldap.LDAPError, e:
-        log.error('test_memberof: Failed to update config(uniquemember): error ' + e.message['desc'])
+        log.fatal('test_memberof: Failed to update config(uniquemember): error ' + e.message['desc'])
         assert False
 
     # Remove shared config from plugin
     try:
         inst.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, CONFIG_AREA, None)])
     except ldap.LDAPError, e:
-        log.error('test_memberof: Failed to add uniquemember: error ' + e.message['desc'])
+        log.fatal('test_memberof: Failed to add uniquemember: error ' + e.message['desc'])
         assert False
 
     try:
         inst.modify_s(GROUP_DN, [(ldap.MOD_REPLACE, 'member', USER1_DN)])
     except ldap.LDAPError, e:
-        log.error('test_memberof: Failed to add uniquemember: error ' + e.message['desc'])
+        log.fatal('test_memberof: Failed to add uniquemember: error ' + e.message['desc'])
         assert False
 
     # Check if the user now has a "memberOf" attribute
@@ -1282,7 +1282,7 @@ def test_memberof(inst, args=None):
     try:
         inst.modify_s(GROUP_DN, [(ldap.MOD_DELETE, 'member', None)])
     except ldap.LDAPError, e:
-        log.error('test_memberof: Failed to delete member: error ' + e.message['desc'])
+        log.fatal('test_memberof: Failed to delete member: error ' + e.message['desc'])
         assert False
 
     # Check that "memberOf" was removed
@@ -1305,14 +1305,14 @@ def test_memberof(inst, args=None):
     try:
         inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'uniquemember')])
     except ldap.LDAPError, e:
-        log.error('test_memberof: Failed to update config(uniquemember): error ' + e.message['desc'])
+        log.fatal('test_memberof: Failed to update config(uniquemember): error ' + e.message['desc'])
         assert False
 
     # Add uniquemember, should not update USER1
     try:
         inst.modify_s(GROUP_DN, [(ldap.MOD_REPLACE, 'uniquemember', USER1_DN)])
     except ldap.LDAPError, e:
-        log.error('test_memberof: Failed to add uniquemember: error ' + e.message['desc'])
+        log.fatal('test_memberof: Failed to add uniquemember: error ' + e.message['desc'])
         assert False
 
     # Check for "memberOf"
@@ -1335,7 +1335,7 @@ def test_memberof(inst, args=None):
                           'basedn': DEFAULT_SUFFIX,
                           'filter': 'objectclass=top'})))
     except ldap.LDAPError, e:
-        log.error('test_memberof: Failed to add task: error ' + e.message['desc'])
+        log.fatal('test_memberof: Failed to add task: error ' + e.message['desc'])
         assert False
 
     wait_for_task(inst, TASK_DN)
@@ -1363,19 +1363,19 @@ def test_memberof(inst, args=None):
     try:
         inst.delete_s(USER1_DN)
     except ldap.LDAPError, e:
-        log.error('test_memberof: Failed to delete test entry1: ' + e.message['desc'])
+        log.fatal('test_memberof: Failed to delete test entry1: ' + e.message['desc'])
         assert False
 
     try:
         inst.delete_s(GROUP_DN)
     except ldap.LDAPError, e:
-        log.error('test_memberof: Failed to delete test group: ' + e.message['desc'])
+        log.fatal('test_memberof: Failed to delete test group: ' + e.message['desc'])
         assert False
 
     try:
         inst.delete_s(SHARED_CONFIG_DN)
     except ldap.LDAPError, e:
-        log.error('test_memberof: Failed to delete shared config entry: ' + e.message['desc'])
+        log.fatal('test_memberof: Failed to delete shared config entry: ' + e.message['desc'])
         assert False
 
     ############################################################################
@@ -1422,7 +1422,7 @@ def test_mep(inst, args=None):
     except ldap.ALREADY_EXISTS:
         pass
     except ldap.LDAPError, e:
-        log.error('test_mep: Failed to add people org unit: error ' + e.message['desc'])
+        log.fatal('test_mep: Failed to add people org unit: error ' + e.message['desc'])
         assert False
 
     try:
@@ -1432,7 +1432,7 @@ def test_mep(inst, args=None):
     except ldap.ALREADY_EXISTS:
         pass
     except ldap.LDAPError, e:
-        log.error('test_mep: Failed to add people org unit: error ' + e.message['desc'])
+        log.fatal('test_mep: Failed to add people org unit: error ' + e.message['desc'])
         assert False
 
     # Add the template entry
@@ -1445,7 +1445,7 @@ def test_mep(inst, args=None):
                    'mepMappedAttr': 'cn: $cn|uid: $cn|gidNumber: $uidNumber'.split('|')
                    })))
     except ldap.LDAPError, e:
-        log.error('test_mep: Failed to add template entry: error ' + e.message['desc'])
+        log.fatal('test_mep: Failed to add template entry: error ' + e.message['desc'])
         assert False
 
     # Add the config entry
@@ -1459,7 +1459,7 @@ def test_mep(inst, args=None):
                           'managedTemplate': TEMPLATE_DN
                           })))
     except ldap.LDAPError, e:
-        log.error('test_mep: Failed to add config entry: error ' + e.message['desc'])
+        log.fatal('test_mep: Failed to add config entry: error ' + e.message['desc'])
         assert False
 
     ############################################################################
@@ -1477,7 +1477,7 @@ def test_mep(inst, args=None):
                           'homeDirectory': '/home/user1'
                           })))
     except ldap.LDAPError, e:
-        log.error('test_mep: Failed to user1: error ' + e.message['desc'])
+        log.fatal('test_mep: Failed to add user1: error ' + e.message['desc'])
         assert False
 
     # Check if a managed group entry was created
@@ -1501,14 +1501,14 @@ def test_mep(inst, args=None):
                    'mepMappedAttr': 'cn: $uid|uid: $cn|gidNumber: $gidNumber'.split('|')
                    })))
     except ldap.LDAPError, e:
-        log.error('test_mep: Failed to add template entry2: error ' + e.message['desc'])
+        log.fatal('test_mep: Failed to add template entry2: error ' + e.message['desc'])
         assert False
 
     # Set the new template dn
     try:
         inst.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'managedTemplate', TEMPLATE_DN2)])
     except ldap.LDAPError, e:
-        log.error('test_mep: Failed to set mep plugin config: error ' + e.message['desc'])
+        log.fatal('test_mep: Failed to set mep plugin config: error ' + e.message['desc'])
         assert False
 
     ############################################################################
@@ -1526,7 +1526,7 @@ def test_mep(inst, args=None):
                           'homeDirectory': '/home/user2'
                           })))
     except ldap.LDAPError, e:
-        log.error('test_mep: Failed to user2: error ' + e.message['desc'])
+        log.fatal('test_mep: Failed to add user2: error ' + e.message['desc'])
         assert False
 
     # Check if a managed group entry was created
@@ -1549,19 +1549,19 @@ def test_mep(inst, args=None):
     try:
         inst.delete_s(USER_DN)
     except ldap.LDAPError, e:
-        log.error('test_mep: Failed to delete test user1: ' + e.message['desc'])
+        log.fatal('test_mep: Failed to delete test user1: ' + e.message['desc'])
         assert False
 
     try:
         inst.delete_s(USER_DN2)
     except ldap.LDAPError, e:
-        log.error('test_mep: Failed to delete test user 2: ' + e.message['desc'])
+        log.fatal('test_mep: Failed to delete test user 2: ' + e.message['desc'])
         assert False
 
     try:
         inst.delete_s(TEMPLATE_DN)
     except ldap.LDAPError, e:
-        log.error('test_mep: Failed to delete template1: ' + e.message['desc'])
+        log.fatal('test_mep: Failed to delete template1: ' + e.message['desc'])
         assert False
 
     inst.plugins.disable(name=PLUGIN_MANAGED_ENTRY)
@@ -1569,13 +1569,13 @@ def test_mep(inst, args=None):
     try:
         inst.delete_s(TEMPLATE_DN2)
     except ldap.LDAPError, e:
-        log.error('test_mep: Failed to delete template2: ' + e.message['desc'])
+        log.fatal('test_mep: Failed to delete template2: ' + e.message['desc'])
         assert False
 
     try:
         inst.delete_s(CONFIG_DN)
     except ldap.LDAPError, e:
-        log.error('test_mep: Failed to delete config: ' + e.message['desc'])
+        log.fatal('test_mep: Failed to delete config: ' + e.message['desc'])
         assert False
 
     ############################################################################
@@ -1637,7 +1637,7 @@ def test_passthru(inst, args=None):
     except ldap.ALREADY_EXISTS:
         pass
     except ldap.LDAPError, e:
-        log.error('test_passthru: Failed to create suffix entry: error ' + e.message['desc'])
+        log.fatal('test_passthru: Failed to create suffix entry: error ' + e.message['desc'])
         passthru_inst.delete()
         assert False
 
@@ -1649,7 +1649,7 @@ def test_passthru(inst, args=None):
                           'userpassword': 'password'
                           })))
     except ldap.LDAPError, e:
-        log.error('test_passthru: Failed to admin1: error ' + e.message['desc'])
+        log.fatal('test_passthru: Failed to add admin1: error ' + e.message['desc'])
         passthru_inst.delete()
         assert False
 
@@ -1661,7 +1661,7 @@ def test_passthru(inst, args=None):
                           'userpassword': 'password'
                           })))
     except ldap.LDAPError, e:
-        log.error('test_passthru: Failed to admin2 : error ' + e.message['desc'])
+        log.fatal('test_passthru: Failed to add admin2: error ' + e.message['desc'])
         passthru_inst.delete()
         assert False
 
@@ -1673,7 +1673,7 @@ def test_passthru(inst, args=None):
         inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'nsslapd-pluginenabled', 'on'),
                                   (ldap.MOD_REPLACE, 'nsslapd-pluginarg0', 'ldap://127.0.0.1:33333/dc=pass,dc=thru')])
     except ldap.LDAPError, e:
-        log.error('test_passthru: Failed to set mep plugin config: error ' + e.message['desc'])
+        log.fatal('test_passthru: Failed to set passthru plugin config: error ' + e.message['desc'])
         passthru_inst.delete()
         assert False
 
@@ -1685,7 +1685,7 @@ def test_passthru(inst, args=None):
     try:
         inst.simple_bind_s(PASSTHRU_DN, "password")
     except ldap.LDAPError, e:
-        log.error('test_passthru: pass through bind failed: ' + e.message['desc'])
+        log.fatal('test_passthru: pass through bind failed: ' + e.message['desc'])
         passthru_inst.delete()
         assert False
 
@@ -1697,14 +1697,14 @@ def test_passthru(inst, args=None):
     try:
         inst.simple_bind_s(DN_DM, PASSWORD)
     except ldap.LDAPError, e:
-        log.error('test_passthru: pass through bind failed: ' + e.message['desc'])
+        log.fatal('test_passthru: pass through bind failed: ' + e.message['desc'])
         passthru_inst.delete()
         assert False
 
     try:
         inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', 'ldap://127.0.0.1:33333/dc=pass2,dc=thru')])
     except ldap.LDAPError, e:
-        log.error('test_passthru: Failed to set mep plugin config: error ' + e.message['desc'])
+        log.fatal('test_passthru: Failed to set passthru plugin config: error ' + e.message['desc'])
         passthru_inst.delete()
         assert False
 
@@ -1716,7 +1716,7 @@ def test_passthru(inst, args=None):
     try:
         inst.simple_bind_s(PASSTHRU_DN2, "password")
     except ldap.LDAPError, e:
-        log.error('test_passthru: pass through bind failed: ' + e.message['desc'])
+        log.fatal('test_passthru: pass through bind failed: ' + e.message['desc'])
         passthru_inst.delete()
         assert False
 
@@ -1724,7 +1724,7 @@ def test_passthru(inst, args=None):
     try:
         inst.simple_bind_s(DN_DM, PASSWORD)
     except ldap.LDAPError, e:
-        log.error('test_passthru: pass through bind failed: ' + e.message['desc'])
+        log.fatal('test_passthru: pass through bind failed: ' + e.message['desc'])
         passthru_inst.delete()
         assert False
 
@@ -1774,7 +1774,7 @@ def test_referint(inst, args=None):
     try:
         inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'referint-membership-attr', 'member')])
     except ldap.LDAPError, e:
-        log.error('test_referint: Failed to configure RI plugin: error ' + e.message['desc'])
+        log.fatal('test_referint: Failed to configure RI plugin: error ' + e.message['desc'])
         assert False
 
     ############################################################################
@@ -1788,7 +1788,7 @@ def test_referint(inst, args=None):
                           'uid': 'user1'
                           })))
     except ldap.LDAPError, e:
-        log.error('test_referint: Failed to add user1: error ' + e.message['desc'])
+        log.fatal('test_referint: Failed to add user1: error ' + e.message['desc'])
         assert False
 
     try:
@@ -1797,7 +1797,7 @@ def test_referint(inst, args=None):
                           'uid': 'user2'
                           })))
     except ldap.LDAPError, e:
-        log.error('test_referint: Failed to add user2: error ' + e.message['desc'])
+        log.fatal('test_referint: Failed to add user2: error ' + e.message['desc'])
         assert False
 
     try:
@@ -1808,7 +1808,7 @@ def test_referint(inst, args=None):
                           'uniquemember': USER2_DN
                           })))
     except ldap.LDAPError, e:
-        log.error('test_referint: Failed to add group: error ' + e.message['desc'])
+        log.fatal('test_referint: Failed to add group: error ' + e.message['desc'])
         assert False
 
     # Grab the referint log file from the plugin
@@ -1830,21 +1830,21 @@ def test_referint(inst, args=None):
                           'referint-logchanges': '0'
                           })))
     except ldap.LDAPError, e:
-        log.error('test_referint: Failed to shared config entry: error ' + e.message['desc'])
+        log.fatal('test_referint: Failed to add shared config entry: error ' + e.message['desc'])
         assert False
 
     # Delete a user
     try:
         inst.delete_s(USER1_DN)
     except ldap.LDAPError, e:
-        log.error('test_referint: Failed to delete user1: ' + e.message['desc'])
+        log.fatal('test_referint: Failed to delete user1: ' + e.message['desc'])
         assert False
 
     # Check for integrity
     try:
         entry = inst.search_s(GROUP_DN, ldap.SCOPE_BASE, '(member=' + USER1_DN + ')')
         if entry:
-            log.error('test_referint: user1 was not removed from group')
+            log.fatal('test_referint: user1 was not removed from group')
             assert False
     except ldap.LDAPError, e:
         log.fatal('test_referint: Unable to search group: ' + e.message['desc'])
@@ -1857,7 +1857,7 @@ def test_referint(inst, args=None):
     try:
         inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'referint-membership-attr', 'uniquemember')])
     except ldap.LDAPError, e:
-        log.error('test_referint: Failed to configure RI plugin: error ' + e.message['desc'])
+        log.fatal('test_referint: Failed to configure RI plugin: error ' + e.message['desc'])
         assert False
 
     ############################################################################
@@ -1868,14 +1868,14 @@ def test_referint(inst, args=None):
     try:
         inst.delete_s(USER2_DN)
     except ldap.LDAPError, e:
-        log.error('test_referint: Failed to delete user1: ' + e.message['desc'])
+        log.fatal('test_referint: Failed to delete user2: ' + e.message['desc'])
         assert False
 
     # Check for integrity
     try:
         entry = inst.search_s(GROUP_DN, ldap.SCOPE_BASE, '(uniquemember=' + USER2_DN + ')')
         if entry:
-            log.error('test_referint: user2 was not removed from group')
+            log.fatal('test_referint: user2 was not removed from group')
             assert False
     except ldap.LDAPError, e:
         log.fatal('test_referint: Unable to search group: ' + e.message['desc'])
@@ -1889,14 +1889,14 @@ def test_referint(inst, args=None):
     try:
         inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, CONFIG_AREA, SHARED_CONFIG_DN)])
     except ldap.LDAPError, e:
-        log.error('test_referint: Failed to set plugin area: error ' + e.message['desc'])
+        log.fatal('test_referint: Failed to set plugin area: error ' + e.message['desc'])
         assert False
 
     # Delete the group, and readd everything
     try:
         inst.delete_s(GROUP_DN)
     except ldap.LDAPError, e:
-        log.error('test_referint: Failed to delete group: ' + e.message['desc'])
+        log.fatal('test_referint: Failed to delete group: ' + e.message['desc'])
         assert False
 
     try:
@@ -1905,7 +1905,7 @@ def test_referint(inst, args=None):
                           'uid': 'user1'
                           })))
     except ldap.LDAPError, e:
-        log.error('test_referint: Failed to add user1: error ' + e.message['desc'])
+        log.fatal('test_referint: Failed to add user1: error ' + e.message['desc'])
         assert False
 
     try:
@@ -1914,7 +1914,7 @@ def test_referint(inst, args=None):
                           'uid': 'user2'
                           })))
     except ldap.LDAPError, e:
-        log.error('test_referint: Failed to add user2: error ' + e.message['desc'])
+        log.fatal('test_referint: Failed to add user2: error ' + e.message['desc'])
         assert False
 
     try:
@@ -1925,21 +1925,21 @@ def test_referint(inst, args=None):
                           'uniquemember': USER2_DN
                           })))
     except ldap.LDAPError, e:
-        log.error('test_referint: Failed to add group: error ' + e.message['desc'])
+        log.fatal('test_referint: Failed to add group: error ' + e.message['desc'])
         assert False
 
     # Delete a user
     try:
         inst.delete_s(USER1_DN)
     except ldap.LDAPError, e:
-        log.error('test_referint: Failed to delete user1: ' + e.message['desc'])
+        log.fatal('test_referint: Failed to delete user1: ' + e.message['desc'])
         assert False
 
     # Check for integrity
     try:
         entry = inst.search_s(GROUP_DN, ldap.SCOPE_BASE, '(member=' + USER1_DN + ')')
         if entry:
-            log.error('test_referint: user1 was not removed from group')
+            log.fatal('test_referint: user1 was not removed from group')
             assert False
     except ldap.LDAPError, e:
         log.fatal('test_referint: Unable to search group: ' + e.message['desc'])
@@ -1952,7 +1952,7 @@ def test_referint(inst, args=None):
     try:
         inst.modify_s(SHARED_CONFIG_DN, [(ldap.MOD_REPLACE, 'referint-membership-attr', 'uniquemember')])
     except ldap.LDAPError, e:
-        log.error('test_referint: Failed to set shared plugin entry(uniquemember): error '
+        log.fatal('test_referint: Failed to set shared plugin entry(uniquemember): error '
             + e.message['desc'])
         assert False
 
@@ -1960,14 +1960,14 @@ def test_referint(inst, args=None):
     try:
         inst.delete_s(USER2_DN)
     except ldap.LDAPError, e:
-        log.error('test_referint: Failed to delete user1: ' + e.message['desc'])
+        log.fatal('test_referint: Failed to delete user2: ' + e.message['desc'])
         assert False
 
     # Check for integrity
     try:
         entry = inst.search_s(GROUP_DN, ldap.SCOPE_BASE, '(uniquemember=' + USER2_DN + ')')
         if entry:
-            log.error('test_referint: user2 was not removed from group')
+            log.fatal('test_referint: user2 was not removed from group')
             assert False
     except ldap.LDAPError, e:
         log.fatal('test_referint: Unable to search group: ' + e.message['desc'])
@@ -1981,14 +1981,14 @@ def test_referint(inst, args=None):
     try:
         inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'referint-membership-attr', 'member')])
     except ldap.LDAPError, e:
-        log.error('test_referint: Failed to update config(uniquemember): error ' + e.message['desc'])
+        log.fatal('test_referint: Failed to update config (member): error ' + e.message['desc'])
         assert False
 
     # Remove shared config from plugin
     try:
         inst.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, CONFIG_AREA, None)])
     except ldap.LDAPError, e:
-        log.error('test_referint: Failed to add uniquemember: error ' + e.message['desc'])
+        log.fatal('test_referint: Failed to remove shared config from plugin: error ' + e.message['desc'])
         assert False
 
     # Add test user
@@ -1998,28 +1998,28 @@ def test_referint(inst, args=None):
                           'uid': 'user1'
                           })))
     except ldap.LDAPError, e:
-        log.error('test_referint: Failed to add user1: error ' + e.message['desc'])
+        log.fatal('test_referint: Failed to add user1: error ' + e.message['desc'])
         assert False
 
     # Add user to group
     try:
         inst.modify_s(GROUP_DN, [(ldap.MOD_REPLACE, 'member', USER1_DN)])
     except ldap.LDAPError, e:
-        log.error('test_referint: Failed to add uniquemember: error ' + e.message['desc'])
+        log.fatal('test_referint: Failed to add member to group: error ' + e.message['desc'])
         assert False
 
     # Delete a user
     try:
         inst.delete_s(USER1_DN)
     except ldap.LDAPError, e:
-        log.error('test_referint: Failed to delete user1: ' + e.message['desc'])
+        log.fatal('test_referint: Failed to delete user1: ' + e.message['desc'])
         assert False
 
     # Check for integrity
     try:
         entry = inst.search_s(GROUP_DN, ldap.SCOPE_BASE, '(member=' + USER1_DN + ')')
         if entry:
-            log.error('test_referint: user1 was not removed from group')
+            log.fatal('test_referint: user1 was not removed from group')
             assert False
     except ldap.LDAPError, e:
         log.fatal('test_referint: Unable to search group: ' + e.message['desc'])
@@ -2038,13 +2038,13 @@ def test_referint(inst, args=None):
     try:
         inst.delete_s(GROUP_DN)
     except ldap.LDAPError, e:
-        log.error('test_referint: Failed to delete group: ' + e.message['desc'])
+        log.fatal('test_referint: Failed to delete group: ' + e.message['desc'])
         assert False
 
     try:
         inst.delete_s(SHARED_CONFIG_DN)
     except ldap.LDAPError, e:
-        log.error('test_referint: Failed to delete shared config entry: ' + e.message['desc'])
+        log.fatal('test_referint: Failed to delete shared config entry: ' + e.message['desc'])
         assert False
 
     ############################################################################
@@ -2079,7 +2079,7 @@ def test_retrocl(inst, args=None):
     try:
         entry = inst.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(changenumber=*)')
     except ldap.LDAPError, e:
-        log.error('test_retrocl: Failed to get the count: error ' + e.message['desc'])
+        log.fatal('test_retrocl: Failed to get the count: error ' + e.message['desc'])
         assert False
 
     entry_count = len(entry)
@@ -2095,14 +2095,14 @@ def test_retrocl(inst, args=None):
                           'uid': 'user1'
                           })))
     except ldap.LDAPError, e:
-        log.error('test_retrocl: Failed to add user1: error ' + e.message['desc'])
+        log.fatal('test_retrocl: Failed to add user1: error ' + e.message['desc'])
         assert False
 
     # Check we logged this in the retro cl
     try:
         entry = inst.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(changenumber=*)')
         if not entry or len(entry) == entry_count:
-            log.error('test_retrocl: changelog not updated')
+            log.fatal('test_retrocl: changelog not updated')
             assert False
     except ldap.LDAPError, e:
         log.fatal('test_retrocl: Unable to search group: ' + e.message['desc'])
@@ -2123,14 +2123,14 @@ def test_retrocl(inst, args=None):
     try:
         inst.delete_s(USER1_DN)
     except ldap.LDAPError, e:
-        log.error('test_retrocl: Failed to delete user1: ' + e.message['desc'])
+        log.fatal('test_retrocl: Failed to delete user1: ' + e.message['desc'])
         assert False
 
     # Check that we didn't log this in the retro cl
     try:
         entry = inst.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(changenumber=*)')
         if len(entry) != entry_count:
-            log.error('test_retrocl: changelog incorrectly updated - change count: '
+            log.fatal('test_retrocl: changelog incorrectly updated - change count: '
                 + str(len(entry)) + ' - expected 1')
             assert False
     except ldap.LDAPError, e:
@@ -2188,22 +2188,23 @@ def test_rootdn(inst, args=None):
                           'userpassword': 'password'
                           })))
     except ldap.LDAPError, e:
-        log.error('test_rootdn: Failed to add user1: error ' + e.message['desc'])
+        log.fatal('test_rootdn: Failed to add user1: error ' + e.message['desc'])
         assert False
 
     # Set an aci so we can modify the plugin after we deny the root dn
-    ACI = '(target ="ldap:///cn=config")(targetattr = "*")(version 3.0;acl "all access";allow (all)(userdn="ldap:///anyone");)'
+    ACI = ('(target ="ldap:///cn=config")(targetattr = "*")(version 3.0;acl ' +
+           '"all access";allow (all)(userdn="ldap:///anyone");)')
     try:
         inst.modify_s(DN_CONFIG, [(ldap.MOD_ADD, 'aci', ACI)])
     except ldap.LDAPError, e:
-        log.error('test_rootdn: Failed to add aci to config: error ' + e.message['desc'])
+        log.fatal('test_rootdn: Failed to add aci to config: error ' + e.message['desc'])
         assert False
 
     # Set allowed IP to an unknown host - blocks root dn
     try:
         inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-ip', '10.10.10.10')])
     except ldap.LDAPError, e:
-        log.error('test_rootdn: Failed to set rootDN plugin config: error ' + e.message['desc'])
+        log.fatal('test_rootdn: Failed to set rootDN plugin config: error ' + e.message['desc'])
         assert False
 
     ############################################################################
@@ -2218,7 +2219,7 @@ def test_rootdn(inst, args=None):
         failed = True
 
     if not failed:
-        log.error('test_rootdn: Root DN was incorrectly able to bind')
+        log.fatal('test_rootdn: Root DN was incorrectly able to bind')
         assert False
 
     ############################################################################
@@ -2229,7 +2230,7 @@ def test_rootdn(inst, args=None):
     try:
         inst.simple_bind_s(USER1_DN, 'password')
     except ldap.LDAPError, e:
-        log.error('test_rootdn: failed to bind as user1')
+        log.fatal('test_rootdn: failed to bind as user1')
         assert False
 
     # First, test that invalid plugin changes are rejected
@@ -2251,7 +2252,7 @@ def test_rootdn(inst, args=None):
     try:
         inst.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-allow-ip', None)])
     except ldap.LDAPError, e:
-        log.error('test_rootdn: Failed to set rootDN plugin config: error ' + e.message['desc'])
+        log.fatal('test_rootdn: Failed to set rootDN plugin config: error ' + e.message['desc'])
         assert False
 
     ############################################################################
@@ -2266,7 +2267,7 @@ def test_rootdn(inst, args=None):
         failed = True
 
     if failed:
-        log.error('test_rootdn: Root DN was not able to bind')
+        log.fatal('test_rootdn: Root DN was not able to bind')
         assert False
 
     ############################################################################
@@ -2282,13 +2283,13 @@ def test_rootdn(inst, args=None):
     try:
         inst.modify_s(DN_CONFIG, [(ldap.MOD_DELETE, 'aci', ACI)])
     except ldap.LDAPError, e:
-        log.error('test_rootdn: Failed to add aci to config: error ' + e.message['desc'])
+        log.fatal('test_rootdn: Failed to delete aci from config: error ' + e.message['desc'])
         assert False
 
     try:
         inst.delete_s(USER1_DN)
     except ldap.LDAPError, e:
-        log.error('test_rootdn: Failed to delete user1: ' + e.message['desc'])
+        log.fatal('test_rootdn: Failed to delete user1: ' + e.message['desc'])
         assert False
 
     ############################################################################
diff --git a/dirsrvtests/suites/dynamic-plugins/stress_tests.py b/dirsrvtests/suites/dynamic-plugins/stress_tests.py
index ff830dd..8845e3a 100644
--- a/dirsrvtests/suites/dynamic-plugins/stress_tests.py
+++ b/dirsrvtests/suites/dynamic-plugins/stress_tests.py
@@ -41,7 +41,7 @@ def configureRI(inst):
     try:
         inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'referint-membership-attr', 'uniquemember')])
     except ldap.LDAPError, e:
-        log.error('configureRI: Failed to configure RI plugin: error ' + e.message['desc'])
+        log.fatal('configureRI: Failed to configure RI plugin: error ' + e.message['desc'])
         assert False
 
 
@@ -52,7 +52,7 @@ def configureMO(inst):
     try:
         inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'uniquemember')])
     except ldap.LDAPError, e:
-        log.error('configureMO: Failed to update config(uniquemember): error ' + e.message['desc'])
+        log.fatal('configureMO: Failed to update config(uniquemember): error ' + e.message['desc'])
         assert False
 
 
@@ -60,7 +60,7 @@ def cleanup(conn):
     try:
         conn.delete_s(GROUP_DN)
     except ldap.LDAPError, e:
-        log.error('cleanup: failed to delete group (' + GROUP_DN + ') error: ' + e.message['desc'])
+        log.fatal('cleanup: failed to delete group (' + GROUP_DN + ') error: ' + e.message['desc'])
         assert False
 
 
@@ -80,7 +80,7 @@ class DelUsers(threading.Thread):
             try:
                 conn.delete_s(USER_DN)
             except ldap.LDAPError, e:
-                log.error('DeleteUsers: failed to delete (' + USER_DN + ') error: ' + e.message['desc'])
+                log.fatal('DeleteUsers: failed to delete (' + USER_DN + ') error: ' + e.message['desc'])
                 assert False
 
             idx += 1
@@ -110,7 +110,7 @@ class AddUsers(threading.Thread):
             except ldap.ALREADY_EXISTS:
                 pass
             except ldap.LDAPError, e:
-                log.error('AddUsers: failed to add group (' + USER_DN + ') error: ' + e.message['desc'])
+                log.fatal('AddUsers: failed to add group (' + USER_DN + ') error: ' + e.message['desc'])
                 assert False
 
         log.info('AddUsers - Adding ' + str(NUM_USERS) + ' entries (' + self.rdnval + ')...')
@@ -121,7 +121,7 @@ class AddUsers(threading.Thread):
                 conn.add_s(Entry((USER_DN, {'objectclass': 'top extensibleObject'.split(),
                            'uid': 'user' + str(idx)})))
             except ldap.LDAPError, e:
-                log.error('AddUsers: failed to add (' + USER_DN + ') error: ' + e.message['desc'])
+                log.fatal('AddUsers: failed to add (' + USER_DN + ') error: ' + e.message['desc'])
                 assert False
 
             if self.addToGroup:
@@ -129,7 +129,7 @@ class AddUsers(threading.Thread):
                 try:
                     conn.modify_s(GROUP_DN, [(ldap.MOD_ADD, 'uniquemember', USER_DN)])
                 except ldap.LDAPError, e:
-                    log.error('AddUsers: Failed to add user' + USER_DN + ' to group: error ' + e.message['desc'])
+                    log.fatal('AddUsers: Failed to add user' + USER_DN + ' to group: error ' + e.message['desc'])
                     assert False
 
             idx += 1
diff --git a/dirsrvtests/suites/dynamic-plugins/test_dynamic_plugins.py b/dirsrvtests/suites/dynamic-plugins/test_dynamic_plugins.py
index 6724c82..6567f47 100644
--- a/dirsrvtests/suites/dynamic-plugins/test_dynamic_plugins.py
+++ b/dirsrvtests/suites/dynamic-plugins/test_dynamic_plugins.py
@@ -108,7 +108,7 @@ def test_dynamic_plugins(topology):
     try:
         topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
     except ldap.LDAPError, e:
-        ldap.error('Failed to enable dynamic plugin!' + e.message['desc'])
+        log.fatal('Failed to enable dynamic plugin! ' + e.message['desc'])
         assert False
 
     while 1:
diff --git a/dirsrvtests/suites/filter/filter_test.py b/dirsrvtests/suites/filter/filter_test.py
new file mode 100644
index 0000000..8901221
--- /dev/null
+++ b/dirsrvtests/suites/filter/filter_test.py
@@ -0,0 +1,144 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+installation1_prefix = None
+
+
+class TopologyStandalone(object):
+    def __init__(self, standalone):
+        standalone.open()
+        self.standalone = standalone
+
+
+ at pytest.fixture(scope="module")
+def topology(request):
+    global installation1_prefix
+    if installation1_prefix:
+        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+
+    # Creating standalone instance ...
+    standalone = DirSrv(verbose=False)
+    args_instance[SER_HOST] = HOST_STANDALONE
+    args_instance[SER_PORT] = PORT_STANDALONE
+    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+    args_standalone = args_instance.copy()
+    standalone.allocate(args_standalone)
+    instance_standalone = standalone.exists()
+    if instance_standalone:
+        standalone.delete()
+    standalone.create()
+    standalone.open()
+
+    # Clear out the tmp dir
+    standalone.clearTmpDir(__file__)
+
+    return TopologyStandalone(standalone)
+
+
+def test_filter_init(topology):
+    '''
+    Write your testcase here...
+    '''
+    return
+
+
+def test_filter_escaped(topology):
+    '''
+    Test that we can search for an '*' in an attribute value.
+    '''
+
+    log.info('Running test_filter_escaped...')
+
+    USER1_DN = 'uid=test_entry,' + DEFAULT_SUFFIX
+    USER2_DN = 'uid=test_entry2,' + DEFAULT_SUFFIX
+
+    try:
+        topology.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(),
+                                 'sn': '1',
+                                 'cn': 'test * me',
+                                 'uid': 'test_entry',
+                                 'userpassword': PASSWORD})))
+    except ldap.LDAPError, e:
+        log.fatal('test_filter_escaped: Failed to add test user ' + USER1_DN + ': error ' +
+                  e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
+                                 'sn': '2',
+                                 'cn': 'test me',
+                                 'uid': 'test_entry2',
+                                 'userpassword': PASSWORD})))
+    except ldap.LDAPError, e:
+        log.fatal('test_filter_escaped: Failed to add test user ' + USER2_DN + ': error ' + e.message['desc'])
+        assert False
+
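+    # The backslash escapes the '*' in the filter, so only the entry whose cn
+    # contains a literal asterisk ('test * me') should match.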
+    try:
+        entry = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'cn=*\**')
+        if not entry or len(entry) > 1:
+            log.fatal('test_filter_escaped: Entry was not found, or too many entries matched, using "cn=*\**"')
+            assert False
+    except ldap.LDAPError, e:
+        log.fatal('test_filter_escaped: Failed to search for user(%s), error: %s' %
+                  (USER1_DN, e.message['desc']))
+        assert False
+
+    log.info('test_filter_escaped: PASSED')
+
+
+def test_filter_search_original_attrs(topology):
+    '''
+    Search and request attributes with extra characters.  The returned entry
+    should not have these extra characters:  "objectclass EXTRA"
+    '''
+
+    log.info('Running test_filter_search_original_attrs...')
+
+    try:
+        entry = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_BASE,
+                                             'objectclass=top', ['objectclass-EXTRA'])
+        if entry[0].hasAttr('objectclass-EXTRA'):
+            log.fatal('test_filter_search_original_attrs: Entry returned the attribute with the extra characters')
+            assert False
+    except ldap.LDAPError, e:
+        log.fatal('test_filter_search_original_attrs: Failed to search suffix(%s), error: %s' %
+                  (DEFAULT_SUFFIX, e.message['desc']))
+        assert False
+
+    log.info('test_filter_search_original_attrs: PASSED')
+
+
+def test_filter_final(topology):
+    topology.standalone.delete()
+    log.info('Testcase PASSED')
+
+
+def run_isolated():
+    global installation1_prefix
+    installation1_prefix = None
+
+    topo = topology(True)
+
+    test_filter_init(topo)
+    test_filter_escaped(topo)
+    test_filter_search_original_attrs(topo)
+
+    test_filter_final(topo)
+
+
+if __name__ == '__main__':
+    run_isolated()
+
diff --git a/dirsrvtests/suites/memory_leaks/range_search_test.py b/dirsrvtests/suites/memory_leaks/range_search_test.py
new file mode 100644
index 0000000..ddbdae0
--- /dev/null
+++ b/dirsrvtests/suites/memory_leaks/range_search_test.py
@@ -0,0 +1,145 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from lib389.utils import *
+
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+installation1_prefix = None
+
+
+class TopologyStandalone(object):
+    def __init__(self, standalone):
+        standalone.open()
+        self.standalone = standalone
+
+
+ at pytest.fixture(scope="module")
+def topology(request):
+    global installation1_prefix
+    if installation1_prefix:
+        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+
+    # Creating standalone instance ...
+    standalone = DirSrv(verbose=False)
+    args_instance[SER_HOST] = HOST_STANDALONE
+    args_instance[SER_PORT] = PORT_STANDALONE
+    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+    args_standalone = args_instance.copy()
+    standalone.allocate(args_standalone)
+    instance_standalone = standalone.exists()
+    if instance_standalone:
+        standalone.delete()
+    standalone.create()
+    standalone.open()
+
+    # Clear out the tmp dir
+    standalone.clearTmpDir(__file__)
+
+    return TopologyStandalone(standalone)
+
+
+def test_range_search_init(topology):
+    '''
+    Enable the retro changelog and valgrind.  Since the valgrind tests move the
+    ns-slapd binary around, it's important to always call "valgrind_disable"
+    before "assert False", otherwise we leave the wrong ns-slapd in place if
+    there is a failure.
+    '''
+
+    log.info('Initializing test_range_search...')
+
+    topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
+
+    # First stop the instance
+    topology.standalone.stop(timeout=10)
+
+    # Get the sbin directory so we know where to replace 'ns-slapd'
+    sbin_dir = get_sbin_dir(prefix=topology.standalone.prefix)
+
+    # Enable valgrind
+    valgrind_enable(sbin_dir)
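+    # (valgrind_enable is assumed to swap the ns-slapd binary in sbin_dir for a
+    #  valgrind wrapper, which is why the binary location is needed here)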
+
+    # Now start the server with a longer timeout
+    topology.standalone.start(timeout=60)
+
+
+def test_range_search(topology):
+    '''
+    Add 100 entries and run a range search.  If we encounter an error, we
+    still need to disable valgrind before exiting.
+    '''
+
+    log.info('Running test_range_search...')
+
+    sbin_dir = get_sbin_dir(prefix=topology.standalone.prefix)
+
+    success = True
+
+    # Add 100 test entries
+    for idx in range(1, 100):
+        idx = str(idx)
+        USER_DN = 'uid=user' + idx + ',' + DEFAULT_SUFFIX
+        try:
+            topology.standalone.add_s(Entry((USER_DN, {'objectclass': "top extensibleObject".split(),
+                                 'uid': 'user' + idx})))
+        except ldap.LDAPError, e:
+            log.fatal('test_range_search: Failed to add test user ' + USER_DN + ': error ' + e.message['desc'])
+            success = False
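+    # Brief pause, presumably to let the retro changelog record the adds before searching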
+    time.sleep(1)
+
+    if success:
+        # Issue range search
+        try:
+            topology.standalone.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE,
+                                         '(&(changenumber>=74)(changenumber<=84))')
+        except ldap.LDAPError, e:
+            log.fatal('test_range_search: Failed to search retro changelog(%s), error: %s' %
+                      (RETROCL_SUFFIX, e.message['desc']))
+            test_range_search_final(topology)  # With valgrind we always need to cleanup
+            success = False
+
+    if success:
+        # Check valgrind(this stops the server)
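+        # (valgrind_check_leak is assumed to stop the server and scan the
+        #  valgrind output for the named function, 'range_candidates')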
+        if valgrind_check_leak(topology.standalone, 'range_candidates'):
+            log.fatal('test_range_search: Memory leak is still present!')
+            test_range_search_final(topology)  # With valgrind we always need to cleanup
+            success = False
+
+    # Disable valgrind
+    sbin_dir = get_sbin_dir(prefix=topology.standalone.prefix)
+    valgrind_disable(sbin_dir)
+
+    if success:
+        log.info('test_range_search: PASSED')
+    else:
+        log.fatal('test_range_search: FAILED')
+
+
+def test_range_search_final(topology):
+    # Remove the instance
+    topology.standalone.delete()
+
+
+def run_isolated():
+    global installation1_prefix
+    installation1_prefix = None
+
+    topo = topology(True)
+    test_range_search_init(topo)
+    test_range_search(topo)
+    test_range_search_final(topo)
+
+
+if __name__ == '__main__':
+    run_isolated()
+
diff --git a/dirsrvtests/suites/password/password_test.py b/dirsrvtests/suites/password/password_test.py
new file mode 100644
index 0000000..daab9aa
--- /dev/null
+++ b/dirsrvtests/suites/password/password_test.py
@@ -0,0 +1,135 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+installation1_prefix = None
+
+
+class TopologyStandalone(object):
+    def __init__(self, standalone):
+        standalone.open()
+        self.standalone = standalone
+
+
+ at pytest.fixture(scope="module")
+def topology(request):
+    global installation1_prefix
+    if installation1_prefix:
+        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+
+    # Creating standalone instance ...
+    standalone = DirSrv(verbose=False)
+    args_instance[SER_HOST] = HOST_STANDALONE
+    args_instance[SER_PORT] = PORT_STANDALONE
+    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+    args_standalone = args_instance.copy()
+    standalone.allocate(args_standalone)
+    instance_standalone = standalone.exists()
+    if instance_standalone:
+        standalone.delete()
+    standalone.create()
+    standalone.open()
+
+    # Clear out the tmp dir
+    standalone.clearTmpDir(__file__)
+
+    return TopologyStandalone(standalone)
+
+
+def test_password_init(topology):
+    '''
+    Do init, if necessary
+    '''
+
+    return
+
+
+def test_password_delete_specific_password(topology):
+    '''
+    Delete a specific userpassword, and make sure it is actually deleted from the entry
+    '''
+
+    log.info('Running test_password_delete_specific_password...')
+
+    USER_DN = 'uid=test_entry,' + DEFAULT_SUFFIX
+
+    #
+    # Add a test user with a password
+    #
+    try:
+        topology.standalone.add_s(Entry((USER_DN, {'objectclass': "top extensibleObject".split(),
+                                 'sn': '1',
+                                 'cn': 'user 1',
+                                 'uid': 'user1',
+                                 'userpassword': PASSWORD})))
+    except ldap.LDAPError, e:
+        log.fatal('test_password_delete_specific_password: Failed to add test user ' +
+                  USER_DN + ': error ' + e.message['desc'])
+        assert False
+
+    #
+    # Delete the exact password
+    #
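+    # (Deleting by value makes the server match the supplied clear text
+    #  against the stored, possibly hashed, userpassword value)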
+    try:
+        topology.standalone.modify_s(USER_DN, [(ldap.MOD_DELETE, 'userpassword', PASSWORD)])
+    except ldap.LDAPError, e:
+        log.fatal('test_password_delete_specific_password: Failed to delete userpassword: error ' +
+                  e.message['desc'])
+        assert False
+
+    #
+    # Check the password is actually deleted
+    #
+    try:
+        entry = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, 'objectclass=top')
+        if entry[0].hasAttr('userpassword'):
+            log.fatal('test_password_delete_specific_password: Entry incorrectly still has the userpassword attribute')
+            assert False
+    except ldap.LDAPError, e:
+        log.fatal('test_password_delete_specific_password: Failed to search for user(%s), error: %s' %
+                  (USER_DN, e.message['desc']))
+        assert False
+
+    #
+    # Cleanup
+    #
+    try:
+        topology.standalone.delete_s(USER_DN)
+    except ldap.LDAPError, e:
+        log.fatal('test_password_delete_specific_password: Failed to delete user(%s), error: %s' %
+                  (USER_DN, e.message['desc']))
+        assert False
+
+    log.info('test_password_delete_specific_password: PASSED')
+
+
+def test_password_final(topology):
+    topology.standalone.delete()
+    log.info('Password test suite PASSED')
+
+
+def run_isolated():
+    global installation1_prefix
+    installation1_prefix = None
+
+    topo = topology(True)
+    test_password_init(topo)
+    test_password_delete_specific_password(topo)
+    test_password_final(topo)
+
+
+if __name__ == '__main__':
+    run_isolated()
+
diff --git a/dirsrvtests/suites/password/pwdAdmin_test.py b/dirsrvtests/suites/password/pwdAdmin_test.py
new file mode 100644
index 0000000..cc67077
--- /dev/null
+++ b/dirsrvtests/suites/password/pwdAdmin_test.py
@@ -0,0 +1,439 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from lib389.utils import *
+
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+installation1_prefix = None
+CONFIG_DN = 'cn=config'
+ADMIN_NAME = 'passwd_admin'
+ADMIN_DN = 'cn=%s,%s' % (ADMIN_NAME, SUFFIX)
+ADMIN2_NAME = 'passwd_admin2'
+ADMIN2_DN = 'cn=%s,%s' % (ADMIN2_NAME, SUFFIX)
+ADMIN_PWD = 'adminPassword_1'
+ADMIN_GROUP_DN = 'cn=password admin group,%s' % (SUFFIX)
+ENTRY_NAME = 'Joe Schmo'
+ENTRY_DN = 'cn=%s,%s' % (ENTRY_NAME, SUFFIX)
+INVALID_PWDS = ('2_Short', 'No_Number', 'N0Special', '{SSHA}bBy8UdtPZwu8uZna9QOYG3Pr41RpIRVDl8wddw==')
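+# Each value should violate the policy configured in test_pwdAdmin_init:
+# too short, no digit, no special character, or a pre-hashed value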
+
+
+class TopologyStandalone(object):
+    def __init__(self, standalone):
+        standalone.open()
+        self.standalone = standalone
+
+
+ at pytest.fixture(scope="module")
+def topology(request):
+    global installation1_prefix
+    if installation1_prefix:
+        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+
+    # Creating standalone instance ...
+    standalone = DirSrv(verbose=False)
+    args_instance[SER_HOST] = HOST_STANDALONE
+    args_instance[SER_PORT] = PORT_STANDALONE
+    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+    args_standalone = args_instance.copy()
+    standalone.allocate(args_standalone)
+    instance_standalone = standalone.exists()
+    if instance_standalone:
+        standalone.delete()
+    standalone.create()
+    standalone.open()
+
+    # Clear out the tmp dir
+    standalone.clearTmpDir(__file__)
+
+    return TopologyStandalone(standalone)
+
+
+def test_pwdAdmin_init(topology):
+    '''
+    Create our future Password Admin entry, set the password policy, and test
+    that it's working.
+    '''
+
+    log.info('test_pwdAdmin_init: Creating Password Administrator entries...')
+
+    # Add Password Admin 1
+    try:
+        topology.standalone.add_s(Entry((ADMIN_DN, {'objectclass': "top extensibleObject".split(),
+                                 'cn': ADMIN_NAME,
+                                 'userpassword': ADMIN_PWD})))
+    except ldap.LDAPError, e:
+        log.fatal('test_pwdAdmin_init: Failed to add test user ' + ADMIN_DN + ': error ' + e.message['desc'])
+        assert False
+
+    # Add Password Admin 2
+    try:
+        topology.standalone.add_s(Entry((ADMIN2_DN, {'objectclass': "top extensibleObject".split(),
+                                      'cn': ADMIN2_NAME,
+                                      'userpassword': ADMIN_PWD})))
+    except ldap.LDAPError, e:
+        log.fatal('test_pwdAdmin_init: Failed to add test user ' + ADMIN2_DN + ': error ' + e.message['desc'])
+        assert False
+
+    # Add Password Admin Group
+    try:
+        topology.standalone.add_s(Entry((ADMIN_GROUP_DN, {'objectclass': "top groupOfUniqueNames".split(),
+                                      'cn': 'password admin group',
+                                      'uniquemember': [ADMIN_DN, ADMIN2_DN]})))
+    except ldap.LDAPError, e:
+        log.fatal('test_pwdAdmin_init: Failed to add group ' + ADMIN_GROUP_DN + ': error ' + e.message['desc'])
+        assert False
+
+    # Configure password policy
+    log.info('test_pwdAdmin_init: Configuring password policy...')
+    try:
+        topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on'),
+                                                 (ldap.MOD_REPLACE, 'passwordCheckSyntax', 'on'),
+                                                 (ldap.MOD_REPLACE, 'passwordMinCategories', '1'),
+                                                 (ldap.MOD_REPLACE, 'passwordMinTokenLength', '1'),
+                                                 (ldap.MOD_REPLACE, 'passwordExp', 'on'),
+                                                 (ldap.MOD_REPLACE, 'passwordMinDigits', '1'),
+                                                 (ldap.MOD_REPLACE, 'passwordMinSpecials', '1')])
+    except ldap.LDAPError, e:
+        log.fatal('test_pwdAdmin_init: Failed to configure password policy: ' +
+                  e.message['desc'])
+        assert False
+
+    #
+    # Add an aci to allow everyone all access (just makes things easier)
+    #
+    log.info('Add aci to allow password admin to add/update entries...')
+
+    ACI_TARGET       = "(target = \"ldap:///%s\")" % SUFFIX
+    ACI_TARGETATTR   = "(targetattr = *)"
+    ACI_ALLOW        = "(version 3.0; acl \"Password Admin Access\"; allow (all) "
+    ACI_SUBJECT      = "(userdn = \"ldap:///anyone\");)"
+    ACI_BODY         = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT
+    mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)]
+    try:
+        topology.standalone.modify_s(SUFFIX, mod)
+    except ldap.LDAPError, e:
+        log.fatal('test_pwdAdmin_init: Failed to add aci for password admin: ' +
+                  e.message['desc'])
+        assert False
+
+    #
+    # Bind as the future Password Admin
+    #
+    log.info('test_pwdAdmin_init: Bind as the Password Administrator (before activating)...')
+    try:
+        topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
+    except ldap.LDAPError, e:
+        log.fatal('test_pwdAdmin_init: Failed to bind as the Password Admin: ' +
+                                      e.message['desc'])
+        assert False
+
+    #
+    # Setup our test entry, and test password policy is working
+    #
+    entry = Entry(ENTRY_DN)
+    entry.setValues('objectclass', 'top', 'person')
+    entry.setValues('sn', ENTRY_NAME)
+    entry.setValues('cn', ENTRY_NAME)
+
+    #
+    # Start by attempting to add an entry with an invalid password
+    #
+    log.info('test_pwdAdmin_init: Attempt to add entries with invalid passwords, these adds should fail...')
+    for passwd in INVALID_PWDS:
+        failed_as_expected = False
+        entry.setValues('userpassword', passwd)
+        log.info('test_pwdAdmin_init: Create a regular user entry %s with password (%s)...' %
+                 (ENTRY_DN, passwd))
+        try:
+            topology.standalone.add_s(entry)
+        except ldap.LDAPError, e:
+            # We failed as expected
+            failed_as_expected = True
+            log.info('test_pwdAdmin_init: Add failed as expected: password (%s) result (%s)'
+                    % (passwd, e.message['desc']))
+
+        if not failed_as_expected:
+            log.fatal('test_pwdAdmin_init: We were incorrectly able to add an entry ' +
+                      'with an invalid password (%s)' % (passwd))
+            assert False
+
+
+def test_pwdAdmin(topology):
+    '''
+        Test that password administrators/root DN can
+        bypass password syntax/policy.
+
+        We need to test how passwords are modified in
+        existing entries, and when adding new entries.
+
+        Create the Password Admin entry, but do not set
+        it as an admin yet.  Use the entry to verify invalid
+        passwords are caught.  Then activate the password
+        admin and make sure it can bypass password policy.
+    '''
+
+    #
+    # Now activate a password administrator, bind as root dn to do the config
+    # update, then rebind as the password admin
+    #
+    log.info('test_pwdAdmin: Activate the Password Administrator...')
+
+    #
+    # Setup our test entry, and test password policy is working
+    #
+    entry = Entry(ENTRY_DN)
+    entry.setValues('objectclass', 'top', 'person')
+    entry.setValues('sn', ENTRY_NAME)
+    entry.setValues('cn', ENTRY_NAME)
+
+    # Bind as Root DN
+    try:
+        topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    except ldap.LDAPError, e:
+        log.fatal('test_pwdAdmin: Root DN failed to authenticate: ' +
+                  e.message['desc'])
+        assert False
+
+    # Set the password admin
+    try:
+        topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ADMIN_DN)])
+    except ldap.LDAPError, e:
+        log.fatal('test_pwdAdmin: Failed to add password admin to config: ' +
+                  e.message['desc'])
+        assert False
+
+    # Bind as Password Admin
+    try:
+        topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
+    except ldap.LDAPError, e:
+        log.fatal('test_pwdAdmin: Failed to bind as the Password Admin: ' +
+                  e.message['desc'])
+        assert False
+
+    #
+    # Start adding entries with invalid passwords, delete the entry after each pass.
+    #
+    for passwd in INVALID_PWDS:
+        entry.setValues('userpassword', passwd)
+        log.info('test_pwdAdmin: Create a regular user entry %s with password (%s)...' %
+                 (ENTRY_DN, passwd))
+        try:
+            topology.standalone.add_s(entry)
+        except ldap.LDAPError, e:
+            log.fatal('test_pwdAdmin: Failed to add entry with password (%s) result (%s)'
+                      % (passwd, e.message['desc']))
+            assert False
+
+        log.info('test_pwdAdmin: Successfully added entry (%s)' % ENTRY_DN)
+
+        # Delete entry for the next pass
+        try:
+            topology.standalone.delete_s(ENTRY_DN)
+        except ldap.LDAPError, e:
+            log.fatal('test_pwdAdmin: Failed to delete entry: %s' %
+                      (e.message['desc']))
+            assert False
+
+    #
+    # Add the entry for the next round of testing (modify password)
+    #
+    entry.setValues('userpassword', ADMIN_PWD)
+    try:
+        topology.standalone.add_s(entry)
+    except ldap.LDAPError, e:
+        log.fatal('test_pwdAdmin: Failed to add entry with valid password (%s) result (%s)' %
+                  (ADMIN_PWD, e.message['desc']))
+        assert False
+
+    #
+    # Deactivate the password admin and make sure invalid password updates fail
+    #
+    log.info('test_pwdAdmin: Deactivate Password Administrator and ' +
+             'try invalid password updates...')
+
+    # Bind as root DN
+    try:
+        topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    except ldap.LDAPError, e:
+        log.fatal('test_pwdAdmin: Root DN failed to authenticate: ' +
+                  e.message['desc'])
+        assert False
+
+    # Remove password admin
+    try:
+        topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_DELETE, 'passwordAdminDN', None)])
+    except ldap.LDAPError, e:
+        log.fatal('test_pwdAdmin: Failed to remove password admin from config: ' +
+                  e.message['desc'])
+        assert False
+
+    # Bind as Password Admin (who is no longer an admin)
+    try:
+        topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
+    except ldap.LDAPError, e:
+        log.fatal('test_pwdAdmin: Failed to bind as the Password Admin: ' +
+                  e.message['desc'])
+        assert False
+
+    #
+    # Make invalid password updates that should fail
+    #
+    for passwd in INVALID_PWDS:
+        failed_as_expected = False
+        entry.setValues('userpassword', passwd)
+        try:
+            topology.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', passwd)])
+        except ldap.LDAPError, e:
+            # We failed as expected
+            failed_as_expected = True
+            log.info('test_pwdAdmin: Password update failed as expected: password (%s) result (%s)'
+                     % (passwd, e.message['desc']))
+
+        if not failed_as_expected:
+            log.fatal('test_pwdAdmin: We were incorrectly able to add an invalid password (%s)'
+                      % (passwd))
+            assert False
+
+    #
+    # Now activate a password administrator
+    #
+    log.info('test_pwdAdmin: Activate Password Administrator and try updates again...')
+
+    # Bind as root DN to make the update
+    try:
+        topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    except ldap.LDAPError, e:
+        log.fatal('test_pwdAdmin: Root DN failed to authenticate: ' + e.message['desc'])
+        assert False
+
+    # Update config - set the password admin
+    try:
+        topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ADMIN_DN)])
+    except ldap.LDAPError, e:
+        log.fatal('test_pwdAdmin: Failed to add password admin to config: ' +
+                  e.message['desc'])
+        assert False
+
+    # Bind as Password Admin
+    try:
+        topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
+    except ldap.LDAPError, e:
+        log.fatal('test_pwdAdmin: Failed to bind as the Password Admin: ' +
+                  e.message['desc'])
+        assert False
+
+    #
+    # Make the same password updates, but this time they should succeed
+    #
+    for passwd in INVALID_PWDS:
+        try:
+            topology.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', passwd)])
+        except ldap.LDAPError, e:
+            log.fatal('test_pwdAdmin: Password update failed unexpectedly: password (%s) result (%s)'
+                    % (passwd, e.message['desc']))
+            assert False
+        log.info('test_pwdAdmin: Password update succeeded (%s)' % passwd)
+
+    #
+    # Test Password Admin Group
+    #
+    log.info('test_pwdAdmin: Testing password admin group...')
+
+    # Bind as root DN to make the update
+    try:
+        topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    except ldap.LDAPError, e:
+        log.fatal('test_pwdAdmin: Root DN failed to authenticate: ' + e.message['desc'])
+        assert False
+
+    # Update config - set the password admin group
+    try:
+        topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ADMIN_GROUP_DN)])
+    except ldap.LDAPError, e:
+        log.fatal('test_pwdAdmin: Failed to add password admin to config: ' +
+                  e.message['desc'])
+        assert False
+
+    # Bind as admin2
+    try:
+        topology.standalone.simple_bind_s(ADMIN2_DN, ADMIN_PWD)
+    except ldap.LDAPError, e:
+        log.fatal('test_pwdAdmin: Failed to bind as the Password Admin2: ' +
+                  e.message['desc'])
+        assert False
+
+    # Make some invalid password updates, but they should succeed
+    for passwd in INVALID_PWDS:
+        try:
+            topology.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', passwd)])
+        except ldap.LDAPError, e:
+            log.fatal('test_pwdAdmin: Password update failed unexpectedly: password (%s) result (%s)'
+                    % (passwd, e.message['desc']))
+            assert False
+        log.info('test_pwdAdmin: Password update succeeded (%s)' % passwd)
+
+    # Cleanup - bind as Root DN for the other tests
+    try:
+        topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    except ldap.LDAPError, e:
+        log.fatal('test_pwdAdmin: Root DN failed to authenticate: ' + e.message['desc'])
+        assert False
+
+
+def test_pwdAdmin_config_validation(topology):
+    '''
+    Test config validation:
+
+    - Test adding multiple passwordAdminDN attributes
+    - Test adding invalid values(non-DN's)
+    '''
+    # Add multiple attributes - one already exists so just try to add a second one
+    try:
+        topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_ADD, 'passwordAdminDN', ENTRY_DN)])
+        log.fatal('test_pwdAdmin_config_validation: Was incorrectly able to add two passwordAdminDN attributes')
+        assert False
+    except ldap.LDAPError, e:
+        log.info('test_pwdAdmin_config_validation: Failed as expected: ' +
+                 e.message['desc'])
+
+    # Attempt to set invalid DN
+    try:
+        topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_ADD, 'passwordAdminDN', 'ZZZZZ')])
+        log.fatal('test_pwdAdmin_config_validation: Was incorrectly able to add an invalid DN')
+        assert False
+    except ldap.LDAPError, e:
+        log.info('test_pwdAdmin_config_validation: Failed as expected: ' +
+                 e.message['desc'])
+
+
+def test_pwdAdmin_final(topology):
+    topology.standalone.delete()
+    log.info('pwdAdmin test suite PASSED')
+
+
+def run_isolated():
+    global installation1_prefix
+    installation1_prefix = None
+
+    topo = topology(True)
+    test_pwdAdmin_init(topo)
+    test_pwdAdmin(topo)
+    test_pwdAdmin_config_validation(topo)
+    test_pwdAdmin_final(topo)
+
+
+if __name__ == '__main__':
+    run_isolated()
+
diff --git a/dirsrvtests/suites/password/pwdPolicy_test.py b/dirsrvtests/suites/password/pwdPolicy_test.py
new file mode 100644
index 0000000..a4e6c7e
--- /dev/null
+++ b/dirsrvtests/suites/password/pwdPolicy_test.py
@@ -0,0 +1,74 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+installation1_prefix = None
+
+
+class TopologyStandalone(object):
+    def __init__(self, standalone):
+        standalone.open()
+        self.standalone = standalone
+
+
+@pytest.fixture(scope="module")
+def topology(request):
+    global installation1_prefix
+    if installation1_prefix:
+        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+
+    # Creating standalone instance ...
+    standalone = DirSrv(verbose=False)
+    args_instance[SER_HOST] = HOST_STANDALONE
+    args_instance[SER_PORT] = PORT_STANDALONE
+    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+    args_standalone = args_instance.copy()
+    standalone.allocate(args_standalone)
+    instance_standalone = standalone.exists()
+    if instance_standalone:
+        standalone.delete()
+    standalone.create()
+    standalone.open()
+
+    # Clear out the tmp dir
+    standalone.clearTmpDir(__file__)
+
+    return TopologyStandalone(standalone)
+
+
+def test_pwdPolicy_init(topology):
+    '''
+    Init the test suite (if necessary)
+    '''
+    return
+
+
+def test_pwdPolicy_final(topology):
+    topology.standalone.delete()
+    log.info('Password Policy test suite PASSED')
+
+
+def run_isolated():
+    global installation1_prefix
+    installation1_prefix = None
+
+    topo = topology(True)
+    test_pwdPolicy_init(topo)
+    test_pwdPolicy_final(topo)
+
+
+if __name__ == '__main__':
+    run_isolated()
+
diff --git a/dirsrvtests/suites/replication/cleanallruv_test.py b/dirsrvtests/suites/replication/cleanallruv_test.py
new file mode 100644
index 0000000..df74062
--- /dev/null
+++ b/dirsrvtests/suites/replication/cleanallruv_test.py
@@ -0,0 +1,1486 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import pytest
+import threading
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from lib389.utils import *
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+installation1_prefix = None
+
+
+class AddUsers(threading.Thread):
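+    '''Thread that adds num_users test entries to the given instance over its own connection.'''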
+    def __init__(self, inst, num_users):
+        threading.Thread.__init__(self)
+        self.daemon = True
+        self.inst = inst
+        self.num_users = num_users
+
+    def openConnection(self, inst):
+        # Open a new connection to our LDAP server
+        server = DirSrv(verbose=False)
+        args_instance[SER_HOST] = inst.host
+        args_instance[SER_PORT] = inst.port
+        args_instance[SER_SERVERID_PROP] = inst.serverid
+        args_standalone = args_instance.copy()
+        server.allocate(args_standalone)
+        server.open()
+        return server
+
+    def run(self):
+        # Start adding users
+        conn = self.openConnection(self.inst)
+        idx = 0
+
+        while idx < self.num_users:
+            USER_DN = 'uid=' + self.inst.serverid + '_' + str(idx) + ',' + DEFAULT_SUFFIX
+            try:
+                conn.add_s(Entry((USER_DN, {'objectclass': 'top extensibleObject'.split(),
+                           'uid': 'user' + str(idx)})))
+            except ldap.UNWILLING_TO_PERFORM:
+                # One of the masters was probably put into read only mode - just break out
+                break
+            except ldap.LDAPError, e:
+                log.error('AddUsers: failed to add (' + USER_DN + ') error: ' + e.message['desc'])
+                assert False
+            idx += 1
+
+        conn.close()
+
+
+class TopologyReplication(object):
+    def __init__(self, master1, master2, master3, master4, m1_m2_agmt, m1_m3_agmt, m1_m4_agmt):
+        master1.open()
+        self.master1 = master1
+        master2.open()
+        self.master2 = master2
+        master3.open()
+        self.master3 = master3
+        master4.open()
+        self.master4 = master4
+
+        # Store the agreement dn's for future initializations
+        self.m1_m2_agmt = m1_m2_agmt
+        self.m1_m3_agmt = m1_m3_agmt
+        self.m1_m4_agmt = m1_m4_agmt
+
+
+@pytest.fixture(scope="module")
+def topology(request):
+    global installation1_prefix
+    if installation1_prefix:
+        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+
+    # Creating master 1...
+    master1 = DirSrv(verbose=False)
+    args_instance[SER_HOST] = HOST_MASTER_1
+    args_instance[SER_PORT] = PORT_MASTER_1
+    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
+    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+    args_master = args_instance.copy()
+    master1.allocate(args_master)
+    instance_master1 = master1.exists()
+    if instance_master1:
+        master1.delete()
+    master1.create()
+    master1.open()
+    master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
+    master1.log = log
+
+    # Creating master 2...
+    master2 = DirSrv(verbose=False)
+    args_instance[SER_HOST] = HOST_MASTER_2
+    args_instance[SER_PORT] = PORT_MASTER_2
+    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
+    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+    args_master = args_instance.copy()
+    master2.allocate(args_master)
+    instance_master2 = master2.exists()
+    if instance_master2:
+        master2.delete()
+    master2.create()
+    master2.open()
+    master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
+
+    # Creating master 3...
+    master3 = DirSrv(verbose=False)
+    args_instance[SER_HOST] = HOST_MASTER_3
+    args_instance[SER_PORT] = PORT_MASTER_3
+    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_3
+    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+    args_master = args_instance.copy()
+    master3.allocate(args_master)
+    instance_master3 = master3.exists()
+    if instance_master3:
+        master3.delete()
+    master3.create()
+    master3.open()
+    master3.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_3)
+
+    # Creating master 4...
+    master4 = DirSrv(verbose=False)
+    args_instance[SER_HOST] = HOST_MASTER_4
+    args_instance[SER_PORT] = PORT_MASTER_4
+    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_4
+    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+    args_master = args_instance.copy()
+    master4.allocate(args_master)
+    instance_master4 = master4.exists()
+    if instance_master4:
+        master4.delete()
+    master4.create()
+    master4.open()
+    master4.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_4)
+
+    #
+    # Create all the agreements
+    #
+    # Creating agreement from master 1 to master 2
+    properties = {RA_NAME:      r'meTo_$host:$port',
+                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
+                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+    m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
+    if not m1_m2_agmt:
+        log.fatal("Fail to create a master -> master replica agreement")
+        sys.exit(1)
+    log.debug("%s created" % m1_m2_agmt)
+
+    # Creating agreement from master 1 to master 3
+    properties = {RA_NAME:      r'meTo_$host:$port',
+                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
+                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+    m1_m3_agmt = master1.agreement.create(suffix=SUFFIX, host=master3.host, port=master3.port, properties=properties)
+    if not m1_m3_agmt:
+        log.fatal("Fail to create a master -> master replica agreement")
+        sys.exit(1)
+    log.debug("%s created" % m1_m3_agmt)
+
+    # Creating agreement from master 1 to master 4
+    properties = {RA_NAME:      r'meTo_$host:$port',
+                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
+                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+    m1_m4_agmt = master1.agreement.create(suffix=SUFFIX, host=master4.host, port=master4.port, properties=properties)
+    if not m1_m4_agmt:
+        log.fatal("Fail to create a master -> master replica agreement")
+        sys.exit(1)
+    log.debug("%s created" % m1_m4_agmt)
+
+    # Creating agreement from master 2 to master 1
+    properties = {RA_NAME:      r'meTo_$host:$port',
+                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
+                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+    m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
+    if not m2_m1_agmt:
+        log.fatal("Fail to create a master -> master replica agreement")
+        sys.exit(1)
+    log.debug("%s created" % m2_m1_agmt)
+
+    # Creating agreement from master 2 to master 3
+    properties = {RA_NAME:      r'meTo_$host:$port',
+                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
+                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+    m2_m3_agmt = master2.agreement.create(suffix=SUFFIX, host=master3.host, port=master3.port, properties=properties)
+    if not m2_m3_agmt:
+        log.fatal("Fail to create a master -> master replica agreement")
+        sys.exit(1)
+    log.debug("%s created" % m2_m3_agmt)
+
+    # Creating agreement from master 2 to master 4
+    properties = {RA_NAME:      r'meTo_$host:$port',
+                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
+                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+    m2_m4_agmt = master2.agreement.create(suffix=SUFFIX, host=master4.host, port=master4.port, properties=properties)
+    if not m2_m4_agmt:
+        log.fatal("Fail to create a master -> master replica agreement")
+        sys.exit(1)
+    log.debug("%s created" % m2_m4_agmt)
+
+    # Creating agreement from master 3 to master 1
+    properties = {RA_NAME:      r'meTo_$host:$port',
+                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
+                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+    m3_m1_agmt = master3.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
+    if not m3_m1_agmt:
+        log.fatal("Fail to create a master -> master replica agreement")
+        sys.exit(1)
+    log.debug("%s created" % m3_m1_agmt)
+
+    # Creating agreement from master 3 to master 2
+    properties = {RA_NAME:      r'meTo_$host:$port',
+                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
+                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+    m3_m2_agmt = master3.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
+    if not m3_m2_agmt:
+        log.fatal("Fail to create a master -> master replica agreement")
+        sys.exit(1)
+    log.debug("%s created" % m3_m2_agmt)
+
+    # Creating agreement from master 3 to master 4
+    properties = {RA_NAME:      r'meTo_$host:$port',
+                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
+                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+    m3_m4_agmt = master3.agreement.create(suffix=SUFFIX, host=master4.host, port=master4.port, properties=properties)
+    if not m3_m4_agmt:
+        log.fatal("Fail to create a master -> master replica agreement")
+        sys.exit(1)
+    log.debug("%s created" % m3_m4_agmt)
+
+    # Creating agreement from master 4 to master 1
+    properties = {RA_NAME:      r'meTo_$host:$port',
+                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
+                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+    m4_m1_agmt = master4.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
+    if not m4_m1_agmt:
+        log.fatal("Fail to create a master -> master replica agreement")
+        sys.exit(1)
+    log.debug("%s created" % m4_m1_agmt)
+
+    # Creating agreement from master 4 to master 2
+    properties = {RA_NAME:      r'meTo_$host:$port',
+                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
+                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+    m4_m2_agmt = master4.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
+    if not m4_m2_agmt:
+        log.fatal("Fail to create a master -> master replica agreement")
+        sys.exit(1)
+    log.debug("%s created" % m4_m2_agmt)
+
+    # Creating agreement from master 4 to master 3
+    properties = {RA_NAME:      r'meTo_$host:$port',
+                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
+                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+    m4_m3_agmt = master4.agreement.create(suffix=SUFFIX, host=master3.host, port=master3.port, properties=properties)
+    if not m4_m3_agmt:
+        log.fatal("Fail to create a master -> master replica agreement")
+        sys.exit(1)
+    log.debug("%s created" % m4_m3_agmt)
+
+    # Allow the replicas to get situated with the new agreements
+    time.sleep(5)
+
+    #
+    # Initialize all the agreements
+    #
+    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
+    master1.waitForReplInit(m1_m2_agmt)
+    master1.agreement.init(SUFFIX, HOST_MASTER_3, PORT_MASTER_3)
+    master1.waitForReplInit(m1_m3_agmt)
+    master1.agreement.init(SUFFIX, HOST_MASTER_4, PORT_MASTER_4)
+    master1.waitForReplInit(m1_m4_agmt)
+
+    # Check replication is working...
+    if master1.testReplication(DEFAULT_SUFFIX, master2):
+        log.info('Replication is working.')
+    else:
+        log.fatal('Replication is not working.')
+        assert False
+
+    # Clear out the tmp dir
+    master1.clearTmpDir(__file__)
+
+    return TopologyReplication(master1, master2, master3, master4, m1_m2_agmt, m1_m3_agmt, m1_m4_agmt)
+
+
+def restore_master4(topology):
+    '''
+    In our tests we will always be removing master 4, so we need a common
+    way to restore it for the next test
+    '''
+
+    log.info('Restoring master 4...')
+
+    # Enable replication on master 4
+    topology.master4.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_4)
+
+    #
+    # Create agreements from master 4 -> m1, m2, m3
+    #
+    # Creating agreement from master 4 to master 1
+    properties = {RA_NAME:      r'meTo_$host:$port',
+                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
+                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+    m4_m1_agmt = topology.master4.agreement.create(suffix=SUFFIX, host=topology.master1.host,
+                                                   port=topology.master1.port, properties=properties)
+    if not m4_m1_agmt:
+        log.fatal("Fail to create a master -> master replica agreement")
+        sys.exit(1)
+    log.debug("%s created" % m4_m1_agmt)
+
+    # Creating agreement from master 4 to master 2
+    properties = {RA_NAME:      r'meTo_$host:$port',
+                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
+                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+    m4_m2_agmt = topology.master4.agreement.create(suffix=SUFFIX, host=topology.master2.host,
+                                                   port=topology.master2.port, properties=properties)
+    if not m4_m2_agmt:
+        log.fatal("Fail to create a master -> master replica agreement")
+        sys.exit(1)
+    log.debug("%s created" % m4_m2_agmt)
+
+    # Creating agreement from master 4 to master 3
+    properties = {RA_NAME:      r'meTo_$host:$port',
+                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
+                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+    m4_m3_agmt = topology.master4.agreement.create(suffix=SUFFIX, host=topology.master3.host,
+                                                   port=topology.master3.port, properties=properties)
+    if not m4_m3_agmt:
+        log.fatal("Fail to create a master -> master replica agreement")
+        sys.exit(1)
+    log.debug("%s created" % m4_m3_agmt)
+
+    #
+    # Create agreements from m1, m2, m3 to master 4
+    #
+    # Creating agreement from master 1 to master 4
+    properties = {RA_NAME:      r'meTo_$host:$port',
+                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
+                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+    m1_m4_agmt = topology.master1.agreement.create(suffix=SUFFIX, host=topology.master4.host,
+                                                   port=topology.master4.port, properties=properties)
+    if not m1_m4_agmt:
+        log.fatal("Fail to create a master -> master replica agreement")
+        sys.exit(1)
+    log.debug("%s created" % m1_m4_agmt)
+
+    # Creating agreement from master 2 to master 4
+    properties = {RA_NAME:      r'meTo_$host:$port',
+                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
+                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+    m2_m4_agmt = topology.master2.agreement.create(suffix=SUFFIX, host=topology.master4.host,
+                                                   port=topology.master4.port, properties=properties)
+    if not m2_m4_agmt:
+        log.fatal("Fail to create a master -> master replica agreement")
+        sys.exit(1)
+    log.debug("%s created" % m2_m4_agmt)
+
+    # Creating agreement from master 3 to master 4
+    properties = {RA_NAME:      r'meTo_$host:$port',
+                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
+                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+    m3_m4_agmt = topology.master3.agreement.create(suffix=SUFFIX, host=topology.master4.host,
+                                                   port=topology.master4.port, properties=properties)
+    if not m3_m4_agmt:
+        log.fatal("Fail to create a master -> master replica agreement")
+        sys.exit(1)
+    log.debug("%s created" % m3_m4_agmt)
+
+    #
+    # Restart the other servers - this allows the rid (for master 4) to be valid and used again
+    #
+    topology.master1.restart(timeout=30)
+    topology.master2.restart(timeout=30)
+    topology.master3.restart(timeout=30)
+    topology.master4.restart(timeout=30)
+
+    #
+    # Initialize the agreements
+    #
+    topology.master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
+    topology.master1.waitForReplInit(topology.m1_m2_agmt)
+    topology.master1.agreement.init(SUFFIX, HOST_MASTER_3, PORT_MASTER_3)
+    topology.master1.waitForReplInit(topology.m1_m3_agmt)
+    topology.master1.agreement.init(SUFFIX, HOST_MASTER_4, PORT_MASTER_4)
+    topology.master1.waitForReplInit(topology.m1_m4_agmt)
+
+    #
+    # Test Replication is working
+    #
+    # Check replication is working with previous working master(m1 -> m2)
+    if topology.master1.testReplication(DEFAULT_SUFFIX, topology.master2):
+        log.info('Replication is working m1 -> m2.')
+    else:
+        log.fatal('restore_master4: Replication is not working from m1 -> m2.')
+        assert False
+
+    # Check replication is working from master 1 to  master 4...
+    if topology.master1.testReplication(DEFAULT_SUFFIX, topology.master4):
+        log.info('Replication is working m1 -> m4.')
+    else:
+        log.fatal('restore_master4: Replication is not working from m1 -> m4.')
+        assert False
+
+    # Check replication is working from master 4 to master1...
+    if topology.master4.testReplication(DEFAULT_SUFFIX, topology.master1):
+        log.info('Replication is working m4 -> m1.')
+    else:
+        log.fatal('restore_master4: Replication is not working from m4 -> m1.')
+        assert False
+
+    log.info('Master 4 has been successfully restored.')
+
+
+def test_cleanallruv_init(topology):
+    '''
+    Make updates on each master to make sure we have all the master RUVs on
+    each master.
+    '''
+
+    log.info('Initializing cleanAllRUV test suite...')
+
+    # Master 1
+    if not topology.master1.testReplication(DEFAULT_SUFFIX, topology.master2):
+        log.fatal('test_cleanallruv_init: Replication is not working between master 1 and master 2.')
+        assert False
+
+    if not topology.master1.testReplication(DEFAULT_SUFFIX, topology.master3):
+        log.fatal('test_cleanallruv_init: Replication is not working between master 1 and master 3.')
+        assert False
+
+    if not topology.master1.testReplication(DEFAULT_SUFFIX, topology.master4):
+        log.fatal('test_cleanallruv_init: Replication is not working between master 1 and master 4.')
+        assert False
+
+    # Master 2
+    if not topology.master2.testReplication(DEFAULT_SUFFIX, topology.master1):
+        log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 1.')
+        assert False
+
+    if not topology.master2.testReplication(DEFAULT_SUFFIX, topology.master3):
+        log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 3.')
+        assert False
+
+    if not topology.master2.testReplication(DEFAULT_SUFFIX, topology.master4):
+        log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 4.')
+        assert False
+
+    # Master 3
+    if not topology.master3.testReplication(DEFAULT_SUFFIX, topology.master1):
+        log.fatal('test_cleanallruv_init: Replication is not working between master 3 and master 1.')
+        assert False
+
+    if not topology.master3.testReplication(DEFAULT_SUFFIX, topology.master2):
+        log.fatal('test_cleanallruv_init: Replication is not working between master 3 and master 2.')
+        assert False
+
+    if not topology.master3.testReplication(DEFAULT_SUFFIX, topology.master4):
+        log.fatal('test_cleanallruv_init: Replication is not working between master 3 and master 4.')
+        assert False
+
+    # Master 4
+    if not topology.master4.testReplication(DEFAULT_SUFFIX, topology.master1):
+        log.fatal('test_cleanallruv_init: Replication is not working between master 4 and master 1.')
+        assert False
+
+    if not topology.master4.testReplication(DEFAULT_SUFFIX, topology.master2):
+        log.fatal('test_cleanallruv_init: Replication is not working between master 4 and master 2.')
+        assert False
+
+    if not topology.master4.testReplication(DEFAULT_SUFFIX, topology.master3):
+        log.fatal('test_cleanallruv_init: Replication is not working between master 4 and master 3.')
+        assert False
+
+    log.info('Initialized cleanAllRUV test suite.')
+
+
+def test_cleanallruv_clean(topology):
+    '''
+    Disable a master, remove agreements to that master, and clean the RUVs on
+    the remaining replicas
+    '''
+
+    log.info('Running test_cleanallruv_clean...')
+
+    # Disable master 4
+    log.info('test_cleanallruv_clean: disable master 4...')
+    try:
+        topology.master4.replica.disableReplication(DEFAULT_SUFFIX)
+    except:
+        log.fatal('test_cleanallruv_clean: failed to disable replication on master 4')
+        assert False
+
+    # Remove the agreements from the other masters that point to master 4
+    log.info('test_cleanallruv_clean: remove all the agreements to master 4...')
+    try:
+        topology.master1.agreement.delete(DEFAULT_SUFFIX, topology.master4)
+    except ldap.LDAPError, e:
+        log.fatal('test_cleanallruv_clean: Failed to delete agmt(m1 -> m4), error: ' +
+                  e.message['desc'])
+        assert False
+    try:
+        topology.master2.agreement.delete(DEFAULT_SUFFIX, topology.master4)
+    except ldap.LDAPError, e:
+        log.fatal('test_cleanallruv_clean: Failed to delete agmt(m2 -> m4), error: ' +
+                  e.message['desc'])
+        assert False
+    try:
+        topology.master3.agreement.delete(DEFAULT_SUFFIX, topology.master4)
+    except ldap.LDAPError, e:
+        log.fatal('test_cleanallruv_clean: Failed to delete agmt(m3 -> m4), error: ' +
+                  e.message['desc'])
+        assert False
+
+    # Run the task
+    log.info('test_cleanallruv_clean: run the cleanAllRUV task...')
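+    # TASK_WAIT: True should make the call block until the task entry reports completion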
+    try:
+        topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4',
+                                           args={TASK_WAIT: True})
+    except ValueError, e:
+        log.fatal('test_cleanallruv_clean: Problem running cleanAllRuv task: ' +
+                  e.message)
+        assert False
+
+    # Check the other master's RUV for 'replica 4'
+    log.info('test_cleanallruv_clean: check all the masters have been cleaned...')
+    clean = False
+    count = 0
+    while not clean and count < 5:
+        clean = True
+
+        # Check master 1
+        try:
+            entry = topology.master1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER)
+            if not entry:
+                log.error('test_cleanallruv_clean: Failed to find db tombstone entry from master')
+                assert False
+            elements = entry[0].getValues('nsds50ruv')
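+            # Each nsds50ruv value describes one replica; after cleaning, no value should mention 'replica 4'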
+            for ruv in elements:
+                if 'replica 4' in ruv:
+                    # Not cleaned
+                    log.error('test_cleanallruv_clean: Master 1 not cleaned!')
+                    clean = False
+            if clean:
+                log.info('test_cleanallruv_clean: Master 1 is cleaned.')
+        except ldap.LDAPError, e:
+            log.fatal('test_cleanallruv_clean: Unable to search master 1 for db tombstone: ' + e.message['desc'])
+
+        # Check master 2
+        try:
+            entry = topology.master2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER)
+            if not entry:
+                log.error('test_cleanallruv_clean: Failed to find db tombstone entry from master')
+                assert False
+            elements = entry[0].getValues('nsds50ruv')
+            for ruv in elements:
+                if 'replica 4' in ruv:
+                    # Not cleaned
+                    log.error('test_cleanallruv_clean: Master 2 not cleaned!')
+                    clean = False
+            if clean:
+                log.info('test_cleanallruv_clean: Master 2 is cleaned.')
+        except ldap.LDAPError, e:
+            log.fatal('test_cleanallruv_clean: Unable to search master 2 for db tombstone: ' + e.message['desc'])
+
+        # Check master 3
+        try:
+            entry = topology.master3.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER)
+            if not entry:
+                log.error('test_cleanallruv_clean: Failed to find db tombstone entry from master')
+                assert False
+            elements = entry[0].getValues('nsds50ruv')
+            for ruv in elements:
+                if 'replica 4' in ruv:
+                    # Not cleaned
+                    log.error('test_cleanallruv_clean: Master 3 not cleaned!')
+                    clean = False
+            if clean:
+                log.info('test_cleanallruv_clean: Master 3 is cleaned.')
+        except ldap.LDAPError, e:
+            log.fatal('test_cleanallruv_clean: Unable to search master 3 for db tombstone: ' + e.message['desc'])
+
+        # Sleep a bit and give it a chance to clean up...
+        time.sleep(5)
+        count += 1
+
+    if not clean:
+        log.fatal('test_cleanallruv_clean: Failed to clean replicas')
+        assert False
+
+    log.info('Allow cleanallruv threads to finish...')
+    time.sleep(30)
+
+    log.info('test_cleanallruv_clean PASSED, restoring master 4...')
+
+    #
+    # Cleanup - restore master 4
+    #
+    restore_master4(topology)
+
+
+def test_cleanallruv_clean_restart(topology):
+    '''
+    Test that if a master is stopped during the clean process, the task
+    resumes and finishes when the master is started again.
+    '''
+
+    log.info('Running test_cleanallruv_clean_restart...')
+
+    # Disable master 4
+    log.info('test_cleanallruv_clean_restart: disable master 4...')
+    try:
+        topology.master4.replica.disableReplication(DEFAULT_SUFFIX)
+    except:
+        log.fatal('test_cleanallruv_clean_restart: failed to disable replication on master 4')
+        assert False
+
+    # Remove the agreements from the other masters that point to master 4
+    log.info('test_cleanallruv_clean_restart: remove all the agreements to master 4...')
+    try:
+        topology.master1.agreement.delete(DEFAULT_SUFFIX, topology.master4)
+    except ldap.LDAPError, e:
+        log.fatal('test_cleanallruv_clean_restart: Failed to delete agmt(m1 -> m4), error: ' +
+                  e.message['desc'])
+        assert False
+    try:
+        topology.master2.agreement.delete(DEFAULT_SUFFIX, topology.master4)
+    except ldap.LDAPError, e:
+        log.fatal('test_cleanallruv_clean_restart: Failed to delete agmt(m2 -> m4), error: ' +
+                  e.message['desc'])
+        assert False
+    try:
+        topology.master3.agreement.delete(DEFAULT_SUFFIX, topology.master4)
+    except ldap.LDAPError, e:
+        log.fatal('test_cleanallruv_clean_restart: Failed to delete agmt(m3 -> m4), error: ' +
+                  e.message['desc'])
+        assert False
+
+    # Stop master 3 to keep the task running, so we can stop master 1...
+    topology.master3.stop(timeout=30)
+
+    # Run the task
+    log.info('test_cleanallruv_clean_restart: run the cleanAllRUV task...')
+    try:
+        topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4',
+                                           args={TASK_WAIT: False})
+    except ValueError, e:
+        log.fatal('test_cleanallruv_clean_restart: Problem running cleanAllRuv task: ' +
+                  e.message)
+        assert False
+
+    # Sleep a bit, then stop master 1
+    time.sleep(3)
+    topology.master1.stop(timeout=30)
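+    # Master 1 is now down mid-clean; the task is expected to resume once the server is started again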
+
+    # Now start master 3 & 1, and make sure we didn't crash
+    topology.master3.start(timeout=30)
+    if topology.master3.detectDisorderlyShutdown():
+        log.fatal('test_cleanallruv_clean_restart: Master 3 previously crashed!')
+        assert False
+
+    topology.master1.start(timeout=30)
+    if topology.master1.detectDisorderlyShutdown():
+        log.fatal('test_cleanallruv_clean_restart: Master 1 previously crashed!')
+        assert False
+
+    # Wait a little for agmts/cleanallruv to wake up
+    time.sleep(5)
+
+    # Check the other master's RUV for 'replica 4'
+    log.info('test_cleanallruv_clean_restart: check all the masters have been cleaned...')
+    clean = False
+    count = 0
+    while not clean and count < 10:
+        clean = True
+
+        # Check master 1
+        try:
+            entry = topology.master1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER)
+            if not entry:
+                log.error('test_cleanallruv_clean_restart: Failed to find db tombstone entry from master')
+                assert False
+            elements = entry[0].getValues('nsds50ruv')
+            for ruv in elements:
+                if 'replica 4' in ruv:
+                    # Not cleaned
+                    log.error('test_cleanallruv_clean_restart: Master 1 not cleaned!')
+                    clean = False
+            if clean:
+                log.info('test_cleanallruv_clean_restart: Master 1 is cleaned.')
+        except ldap.LDAPError, e:
+            log.fatal('test_cleanallruv_clean_restart: Unable to search master 1 for db tombstone: ' +
+                      e.message['desc'])
+
+        # Check master 2
+        try:
+            entry = topology.master2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER)
+            if not entry:
+                log.error('test_cleanallruv_clean_restart: Failed to find db tombstone entry from master')
+                assert False
+            elements = entry[0].getValues('nsds50ruv')
+            for ruv in elements:
+                if 'replica 4' in ruv:
+                    # Not cleaned
+                    log.error('test_cleanallruv_clean_restart: Master 2 not cleaned!')
+                    clean = False
+            if clean:
+                log.info('test_cleanallruv_clean_restart: Master 2 is cleaned.')
+        except ldap.LDAPError, e:
+            log.fatal('test_cleanallruv_clean_restart: Unable to search master 2 for db tombstone: ' +
+                      e.message['desc'])
+
+        # Check master 3
+        try:
+            entry = topology.master3.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER)
+            if not entry:
+                log.error('test_cleanallruv_clean_restart: Failed to find db tombstone entry from master')
+                assert False
+            elements = entry[0].getValues('nsds50ruv')
+            for ruv in elements:
+                if 'replica 4' in ruv:
+                    # Not cleaned
+                    log.error('test_cleanallruv_clean_restart: Master 3 not cleaned!')
+                    clean = False
+            if clean:
+                log.info('test_cleanallruv_clean_restart: Master 3 is cleaned.')
+        except ldap.LDAPError, e:
+            log.fatal('test_cleanallruv_clean_restart: Unable to search master 3 for db tombstone: ' +
+                      e.message['desc'])
+
+        # Sleep a bit and give it a chance to clean up...
+        time.sleep(5)
+        count += 1
+
+    if not clean:
+        log.fatal('test_cleanallruv_clean_restart: Failed to clean replicas')
+        assert False
+
+    log.info('Allow cleanallruv threads to finish...')
+    time.sleep(30)
+
+    log.info('test_cleanallruv_clean_restart PASSED, restoring master 4...')
+
+    #
+    # Cleanup - restore master 4
+    #
+    restore_master4(topology)
+
+
+def test_cleanallruv_clean_force(topology):
+    '''
+    Disable a master while another master is out of sync, remove agreements to the
+    disabled master, and clean the RUVs on the remaining replicas using the force option
+    '''
+
+    log.info('Running test_cleanallruv_clean_force...')
+
+    # Stop master 3, while we update master 4, so that 3 is behind the other masters
+    topology.master3.stop(timeout=10)
+
+    # Add a bunch of updates to master 4
+    m4_add_users = AddUsers(topology.master4, 1500)
+    m4_add_users.start()
+    m4_add_users.join()
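+    # join() waits for the adds to finish on master 4 while master 3 is still stopped,
+    # leaving master 3 behind the other replicas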
+
+    # Disable master 4
+    log.info('test_cleanallruv_clean_force: disable master 4...')
+    try:
+        topology.master4.replica.disableReplication(DEFAULT_SUFFIX)
+    except:
+        log.fatal('test_cleanallruv_clean_force: failed to disable replication on master 4')
+        assert False
+
+    # Start master 3, it should be out of sync with the other replicas...
+    topology.master3.start(timeout=10)
+
+    # Remove the agreements from the other masters that point to master 4
+    log.info('test_cleanallruv_clean_force: remove all the agreements to master 4...')
+    try:
+        topology.master1.agreement.delete(DEFAULT_SUFFIX, topology.master4)
+    except ldap.LDAPError, e:
+        log.fatal('test_cleanallruv_clean_force: Failed to delete agmt(m1 -> m4), error: ' +
+                  e.message['desc'])
+        assert False
+    try:
+        topology.master2.agreement.delete(DEFAULT_SUFFIX, topology.master4)
+    except ldap.LDAPError, e:
+        log.fatal('test_cleanallruv_clean_force: Failed to delete agmt(m2 -> m4), error: ' +
+                  e.message['desc'])
+        assert False
+    try:
+        topology.master3.agreement.delete(DEFAULT_SUFFIX, topology.master4)
+    except ldap.LDAPError, e:
+        log.fatal('test_cleanallruv_clean_force: Failed to delete agmt(m3 -> m4), error: ' +
+                  e.message['desc'])
+        assert False
+
+    # Run the task, use "force" because master 3 is not in sync with the other replicas
+    # in regards to the replica 4 RUV
+    log.info('test_cleanallruv_clean_force: run the cleanAllRUV task...')
+    try:
+        topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4',
+                                           force=True, args={TASK_WAIT: True})
+    except ValueError, e:
+        log.fatal('test_cleanallruv_clean_force: Problem running cleanAllRuv task: ' +
+                  e.message)
+        assert False
+
+    # Check the other master's RUV for 'replica 4'
+    log.info('test_cleanallruv_clean_force: check all the masters have been cleaned...')
+    clean = False
+    count = 0
+    while not clean and count < 5:
+        clean = True
+
+        # Check master 1
+        try:
+            entry = topology.master1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER)
+            if not entry:
+                log.error('test_cleanallruv_clean_force: Failed to find db tombstone entry from master')
+                assert False
+            elements = entry[0].getValues('nsds50ruv')
+            for ruv in elements:
+                if 'replica 4' in ruv:
+                    # Not cleaned
+                    log.error('test_cleanallruv_clean_force: Master 1 not cleaned!')
+                    clean = False
+            if clean:
+                log.info('test_cleanallruv_clean_force: Master 1 is cleaned.')
+        except ldap.LDAPError, e:
+            log.fatal('test_cleanallruv_clean_force: Unable to search master 1 for db tombstone: ' +
+                      e.message['desc'])
+
+        # Check master 2
+        try:
+            entry = topology.master2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER)
+            if not entry:
+                log.error('test_cleanallruv_clean_force: Failed to find db tombstone entry from master')
+                assert False
+            elements = entry[0].getValues('nsds50ruv')
+            for ruv in elements:
+                if 'replica 4' in ruv:
+                    # Not cleaned
+                    log.error('test_cleanallruv_clean_force: Master 2 not cleaned!')
+                    clean = False
+            if clean:
+                log.info('test_cleanallruv_clean_force: Master 2 is cleaned.')
+        except ldap.LDAPError, e:
+            log.fatal('test_cleanallruv_clean_force: Unable to search master 2 for db tombstone: ' +
+                      e.message['desc'])
+
+        # Check master 3
+        try:
+            entry = topology.master3.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER)
+            if not entry:
+                log.error('test_cleanallruv_clean_force: Failed to find db tombstone entry from master')
+                assert False
+            elements = entry[0].getValues('nsds50ruv')
+            for ruv in elements:
+                if 'replica 4' in ruv:
+                    # Not cleaned
+                    log.error('test_cleanallruv_clean_force: Master 3 not cleaned!')
+                    clean = False
+            if clean:
+                log.info('test_cleanallruv_clean_force: Master 3 is cleaned.')
+        except ldap.LDAPError, e:
+            log.fatal('test_cleanallruv_clean_force: Unable to search master 3 for db tombstone: ' +
+                      e.message['desc'])
+
+        # Sleep a bit and give it a chance to clean up...
+        time.sleep(5)
+        count += 1
+
+    if not clean:
+        log.fatal('test_cleanallruv_clean_force: Failed to clean replicas')
+        assert False
+
+    log.info('test_cleanallruv_clean_force: Allow cleanallruv threads to finish')
+    time.sleep(30)
+
+    log.info('test_cleanallruv_clean_force PASSED, restoring master 4...')
+
+    #
+    # Cleanup - restore master 4
+    #
+    restore_master4(topology)
+
+
+def test_cleanallruv_abort(topology):
+    '''
+    Test the abort task.
+
+    Disable master 4
+    Stop master 2 so that it can not be cleaned
+    Run the clean task
+    Wait a bit
+    Abort the task
+    Verify task is aborted
+    '''
+
+    log.info('Running test_cleanallruv_abort...')
+
+    # Disable master 4
+    log.info('test_cleanallruv_abort: disable replication on master 4...')
+    try:
+        topology.master4.replica.disableReplication(DEFAULT_SUFFIX)
+    except:
+        log.fatal('test_cleanallruv_abort: failed to disable replication')
+        assert False
+
+    # Remove the agreements from the other masters that point to master 4
+    log.info('test_cleanallruv_abort: remove all the agreements to master 4...')
+    try:
+        topology.master1.agreement.delete(DEFAULT_SUFFIX, topology.master4)
+    except ldap.LDAPError, e:
+        log.fatal('test_cleanallruv_abort: Failed to delete agmt(m1 -> m4), error: ' +
+                  e.message['desc'])
+        assert False
+    try:
+        topology.master2.agreement.delete(DEFAULT_SUFFIX, topology.master4)
+    except ldap.LDAPError, e:
+        log.fatal('test_cleanallruv_abort: Failed to delete agmt(m2 -> m4), error: ' +
+                  e.message['desc'])
+        assert False
+    try:
+        topology.master3.agreement.delete(DEFAULT_SUFFIX, topology.master4)
+    except ldap.LDAPError, e:
+        log.fatal('test_cleanallruv_abort: Failed to delete agmt(m3 -> m4), error: ' +
+                  e.message['desc'])
+        assert False
+
+    # Stop master 2
+    log.info('test_cleanallruv_abort: stop master 2 to freeze the cleanAllRUV task...')
+    topology.master2.stop(timeout=10)
+
+    # Run the task
+    log.info('test_cleanallruv_abort: add the cleanAllRUV task...')
+    try:
+        (clean_task_dn, rc) = topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX,
+                                  replicaid='4', args={TASK_WAIT: False})
+    except ValueError, e:
+        log.fatal('test_cleanallruv_abort: Problem running cleanAllRuv task: ' +
+                  e.message)
+        assert False
+
+    # Wait a bit
+    time.sleep(10)
+
+    # Abort the task
+    log.info('test_cleanallruv_abort: abort the cleanAllRUV task...')
+    try:
+        topology.master1.tasks.abortCleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4',
+                                                args={TASK_WAIT: True})
+    except ValueError, e:
+        log.fatal('test_cleanallruv_abort: Problem running abortCleanAllRuv task: ' +
+                  e.message)
+        assert False
+
+    # Check master 1 does not have the clean task running
+    log.info('test_cleanallruv_abort: check master 1 no longer has a cleanAllRUV task...')
+    attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode',
+                'nsTaskCurrentItem', 'nsTaskTotalItems']
+    done = False
+    count = 0
+    while not done and count < 5:
+        entry = topology.master1.getEntry(clean_task_dn, attrlist=attrlist)
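+        # A missing task entry or a set nsTaskExitCode means the task has finished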
+        if not entry or entry.nsTaskExitCode:
+            done = True
+            break
+        time.sleep(1)
+        count += 1
+    if not done:
+        log.fatal('test_cleanallruv_abort: CleanAllRUV task was not aborted')
+        assert False
+
+    # Start master 2
+    log.info('test_cleanallruv_abort: start master 2 to begin the restore process...')
+    topology.master2.start(timeout=10)
+
+    #
+    # Now run the clean task again so we can properly restore master 4
+    #
+    log.info('test_cleanallruv_abort: run cleanAllRUV task so we can properly restore master 4...')
+    try:
+        topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX,
+                                  replicaid='4', args={TASK_WAIT: True})
+    except ValueError, e:
+        log.fatal('test_cleanallruv_abort: Problem running cleanAllRuv task: ' + e.message)
+        assert False
+
+    log.info('test_cleanallruv_abort PASSED, restoring master 4...')
+
+    #
+    # Cleanup - Restore master 4
+    #
+    restore_master4(topology)
+
+
+def test_cleanallruv_abort_restart(topology):
+    '''
+    Test the abort task can handle a restart, and then resume
+    '''
+
+    log.info('Running test_cleanallruv_abort_restart...')
+
+    # Disable master 4
+    log.info('test_cleanallruv_abort_restart: disable replication on master 4...')
+    try:
+        topology.master4.replica.disableReplication(DEFAULT_SUFFIX)
+    except:
+        log.fatal('test_cleanallruv_abort_restart: failed to disable replication on master 4')
+        assert False
+
+    # Remove the agreements from the other masters that point to master 4
+    log.info('test_cleanallruv_abort_restart: remove all the agreements to master 4...')
+    try:
+        topology.master1.agreement.delete(DEFAULT_SUFFIX, topology.master4)
+    except ldap.LDAPError, e:
+        log.fatal('test_cleanallruv_abort_restart: Failed to delete agmt(m1 -> m4), error: ' +
+                  e.message['desc'])
+        assert False
+    try:
+        topology.master2.agreement.delete(DEFAULT_SUFFIX, topology.master4)
+    except ldap.LDAPError, e:
+        log.fatal('test_cleanallruv_abort_restart: Failed to delete agmt(m2 -> m4), error: ' +
+                  e.message['desc'])
+        assert False
+    try:
+        topology.master3.agreement.delete(DEFAULT_SUFFIX, topology.master4)
+    except ldap.LDAPError, e:
+        log.fatal('test_cleanallruv_abort_restart: Failed to delete agmt(m3 -> m4), error: ' +
+                  e.message['desc'])
+        assert False
+
+    # Stop master 3
+    log.info('test_cleanallruv_abort_restart: stop master 3 to freeze the cleanAllRUV task...')
+    topology.master3.stop(timeout=10)
+
+    # Run the task
+    log.info('test_cleanallruv_abort_restart: add the cleanAllRUV task...')
+    try:
+        (clean_task_dn, rc) = topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX,
+                                  replicaid='4', args={TASK_WAIT: False})
+    except ValueError, e:
+        log.fatal('test_cleanallruv_abort_restart: Problem running cleanAllRuv task: ' +
+                  e.message)
+        assert False
+
+    # Wait a bit
+    time.sleep(5)
+
+    # Abort the task
+    log.info('test_cleanallruv_abort_restart: abort the cleanAllRUV task...')
+    try:
+        topology.master1.tasks.abortCleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4',
+                                                certify=True, args={TASK_WAIT: False})
+    except ValueError, e:
+        log.fatal('test_cleanallruv_abort_restart: Problem running abortCleanAllRuv task: ' +
+                  e.message)
+        assert False
+
+    # Allow task to run for a bit:
+    time.sleep(5)
+
+    # Check master 1 does not have the clean task running
+    log.info('test_cleanallruv_abort_restart: check master 1 no longer has a cleanAllRUV task...')
+    attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode',
+                'nsTaskCurrentItem', 'nsTaskTotalItems']
+    done = False
+    count = 0
+    while not done and count < 10:
+        entry = topology.master1.getEntry(clean_task_dn, attrlist=attrlist)
+        if not entry or entry.nsTaskExitCode:
+            done = True
+            break
+        time.sleep(1)
+        count += 1
+    if not done:
+        log.fatal('test_cleanallruv_abort_restart: CleanAllRUV task was not aborted')
+        assert False
+
+    # Now restart master 1, and make sure the abort process completes
+    topology.master1.restart(timeout=30)
+    if topology.master1.detectDisorderlyShutdown():
+        log.fatal('test_cleanallruv_abort_restart: Master 1 previously crashed!')
+        assert False
+
+    # Start master 3
+    topology.master3.start(timeout=10)
+
+    # Check that master 1 resumed the abort task after the restart (the errors log should contain 'Aborting abort task')
+    if not topology.master1.searchErrorsLog('Aborting abort task'):
+        log.fatal('test_cleanallruv_abort_restart: Abort task did not restart')
+        assert False
+
+    #
+    # Now run the clean task again so we can properly restore master 4
+    #
+    log.info('test_cleanallruv_abort_restart: run cleanAllRUV task so we can properly restore master 4...')
+    try:
+        topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX,
+                                  replicaid='4', args={TASK_WAIT: True})
+    except ValueError, e:
+        log.fatal('test_cleanallruv_abort_restart: Problem running cleanAllRuv task: ' +
+                  e.message)
+        assert False
+
+    log.info('test_cleanallruv_abort_restart PASSED, restoring master 4...')
+
+    #
+    # Cleanup - Restore master 4
+    #
+    restore_master4(topology)
+
+
+def test_cleanallruv_abort_certify(topology):
+    '''
+    Test the abort task with the certify-all option.
+
+    Disable master 4
+    Stop master 2 so that it cannot be cleaned
+    Run the clean task
+    Abort the task with certify-all
+    Verify the abort task keeps running while master 2 is down
+    Start master 2 and verify the abort task completes
+    '''
+
+    log.info('Running test_cleanallruv_abort_certify...')
+
+    # Disable master 4
+    log.info('test_cleanallruv_abort_certify: disable replication on master 4...')
+    try:
+        topology.master4.replica.disableReplication(DEFAULT_SUFFIX)
+    except:
+        log.fatal('test_cleanallruv_abort_certify: failed to disable replication on master 4')
+        assert False
+
+    # Remove the agreements from the other masters that point to master 4
+    log.info('test_cleanallruv_abort_certify: remove all the agreements to master 4...')
+    try:
+        topology.master1.agreement.delete(DEFAULT_SUFFIX, topology.master4)
+    except ldap.LDAPError, e:
+        log.fatal('test_cleanallruv_abort_certify: Failed to delete agmt(m1 -> m4), error: ' +
+                  e.message['desc'])
+        assert False
+    try:
+        topology.master2.agreement.delete(DEFAULT_SUFFIX, topology.master4)
+    except ldap.LDAPError, e:
+        log.fatal('test_cleanallruv_abort_certify: Failed to delete agmt(m2 -> m4), error: ' +
+                  e.message['desc'])
+        assert False
+    try:
+        topology.master3.agreement.delete(DEFAULT_SUFFIX, topology.master4)
+    except ldap.LDAPError, e:
+        log.fatal('test_cleanallruv_abort_certify: Failed to delete agmt(m3 -> m4), error: ' +
+                  e.message['desc'])
+        assert False
+
+    # Stop master 2
+    log.info('test_cleanallruv_abort_certify: stop master 2 to freeze the cleanAllRUV task...')
+    topology.master2.stop(timeout=10)
+
+    # Run the task
+    log.info('test_cleanallruv_abort_certify: add the cleanAllRUV task...')
+    try:
+        (clean_task_dn, rc) = topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX,
+                                  replicaid='4', args={TASK_WAIT: False})
+    except ValueError, e:
+        log.fatal('test_cleanallruv_abort_certify: Problem running cleanAllRuv task: ' +
+                  e.message)
+        assert False
+
+    # Abort the task
+    log.info('test_cleanallruv_abort_certify: abort the cleanAllRUV task...')
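+    # certify=True is expected to make the abort wait until every replica acknowledges it,
+    # which is why the abort task should keep running while master 2 is down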
+    try:
+        (abort_task_dn, rc) = topology.master1.tasks.abortCleanAllRUV(suffix=DEFAULT_SUFFIX,
+                                  replicaid='4', certify=True, args={TASK_WAIT: False})
+    except ValueError, e:
+        log.fatal('test_cleanallruv_abort_certify: Problem running abortCleanAllRuv task: ' +
+                  e.message)
+        assert False
+
+    # Wait a while and make sure the abort task is still running
+    log.info('test_cleanallruv_abort_certify: sleep for 10 seconds')
+    time.sleep(10)
+
+    attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode',
+                'nsTaskCurrentItem', 'nsTaskTotalItems']
+    entry = topology.master1.getEntry(abort_task_dn, attrlist=attrlist)
+    if not entry or entry.nsTaskExitCode:
+        log.fatal('test_cleanallruv_abort_certify: abort task incorrectly finished')
+        assert False
+
+    # Now start master 2 so it can be aborted
+    log.info('test_cleanallruv_abort_certify: start master 2 to allow the abort task to finish...')
+    topology.master2.start(timeout=10)
+
+    # Wait for the abort task to stop
+    done = False
+    count = 0
+    while not done and count < 60:
+        entry = topology.master1.getEntry(abort_task_dn, attrlist=attrlist)
+        if not entry or entry.nsTaskExitCode:
+            done = True
+            break
+        time.sleep(1)
+        count += 1
+    if not done:
+        log.fatal('test_cleanallruv_abort_certify: The abort CleanAllRUV task was not aborted')
+        assert False
+
+    # Check master 1 does not have the clean task running
+    log.info('test_cleanallruv_abort_certify: check master 1 no longer has a cleanAllRUV task...')
+    attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode',
+                'nsTaskCurrentItem', 'nsTaskTotalItems']
+    done = False
+    count = 0
+    while not done and count < 5:
+        entry = topology.master1.getEntry(clean_task_dn, attrlist=attrlist)
+        if not entry or entry.nsTaskExitCode:
+            done = True
+            break
+        time.sleep(1)
+        count += 1
+    if not done:
+        log.fatal('test_cleanallruv_abort_certify: CleanAllRUV task was not aborted')
+        assert False
+
+    # Start master 2
+    log.info('test_cleanallruv_abort_certify: start master 2 to begin the restore process...')
+    topology.master2.start(timeout=10)
+
+    #
+    # Now run the cleanAllRUV task again so we can properly restore master 4
+    #
+    log.info('test_cleanallruv_abort_certify: run cleanAllRUV task so we can properly restore master 4...')
+    try:
+        topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX,
+                                  replicaid='4', args={TASK_WAIT: True})
+    except ValueError, e:
+        log.fatal('test_cleanallruv_abort_certify: Problem running cleanAllRuv task: ' +
+                  e.message)
+        assert False
+
+    log.info('test_cleanallruv_abort_certify PASSED, restoring master 4...')
+
+    #
+    # Cleanup - Restore master 4
+    #
+    restore_master4(topology)
+
+
+def test_cleanallruv_stress_clean(topology):
+    '''
+    Put each server(m1 - m4) under stress, and perform the entire clean process
+    '''
+    log.info('Running test_cleanallruv_stress_clean...')
+    log.info('test_cleanallruv_stress_clean: put all the masters under load...')
+
+    # Put all the masters under load
+    m1_add_users = AddUsers(topology.master1, 4000)
+    m1_add_users.start()
+    m2_add_users = AddUsers(topology.master2, 4000)
+    m2_add_users.start()
+    m3_add_users = AddUsers(topology.master3, 4000)
+    m3_add_users.start()
+    m4_add_users = AddUsers(topology.master4, 4000)
+    m4_add_users.start()
+
+    # Allow some time for replication to get flowing in all directions
+    log.info('test_cleanallruv_stress_clean: allow some time for replication to get flowing...')
+    time.sleep(5)
+
+    # Put master 4 into read only mode
+    log.info('test_cleanallruv_stress_clean: put master 4 into read-only mode...')
+    try:
+        topology.master4.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-readonly', 'on')])
+    except ldap.LDAPError, e:
+        log.fatal('test_cleanallruv_stress_clean: Failed to put master 4 into read-only mode: error ' +
+                  e.message['desc'])
+        assert False
+
+    # We need to wait for master 4 to push its changes out
+    log.info('test_cleanallruv_stress_clean: allow some time for master 4 to push changes out (30 seconds)...')
+    time.sleep(30)
+
+    # Disable master 4
+    log.info('test_cleanallruv_stress_clean: disable replication on master 4...')
+    try:
+        topology.master4.replica.disableReplication(DEFAULT_SUFFIX)
+    except:
+        log.fatal('test_cleanallruv_stress_clean: failed to disable replication')
+        assert False
+
+    # Remove the agreements from the other masters that point to master 4
+    log.info('test_cleanallruv_stress_clean: remove all the agreements to master 4...')
+    try:
+        topology.master1.agreement.delete(DEFAULT_SUFFIX, topology.master4)
+    except ldap.LDAPError, e:
+        log.fatal('test_cleanallruv_stress_clean: Failed to delete agmt(m1 -> m4), error: ' +
+                  e.message['desc'])
+        assert False
+    try:
+        topology.master2.agreement.delete(DEFAULT_SUFFIX, topology.master4)
+    except ldap.LDAPError, e:
+        log.fatal('test_cleanallruv_stress_clean: Failed to delete agmt(m2 -> m4), error: ' +
+                  e.message['desc'])
+        assert False
+    try:
+        topology.master3.agreement.delete(DEFAULT_SUFFIX, topology.master4)
+    except ldap.LDAPError, e:
+        log.fatal('test_cleanallruv_stress_clean: Failed to delete agmt(m3 -> m4), error: ' +
+                  e.message['desc'])
+        assert False
+
+    # Run the task
+    log.info('test_cleanallruv_stress_clean: Run the cleanAllRUV task...')
+    try:
+        topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4',
+                                           args={TASK_WAIT: True})
+    except ValueError, e:
+        log.fatal('test_cleanallruv_stress_clean: Problem running cleanAllRuv task: ' +
+                  e.message)
+        assert False
+
+    # Wait for the update to finish
+    log.info('test_cleanallruv_stress_clean: wait for all the updates to finish...')
+    m1_add_users.join()
+    m2_add_users.join()
+    m3_add_users.join()
+    m4_add_users.join()
+
+    # Check the other masters' RUVs for 'replica 4'
+    log.info('test_cleanallruv_stress_clean: check if all the replicas have been cleaned...')
+    clean = False
+    count = 0
+    while not clean and count < 10:
+        clean = True
+
+        # Check master 1
+        try:
+            entry = topology.master1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER)
+            if not entry:
+                log.error('test_cleanallruv_stress_clean: Failed to find db tombstone entry from master 1')
+                repl_fail(topology.master1)
+            elements = entry[0].getValues('nsds50ruv')
+            for ruv in elements:
+                if 'replica 4' in ruv:
+                    # Not cleaned
+                    log.error('test_cleanallruv_stress_clean: Master 1 not cleaned!')
+                    clean = False
+            if clean:
+                log.info('test_cleanallruv_stress_clean: Master 1 is cleaned.')
+        except ldap.LDAPError, e:
+            log.fatal('test_cleanallruv_stress_clean: Unable to search master 1 for db tombstone: ' +
+                      e.message['desc'])
+
+        # Check master 2
+        try:
+            entry = topology.master2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER)
+            if not entry:
+                log.error('test_cleanallruv_stress_clean: Failed to find db tombstone entry from master 2')
+                repl_fail(topology.master2)
+            elements = entry[0].getValues('nsds50ruv')
+            for ruv in elements:
+                if 'replica 4' in ruv:
+                    # Not cleaned
+                    log.error('test_cleanallruv_stress_clean: Master 2 not cleaned!')
+                    clean = False
+            if clean:
+                log.info('test_cleanallruv_stress_clean: Master 2 is cleaned.')
+        except ldap.LDAPError, e:
+            log.fatal('test_cleanallruv_stress_clean: Unable to search master 2 for db tombstone: ' +
+                      e.message['desc'])
+
+        # Check master 3
+        try:
+            entry = topology.master3.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER)
+            if not entry:
+                log.error('test_cleanallruv_stress_clean: Failed to find db tombstone entry from master 3')
+                repl_fail(topology.master3)
+            elements = entry[0].getValues('nsds50ruv')
+            for ruv in elements:
+                if 'replica 4' in ruv:
+                    # Not cleaned
+                    log.error('test_cleanallruv_stress_clean: Master 3 not cleaned!')
+                    clean = False
+            if clean:
+                log.info('test_cleanallruv_stress_clean: Master 3 is cleaned.')
+        except ldap.LDAPError, e:
+            log.fatal('test_cleanallruv_stress_clean: Unable to search master 3 for db tombstone: ' +
+                      e.message['desc'])
+
+        # Sleep a bit and give it chance to clean up...
+        time.sleep(5)
+        count += 1
+
+    if not clean:
+        log.fatal('test_cleanallruv_stress_clean: Failed to clean replicas')
+        assert False
+
+    log.info('test_cleanallruv_stress_clean:  PASSED, restoring master 4...')
+
+    #
+    # Cleanup - restore master 4
+    #
+
+    # Turn off readonly mode
+    try:
+        topology.master4.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-readonly', 'off')])
+    except ldap.LDAPError, e:
+        log.fatal('test_cleanallruv_stress_clean: Failed to take master 4 out of read-only mode: error ' +
+                  e.message['desc'])
+        assert False
+
+    restore_master4(topology)
+
+
+def test_cleanallruv_final(topology):
+    topology.master1.delete()
+    topology.master2.delete()
+    topology.master3.delete()
+    topology.master4.delete()
+    log.info('cleanAllRUV test suite PASSED')
+
+
+def run_isolated():
+    global installation1_prefix
+    installation1_prefix = None
+    topo = topology(True)
+
+    test_cleanallruv_init(topo)
+    test_cleanallruv_clean(topo)
+    test_cleanallruv_clean_restart(topo)
+    test_cleanallruv_clean_force(topo)
+    test_cleanallruv_abort(topo)
+    test_cleanallruv_abort_restart(topo)
+    test_cleanallruv_abort_certify(topo)
+    test_cleanallruv_stress_clean(topo)
+    test_cleanallruv_final(topo)
+
+
+if __name__ == '__main__':
+    run_isolated()
+
diff --git a/dirsrvtests/suites/rootdn_plugin/rootdn_plugin_test.py b/dirsrvtests/suites/rootdn_plugin/rootdn_plugin_test.py
new file mode 100644
index 0000000..d1c4916
--- /dev/null
+++ b/dirsrvtests/suites/rootdn_plugin/rootdn_plugin_test.py
@@ -0,0 +1,770 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+installation1_prefix = None
+
+PLUGIN_DN = 'cn=' + PLUGIN_ROOTDN_ACCESS + ',cn=plugins,cn=config'
+USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX
+
+
+class TopologyStandalone(object):
+    def __init__(self, standalone):
+        standalone.open()
+        self.standalone = standalone
+
+
+ at pytest.fixture(scope="module")
+def topology(request):
+    global installation1_prefix
+    if installation1_prefix:
+        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+
+    # Creating standalone instance ...
+    standalone = DirSrv(verbose=False)
+    args_instance[SER_HOST] = HOST_STANDALONE
+    args_instance[SER_PORT] = PORT_STANDALONE
+    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+    args_standalone = args_instance.copy()
+    standalone.allocate(args_standalone)
+    instance_standalone = standalone.exists()
+    if instance_standalone:
+        standalone.delete()
+    standalone.create()
+    standalone.open()
+
+    # Clear out the tmp dir
+    standalone.clearTmpDir(__file__)
+
+    return TopologyStandalone(standalone)
+
+
+def test_rootdn_init(topology):
+    '''
+    Initialize our setup to test the Root DN Access Control Plugin
+
+        Test the following access control types:
+
+        - Allowed IP address *
+        - Denied IP address *
+        - Specific time window
+        - Days allowed access
+        - Allowed host *
+        - Denied host *
+
+        * means multi-valued
+    '''
+
+    log.info('Initializing root DN test suite...')
+
+    #
+    # Set an aci so we can modify the plugin after we deny the Root DN
+    #
+    ACI = ('(target ="ldap:///cn=config")(targetattr = "*")(version 3.0' +
+           ';acl "all access";allow (all)(userdn="ldap:///anyone");)')
+    try:
+        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_ADD, 'aci', ACI)])
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_init: Failed to add aci to config: error ' +
+                  e.message['desc'])
+        assert False
+
+    #
+    # Create a regular user that can still modify the plugin config once the Root DN is locked out (via the aci above)
+    #
+    try:
+        topology.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(),
+                                 'uid': 'user1',
+                                 'userpassword': PASSWORD})))
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_init: Failed to add test user ' + USER1_DN + ': error ' +
+                  e.message['desc'])
+        assert False
+
+    #
+    # Enable dynamic plugins
+    #
+    try:
+        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_init: Failed to set dynamic plugins: error ' + e.message['desc'])
+        assert False
+
+    #
+    # Enable the plugin (after enabling dynamic plugins)
+    #
+    topology.standalone.plugins.enable(PLUGIN_ROOTDN_ACCESS)
+
+    log.info('test_rootdn_init: Initialized root DN test suite.')
+
+
+def test_rootdn_access_specific_time(topology):
+    '''
+    Test binding inside and outside of a specific time
+    '''
+
+    log.info('Running test_rootdn_access_specific_time...')
+
+    # Pick an open/close window well away from the current hour, so the Root DN bind below is rejected
+    current_hour = time.strftime("%H")
+    if int(current_hour) > 12:
+        open_time = '0200'
+        close_time = '0400'
+    else:
+        open_time = '1600'
+        close_time = '1800'
+
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-open-time', open_time),
+                                  (ldap.MOD_ADD, 'rootdn-close-time', close_time)])
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_specific_time: Failed to set (blocking) open/close times: error ' +
+                  e.message['desc'])
+        assert False
+
+    #
+    # Bind as Root DN - should fail
+    #
+    try:
+        topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+        succeeded = True
+    except ldap.LDAPError, e:
+        succeeded = False
+
+    if succeeded:
+        log.fatal('test_rootdn_access_specific_time: Root DN was incorrectly able to bind')
+        assert False
+
+    #
+    # Set config to allow the entire day
+    #
+    try:
+        topology.standalone.simple_bind_s(USER1_DN, PASSWORD)
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_specific_time: failed to bind as user1')
+        assert False
+
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'),
+                                  (ldap.MOD_REPLACE, 'rootdn-close-time', '2359')])
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_specific_time: Failed to set (open) open/close times: error ' +
+                  e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_specific_time: Root DN bind unexpectedly failed: error ' +
+                  e.message['desc'])
+        assert False
+
+    #
+    # Cleanup - undo the changes we made so the next test has a clean slate
+    #
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-open-time', None),
+                                                 (ldap.MOD_DELETE, 'rootdn-close-time', None)])
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_specific_time: Failed to delete open and close time: error ' +
+                  e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_specific_time: Root DN bind unexpectedly failed: error ' +
+                  e.message['desc'])
+        assert False
+
+    log.info('test_rootdn_access_specific_time: PASSED')
+
+
+def test_rootdn_access_day_of_week(topology):
+    '''
+    Test the days of week feature
+    '''
+
+    log.info('Running test_rootdn_access_day_of_week...')
+
+    days = ('Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat')
+    day = int(time.strftime("%w", time.gmtime()))
+
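+    # Build one set of days that excludes today (used to deny access) and one that includes today (used to allow it)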
+    if day > 3:
+        deny_days = days[0] + ', ' + days[1]
+        allow_days = days[day] + ',' + days[day - 1]
+    else:
+        deny_days = days[4] + ',' + days[5]
+        allow_days = days[day] + ',' + days[day + 1]
+
+    #
+    # Set the deny days
+    #
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed',
+                                     deny_days)])
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_day_of_week: Failed to set the deny days: error ' +
+                  e.message['desc'])
+        assert False
+
+    #
+    # Bind as Root DN - should fail
+    #
+    try:
+        topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+        succeeded = True
+    except ldap.LDAPError, e:
+        succeeded = False
+
+    if succeeded:
+        log.fatal('test_rootdn_access_day_of_week: Root DN was incorrectly able to bind')
+        assert False
+
+    #
+    # Set the allow days
+    #
+    try:
+        topology.standalone.simple_bind_s(USER1_DN, PASSWORD)
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_day_of_week: failed to bind as user1')
+        assert False
+
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed',
+                                     allow_days)])
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_day_of_week: Failed to set the allow days: error ' +
+                  e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_day_of_week: Root DN bind unexpectedly failed: error ' +
+                  e.message['desc'])
+        assert False
+
+    #
+    # Cleanup - undo the changes we made so the next test has a clean slate
+    #
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-days-allowed', None)])
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_day_of_week: Failed to set rootDN plugin config: error ' +
+                  e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_day_of_week: Root DN bind unexpectedly failed: error ' +
+                  e.message['desc'])
+        assert False
+
+    log.info('test_rootdn_access_day_of_week: PASSED')
+
+
+def test_rootdn_access_denied_ip(topology):
+    '''
+    Test denied IP feature - we can just test denying 127.0.0.1
+    '''
+
+    log.info('Running test_rootdn_access_denied_ip...')
+
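+    # Deny both the IPv4 and IPv6 loopback addresses so the local Root DN bind is blocked on either protocol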
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-deny-ip', '127.0.0.1'),
+                                  (ldap.MOD_ADD, 'rootdn-deny-ip', '::1')])
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_denied_ip: Failed to set rootDN plugin config: error ' +
+                  e.message['desc'])
+        assert False
+
+    #
+    # Bind as Root DN - should fail
+    #
+    try:
+        topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+        succeeded = True
+    except ldap.LDAPError, e:
+        succeeded = False
+
+    if succeeded:
+        log.fatal('test_rootdn_access_denied_ip: Root DN was incorrectly able to bind')
+        assert False
+
+    #
+    # Change the denied IP so root DN succeeds
+    #
+    try:
+        topology.standalone.simple_bind_s(USER1_DN, PASSWORD)
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_denied_ip: failed to bind as user1')
+        assert False
+
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-deny-ip', '255.255.255.255')])
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_denied_ip: Failed to set rootDN plugin config: error ' +
+                  e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_denied_ip: Root DN bind unexpectedly failed: error ' +
+                  e.message['desc'])
+        assert False
+
+    #
+    # Cleanup - undo the changes we made so the next test has a clean slate
+    #
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-deny-ip', None)])
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_denied_ip: Failed to set rootDN plugin config: error ' +
+                  e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_denied_ip: Root DN bind unexpectedly failed: error ' +
+                  e.message['desc'])
+        assert False
+
+    log.info('test_rootdn_access_denied_ip: PASSED')
+
+
+def test_rootdn_access_denied_host(topology):
+    '''
+    Test denied Host feature - we can just test denying localhost
+    '''
+
+    log.info('Running test_rootdn_access_denied_host...')
+
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-deny-host', 'localhost.localdomain')])
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_denied_host: Failed to set deny host: error ' +
+                  e.message['desc'])
+        assert False
+
+    #
+    # Bind as Root DN - should fail
+    #
+    try:
+        topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+        succeeded = True
+    except ldap.LDAPError, e:
+        succeeded = False
+
+    if succeeded:
+        log.fatal('test_rootdn_access_denied_host: Root DN was incorrectly able to bind')
+        assert False
+
+    #
+    # Change the denied host so root DN succeeds
+    #
+    try:
+        topology.standalone.simple_bind_s(USER1_DN, PASSWORD)
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_denied_host: failed to bind as user1')
+        assert False
+
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-deny-host', 'i.dont.exist.com')])
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_denied_host: Failed to set rootDN plugin config: error ' +
+                  e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_denied_host: Root DN bind unexpectedly failed: error ' +
+                  e.message['desc'])
+        assert False
+
+    #
+    # Cleanup - undo the changes we made so the next test has a clean slate
+    #
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-deny-host', None)])
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_denied_host: Failed to set rootDN plugin config: error ' +
+                  e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_denied_host: Root DN bind unexpectedly failed: error ' +
+                  e.message['desc'])
+        assert False
+
+    log.info('test_rootdn_access_denied_host: PASSED')
+
+
+def test_rootdn_access_allowed_ip(topology):
+    '''
+    Test allowed ip feature
+    '''
+
+    log.info('Running test_rootdn_access_allowed_ip...')
+
+    #
+    # Set the allowed IP to an address we do not use - blocks the Root DN
+    #
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-ip', '255.255.255.255')])
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_allowed_ip: Failed to set allowed IP: error ' +
+                  e.message['desc'])
+        assert False
+
+    #
+    # Bind as Root DN - should fail
+    #
+    try:
+        topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+        succeeded = True
+    except ldap.LDAPError, e:
+        succeeded = False
+
+    if succeeded:
+        log.fatal('test_rootdn_access_allowed_ip: Root DN was incorrectly able to bind')
+        assert False
+
+    #
+    # Allow localhost
+    #
+    try:
+        topology.standalone.simple_bind_s(USER1_DN, PASSWORD)
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_allowed_ip: failed to bind as user1')
+        assert False
+
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-ip', '127.0.0.1'),
+                                  (ldap.MOD_ADD, 'rootdn-allow-ip', '::1')])
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_allowed_ip: Failed to set allowed IP: error ' +
+                  e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_allowed_ip: Root DN bind unexpectedly failed: error ' +
+                  e.message['desc'])
+        assert False
+
+    #
+    # Cleanup - undo everything we did so the next test has a clean slate
+    #
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-allow-ip', None)])
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_allowed_ip: Failed to delete(rootdn-allow-ip): error ' +
+                  e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_allowed_ip: Root DN bind unexpectedly failed: error ' +
+                  e.message['desc'])
+        assert False
+
+    log.info('test_rootdn_access_allowed_ip: PASSED')
+
+
+def test_rootdn_access_allowed_host(topology):
+    '''
+    Test allowed host feature
+    '''
+
+    log.info('Running test_rootdn_access_allowed_host...')
+
+    #
+    # Set allowed host to an unknown host - blocks the Root DN
+    #
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-host', 'i.dont.exist.com')])
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_allowed_host: Failed to set allowed host: error ' +
+                  e.message['desc'])
+        assert False
+
+    #
+    # Bind as Root DN - should fail
+    #
+    try:
+        topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+        succeeded = True
+    except ldap.LDAPError, e:
+        succeeded = False
+
+    if succeeded:
+        log.fatal('test_rootdn_access_allowed_host: Root DN was incorrectly able to bind')
+        assert False
+
+    #
+    # Allow localhost
+    #
+    try:
+        topology.standalone.simple_bind_s(USER1_DN, PASSWORD)
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_allowed_host: failed to bind as user1')
+        assert False
+
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-allow-host', 'localhost.localdomain')])
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_allowed_host: Failed to set allowed host: error ' +
+                  e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_allowed_host: Root DN bind unexpectedly failed: error ' +
+                  e.message['desc'])
+        assert False
+
+    #
+    # Cleanup - undo everything we did so the next test has a clean slate
+    #
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-allow-host', None)])
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_allowed_host: Failed to delete(rootdn-allow-host): error ' +
+                  e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    except ldap.LDAPError, e:
+        log.fatal('test_rootdn_access_allowed_host: Root DN bind unexpectedly failed: error ' +
+                  e.message['desc'])
+        assert False
+
+    log.info('test_rootdn_access_allowed_host: PASSED')
+
+
+def test_rootdn_config_validate(topology):
+    '''
+    Test configuration validation
+
+    test single valued attributes: rootdn-open-time,
+                                   rootdn-close-time,
+                                   rootdn-days-allowed
+
+    '''
+
+    log.info('Running test_rootdn_config_validate...')
+
+    #
+    # Test rootdn-open-time
+    #
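+    # rootdn-open-time and rootdn-close-time must be set as a pair, be single valued, and be valid times (0000-2359)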
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000')])
+        log.fatal('test_rootdn_config_validate: Incorrectly allowed to just add "rootdn-open-time" ')
+        assert False
+    except ldap.LDAPError:
+        pass
+
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-open-time', '0000'),
+                                  (ldap.MOD_ADD, 'rootdn-open-time', '0001')])
+        log.fatal('test_rootdn_config_validate: Incorrectly allowed to add multiple "rootdn-open-time"')
+        assert False
+    except ldap.LDAPError:
+        pass
+
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '-1'),
+                                  (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')])
+        log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-open-time: -1"')
+        assert False
+    except ldap.LDAPError:
+        pass
+
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '2400'),
+                                  (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')])
+        log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-open-time: 2400"')
+        assert False
+    except ldap.LDAPError:
+        pass
+
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', 'aaaaa'),
+                                  (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')])
+        log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-open-time: aaaaa"')
+        assert False
+    except ldap.LDAPError:
+        pass
+
+    #
+    # Test rootdn-close-time
+    #
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-close-time', '0000')])
+        log.fatal('test_rootdn_config_validate: Incorrectly allowed to add just "rootdn-close-time"')
+        assert False
+    except ldap.LDAPError:
+        pass
+
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-close-time', '0000'),
+                                  (ldap.MOD_ADD, 'rootdn-close-time', '0001')])
+        log.fatal('test_rootdn_config_validate: Incorrectly allowed to add multiple "rootdn-open-time"')
+        assert False
+    except ldap.LDAPError:
+        pass
+
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'),
+                                  (ldap.MOD_REPLACE, 'rootdn-close-time', '-1')])
+        log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-close-time: -1"')
+        assert False
+    except ldap.LDAPError:
+        pass
+
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'),
+                                  (ldap.MOD_REPLACE, 'rootdn-close-time', '2400')])
+        log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-close-time: 2400"')
+        assert False
+    except ldap.LDAPError:
+        pass
+
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'),
+                                  (ldap.MOD_REPLACE, 'rootdn-close-time', 'aaaaa')])
+        log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-close-time: aaaaa"')
+        assert False
+    except ldap.LDAPError:
+        pass
+
+    #
+    # Test days allowed
+    #
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-days-allowed', 'Mon'),
+                                  (ldap.MOD_ADD, 'rootdn-days-allowed', 'Tue')])
+        log.fatal('test_rootdn_config_validate: Incorrectly allowed to add two "rootdn-days-allowed"')
+        assert False
+    except ldap.LDAPError:
+        pass
+
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'Mon1')])
+        log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-days-allowed: Mon1"')
+        assert False
+    except ldap.LDAPError:
+        pass
+
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'Tue, Mon1')])
+        log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-days-allowed: Tue, Mon1"')
+        assert False
+    except ldap.LDAPError:
+        pass
+
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'm111m')])
+        log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-days-allowed: 111"')
+        assert False
+    except ldap.LDAPError:
+        pass
+
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'Gur')])
+        log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-days-allowed: Gur"')
+        assert False
+    except ldap.LDAPError:
+        pass
+
+    #
+    # Test allow ips
+    #
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-ip', '12.12.Z.12')])
+        log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-allow-ip: 12.12.Z.12"')
+        assert False
+    except ldap.LDAPError:
+        pass
+
+    #
+    # Test deny ips
+    #
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-deny-ip', '12.12.Z.12')])
+        log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-deny-ip: 12.12.Z.12"')
+        assert False
+    except ldap.LDAPError:
+        pass
+
+    #
+    # Test allow hosts
+    #
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-host', 'host._.com')])
+        log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-allow-host: host._.com"')
+        assert False
+    except ldap.LDAPError:
+        pass
+
+    #
+    # Test deny hosts
+    #
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-deny-host', 'host.####.com')])
+        log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-deny-host: host.####.com"')
+        assert False
+    except ldap.LDAPError:
+        pass
+
+    log.info('test_rootdn_config_validate: PASSED')
+
+
+def test_rootdn_final(topology):
+    topology.standalone.delete()
+    log.info('Root DN Access Control test suite PASSED')
+
+
+def run_isolated():
+    global installation1_prefix
+    installation1_prefix = None
+
+    topo = topology(True)
+    test_rootdn_init(topo)
+    test_rootdn_access_specific_time(topo)
+    test_rootdn_access_day_of_week(topo)
+    test_rootdn_access_allowed_ip(topo)
+    test_rootdn_access_denied_ip(topo)
+    test_rootdn_access_allowed_host(topo)
+    test_rootdn_access_denied_host(topo)
+    test_rootdn_config_validate(topo)
+
+    test_rootdn_final(topo)
+
+
+if __name__ == '__main__':
+    run_isolated()
+
diff --git a/dirsrvtests/suites/schema/test_schema.py b/dirsrvtests/suites/schema/test_schema.py
index a1b3791..1767237 100644
--- a/dirsrvtests/suites/schema/test_schema.py
+++ b/dirsrvtests/suites/schema/test_schema.py
@@ -22,6 +22,10 @@ log = logging.getLogger(__name__)
 
 installation_prefix = None
 
+attrclass = ldap.schema.models.AttributeType
+occlass = ldap.schema.models.ObjectClass
+syntax_len_supported = False
+
 
 class TopologyStandalone(object):
     def __init__(self, standalone):
@@ -56,9 +60,6 @@ def topology(request):
 
     return TopologyStandalone(schemainst)
 
-attrclass = ldap.schema.models.AttributeType
-occlass = ldap.schema.models.ObjectClass
-
 
 def ochasattr(subschema, oc, mustormay, attr, key):
     """See if the oc and any of its parents and ancestors have the
@@ -133,8 +134,6 @@ def atgetparfield(subschema, at, field):
             break
     return v
 
-syntax_len_supported = False
-
 
 def atgetdiffs(ldschema, at1, at2):
     fields = ['names', 'desc', 'obsolete', 'sup', 'equality', 'ordering', 'substr', 'syntax',
@@ -152,6 +151,9 @@ def atgetdiffs(ldschema, at1, at2):
 
 def test_schema_comparewithfiles(topology):
     '''Compare the schema from ldap cn=schema with the schema files'''
+
+    log.info('Running test_schema_comparewithfiles...')
+
     retval = True
     schemainst = topology.standalone
     ldschema = schemainst.schema.get_subschema()
@@ -188,6 +190,8 @@ def test_schema_comparewithfiles(topology):
                 retval = False
     assert retval
 
+    log.info('test_schema_comparewithfiles: PASSED')
+
 
 def test_schema_final(topology):
     topology.standalone.delete()
diff --git a/dirsrvtests/tickets/create_testcase.py b/dirsrvtests/tickets/create_testcase.py
deleted file mode 100644
index 1fa2963..0000000
--- a/dirsrvtests/tickets/create_testcase.py
+++ /dev/null
@@ -1,530 +0,0 @@
-import sys
-import optparse
-
-'''
-    This script generates a template test script that handles the
-    non-interesting parts of a test script:
-        topology,
-        test (to be completed by the user),
-        final,
-        and run-isolated functions
-'''
-
-
-def displayUsage():
-    print ('\nUsage:\ncreate_ticket.py -t|--ticket <ticket number> [ i|--instances ' +
-           '<number of standalone instances> [ -m|--masters <number of masters> ' +
-           '-h|--hubs <number of hubs> -c|--consumers <number of consumers> ] ' +
-           '-o|--outputfile ]\n')
-    print ('If only "-t" is provided then a single standalone instance is created.  ' +
-           'The "-i" option can add mulitple standalone instances(maximum 10).  ' +
-           'However, you can not mix "-i" with the replication options(-m, -h , -c).  ' +
-           'There is a maximum of 10 masters, 10 hubs, and 10 consumers.')
-    exit(1)
-
-desc = 'Script to generate an initial lib389 test script.  ' + \
-       'This generates the topology, test, final, and run-isolated functions.'
-
-if len(sys.argv) > 0:
-    parser = optparse.OptionParser(description=desc, add_help_option=False)
-
-    # Script options
-    parser.add_option('-t', '--ticket', dest='ticket', default=None)
-    parser.add_option('-i', '--instances', dest='inst', default=None)
-    parser.add_option('-m', '--masters', dest='masters', default='0')
-    parser.add_option('-h', '--hubs', dest='hubs', default='0')
-    parser.add_option('-c', '--consumers', dest='consumers', default='0')
-    parser.add_option('-o', '--outputfile', dest='filename', default=None)
-
-    # Validate the options
-    try:
-        (args, opts) = parser.parse_args()
-    except:
-        displayUsage()
-
-    if args.ticket is None:
-        print 'Missing required ticket number'
-        displayUsage()
-
-    if int(args.masters) == 0:
-        if int(args.hubs) > 0 or int(args.consumers) > 0:
-            print 'You must use "-m|--masters" if you want to have hubs and/or consumers'
-            displayUsage()
-
-    if not args.masters.isdigit() or int(args.masters) > 10 or int(args.masters) < 0:
-        print 'Invalid value for "--masters", it must be a number and it can not be greater than 10'
-        displayUsage()
-
-    if not args.hubs.isdigit() or int(args.hubs) > 10 or int(args.hubs) < 0:
-        print 'Invalid value for "--hubs", it must be a number and it can not be greater than 10'
-        displayUsage()
-
-    if not args.consumers.isdigit() or int(args.consumers) > 10 or int(args.consumers) < 0:
-        print 'Invalid value for "--consumers", it must be a number and it can not be greater than 10'
-        displayUsage()
-
-    if args.inst:
-        if not args.inst.isdigit() or int(args.inst) > 10 or int(args.inst) < 1:
-            print ('Invalid value for "--instances", it must be a number greater than 0 ' +
-                   'and not greater than 10')
-            displayUsage()
-        if int(args.inst) > 0:
-            if int(args.masters) > 0 or int(args.hubs) > 0 or int(args.consumers) > 0:
-                print 'You can not mix "--instances" with replication.'
-                displayUsage()
-
-    # Extract usable values
-    masters = int(args.masters)
-    hubs = int(args.hubs)
-    consumers = int(args.consumers)
-    ticket = args.ticket
-    if not args.inst:
-        instances = 1
-    else:
-        instances = int(args.inst)
-    filename = args.filename
-
-    #
-    # Create/open the new test script file
-    #
-    if not filename:
-        filename = 'ticket' + ticket + '_test.py'
-    try:
-        TEST = open(filename, "w")
-    except IOError:
-        print "Can\'t open file:", filename
-        exit(1)
-
-    #
-    # Write the imports
-    #
-    TEST.write('import os\nimport sys\nimport time\nimport ldap\nimport logging\nimport pytest\n')
-    TEST.write('from lib389 import DirSrv, Entry, tools, tasks\nfrom lib389.tools import DirSrvTools\n' +
-               'from lib389._constants import *\nfrom lib389.properties import *\nfrom lib389.tasks import *\n\n')
-
-    #
-    # Set the logger and installation prefix
-    #
-    TEST.write('logging.getLogger(__name__).setLevel(logging.DEBUG)\n')
-    TEST.write('log = logging.getLogger(__name__)\n\n')
-    TEST.write('installation1_prefix = None\n\n\n')
-
-    #
-    # Write the replication or standalone classes
-    #
-    repl_deployment = False
-    if masters + hubs + consumers > 0:
-        #
-        # Write the replication class
-        #
-        repl_deployment = True
-
-        TEST.write('class TopologyReplication(object):\n')
-        TEST.write('    def __init__(self')
-        for idx in range(masters):
-            TEST.write(', master' + str(idx + 1))
-        for idx in range(hubs):
-            TEST.write(', hub' + str(idx + 1))
-        for idx in range(consumers):
-            TEST.write(', consumer' + str(idx + 1))
-        TEST.write('):\n')
-
-        for idx in range(masters):
-            TEST.write('        master' + str(idx + 1) + '.open()\n')
-            TEST.write('        self.master' + str(idx + 1) + ' = master' + str(idx + 1) + '\n')
-        for idx in range(hubs):
-            TEST.write('        hub' + str(idx + 1) + '.open()\n')
-            TEST.write('        self.hub' + str(idx + 1) + ' = hub' + str(idx + 1) + '\n')
-        for idx in range(consumers):
-            TEST.write('        consumer' + str(idx + 1) + '.open()\n')
-            TEST.write('        self.consumer' + str(idx + 1) + ' = consumer' + str(idx + 1) + '\n')
-        TEST.write('\n\n')
-    else:
-        #
-        # Write the standalone class
-        #
-        TEST.write('class TopologyStandalone(object):\n')
-        TEST.write('    def __init__(self')
-        for idx in range(instances):
-            idx += 1
-            if idx == 1:
-                idx = ''
-            else:
-                idx = str(idx)
-            TEST.write(', standalone' + idx)
-        TEST.write('):\n')
-
-        for idx in range(instances):
-            idx += 1
-            if idx == 1:
-                idx = ''
-            else:
-                idx = str(idx)
-            TEST.write('        standalone' + idx + '.open()\n')
-            TEST.write('        self.standalone' + idx + ' = standalone' + idx + '\n')
-        TEST.write('\n\n')
-
-    #
-    # Write the 'topology function'
-    #
-    TEST.write('@pytest.fixture(scope="module")\n')
-    TEST.write('def topology(request):\n')
-    TEST.write('    global installation1_prefix\n')
-    TEST.write('    if installation1_prefix:\n')
-    TEST.write('        args_instance[SER_DEPLOYED_DIR] = installation1_prefix\n\n')
-
-    if repl_deployment:
-        #
-        # Create the replication instances
-        #
-        for idx in range(masters):
-            idx = str(idx + 1)
-            TEST.write('    # Creating master ' + idx + '...\n')
-            TEST.write('    master' + idx + ' = DirSrv(verbose=False)\n')
-            TEST.write('    args_instance[SER_HOST] = HOST_MASTER_' + idx + '\n')
-            TEST.write('    args_instance[SER_PORT] = PORT_MASTER_' + idx + '\n')
-            TEST.write('    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_' + idx + '\n')
-            TEST.write('    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX\n')
-            TEST.write('    args_master = args_instance.copy()\n')
-            TEST.write('    master' + idx + '.allocate(args_master)\n')
-            TEST.write('    instance_master' + idx + ' = master' + idx + '.exists()\n')
-            TEST.write('    if instance_master' + idx + ':\n')
-            TEST.write('        master' + idx + '.delete()\n')
-            TEST.write('    master' + idx + '.create()\n')
-            TEST.write('    master' + idx + '.open()\n')
-            TEST.write('    master' + idx + '.replica.enableReplication(suffix=SUFFIX, ' +
-                                            'role=REPLICAROLE_MASTER, ' +
-                                            'replicaId=REPLICAID_MASTER_' + idx + ')\n\n')
-
-        for idx in range(hubs):
-            idx = str(idx + 1)
-            TEST.write('    # Creating hub ' + idx + '...\n')
-            TEST.write('    hub' + idx + ' = DirSrv(verbose=False)\n')
-            TEST.write('    args_instance[SER_HOST] = HOST_HUB_' + idx + '\n')
-            TEST.write('    args_instance[SER_PORT] = PORT_HUB_' + idx + '\n')
-            TEST.write('    args_instance[SER_SERVERID_PROP] = SERVERID_HUB_' + idx + '\n')
-            TEST.write('    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX\n')
-            TEST.write('    args_hub = args_instance.copy()\n')
-            TEST.write('    hub' + idx + '.allocate(args_hub)\n')
-            TEST.write('    instance_hub' + idx + ' = hub' + idx + '.exists()\n')
-            TEST.write('    if instance_hub' + idx + ':\n')
-            TEST.write('        hub' + idx + '.delete()\n')
-            TEST.write('    hub' + idx + '.create()\n')
-            TEST.write('    hub' + idx + '.open()\n')
-            TEST.write('    hub' + idx + '.replica.enableReplication(suffix=SUFFIX, ' +
-                                            'role=REPLICAROLE_HUB, ' +
-                                            'replicaId=REPLICAID_HUB_' + idx + ')\n\n')
-
-        for idx in range(consumers):
-            idx = str(idx + 1)
-            TEST.write('    # Creating consumer ' + idx + '...\n')
-            TEST.write('    consumer' + idx + ' = DirSrv(verbose=False)\n')
-            TEST.write('    args_instance[SER_HOST] = HOST_CONSUMER_' + idx + '\n')
-            TEST.write('    args_instance[SER_PORT] = PORT_CONSUMER_' + idx + '\n')
-            TEST.write('    args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_' + idx + '\n')
-            TEST.write('    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX\n')
-            TEST.write('    args_consumer = args_instance.copy()\n')
-            TEST.write('    consumer' + idx + '.allocate(args_consumer)\n')
-            TEST.write('    instance_consumer' + idx + ' = consumer' + idx + '.exists()\n')
-            TEST.write('    if instance_consumer' + idx + ':\n')
-            TEST.write('        consumer' + idx + '.delete()\n')
-            TEST.write('    consumer' + idx + '.create()\n')
-            TEST.write('    consumer' + idx + '.open()\n')
-            TEST.write('    consumer' + idx + '.replica.enableReplication(suffix=SUFFIX, ' +
-                                            'role=REPLICAROLE_CONSUMER, ' +
-                                            'replicaId=CONSUMER_REPLICAID)\n\n')
-
-        #
-        # Create the master agreements
-        #
-        TEST.write('    #\n')
-        TEST.write('    # Create all the agreements\n')
-        TEST.write('    #\n')
-        agmt_count = 0
-        for idx in range(masters):
-            master_idx = idx + 1
-            for idx in range(masters):
-                #
-                # Create agreements with the other masters (master -> master)
-                #
-                idx += 1
-                if master_idx == idx:
-                    # skip ourselves
-                    continue
-                TEST.write('    # Creating agreement from master ' + str(master_idx) + ' to master ' + str(idx) + '\n')
-                TEST.write("    properties = {RA_NAME:      r'meTo_$host:$port',\n")
-                TEST.write("                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],\n")
-                TEST.write("                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],\n")
-                TEST.write("                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],\n")
-                TEST.write("                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}\n")
-                TEST.write('    m' + str(idx) + '_agmt = master' + str(master_idx) +
-                            '.agreement.create(suffix=SUFFIX, host=master' +
-                            str(idx) + '.host, port=master' + str(idx) + '.port, properties=properties)\n')
-                TEST.write('    if not m' + str(idx) + '_agmt:\n')
-                TEST.write('        log.fatal("Fail to create a master -> master replica agreement")\n')
-                TEST.write('        sys.exit(1)\n')
-                TEST.write('    log.debug("%s created" % m' + str(idx) + '_agmt)\n\n')
-                agmt_count += 1
-
-            for idx in range(hubs):
-                idx += 1
-                #
-                # Create agreements from each master to each hub (master -> hub)
-                #
-                TEST.write('    # Creating agreement from master ' + str(master_idx) + ' to hub ' + str(idx) + '\n')
-                TEST.write("    properties = {RA_NAME:      r'meTo_$host:$port',\n")
-                TEST.write("                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],\n")
-                TEST.write("                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],\n")
-                TEST.write("                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],\n")
-                TEST.write("                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}\n")
-                TEST.write('    h' + str(idx) + '_agmt = master' + str(master_idx) +
-                            '.agreement.create(suffix=SUFFIX, host=hub' +
-                            str(idx) + '.host, port=hub' + str(idx) + '.port, properties=properties)\n')
-                TEST.write('    if not h' + str(idx) + '_agmt:\n')
-                TEST.write('        log.fatal("Fail to create a master -> hub replica agreement")\n')
-                TEST.write('        sys.exit(1)\n')
-                TEST.write('    log.debug("%s created" % h' + str(idx) + '_agmt)\n\n')
-                agmt_count += 1
-
-        #
-        # Create the hub agreements
-        #
-        for idx in range(hubs):
-            hub_idx = idx + 1
-            #
-            # Add agreements from each hub to each consumer (hub -> consumer)
-            #
-            for idx in range(consumers):
-                idx += 1
-                #
-                # Create agreements from each hub to each consumer
-                #
-                TEST.write('    # Creating agreement from hub ' + str(hub_idx) + ' to consumer ' + str(idx) + '\n')
-                TEST.write("    properties = {RA_NAME:      r'meTo_$host:$port',\n")
-                TEST.write("                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],\n")
-                TEST.write("                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],\n")
-                TEST.write("                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],\n")
-                TEST.write("                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}\n")
-                TEST.write('    c' + str(idx) + '_agmt = hub' +
-                            str(hub_idx) + '.agreement.create(suffix=SUFFIX, host=consumer' +
-                            str(idx) + '.host, port=consumer' + str(idx) + '.port, properties=properties)\n')
-                TEST.write('    if not c' + str(idx) + '_agmt:\n')
-                TEST.write('        log.fatal("Fail to create a hub -> consumer replica agreement")\n')
-                TEST.write('        sys.exit(1)\n')
-                TEST.write('    log.debug("%s created" % c' + str(idx) + '_agmt)\n\n')
-                agmt_count += 1
-
-        if hubs == 0:
-            #
-            # No Hubs, see if there are any consumers to create agreements to...
-            #
-            for idx in range(masters):
-                master_idx = idx + 1
-                #
-                # Create agreements with the consumers (master -> consumer)
-                #
-                for idx in range(consumers):
-                    idx += 1
-                    #
-                    # Create agreements from each master to each consumer
-                    #
-                    TEST.write('    # Creating agreement fro master ' + str(master_idx) +
-                               ' to consumer ' + str(idx) + '\n')
-                    TEST.write("    properties = {RA_NAME:      r'meTo_$host:$port',\n")
-                    TEST.write("                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],\n")
-                    TEST.write("                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],\n")
-                    TEST.write("                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],\n")
-                    TEST.write("                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}\n")
-                    TEST.write('    c' + str(idx) + '_agmt = master' + str(master_idx) +
-                                '.agreement.create(suffix=SUFFIX, host=consumer' +
-                                str(idx) + '.host, port=consumer' + str(idx) +
-                                '.port, properties=properties)\n')
-                    TEST.write('    if not c' + str(idx) + '_agmt:\n')
-                    TEST.write('        log.fatal("Fail to create a hub -> consumer replica agreement")\n')
-                    TEST.write('        sys.exit(1)\n')
-                    TEST.write('    log.debug("%s created" % c' + str(idx) + '_agmt)\n\n')
-                    agmt_count += 1
-
-        #
-        # Write the replication initializations
-        #
-        TEST.write('    #\n')
-        TEST.write('    # Initialize all the agreements\n')
-        TEST.write('    #\n')
-
-        # Masters
-        for idx in range(masters):
-            idx += 1
-            if idx == 1:
-                continue
-            TEST.write('    master1.agreement.init(SUFFIX, HOST_MASTER_' +
-                       str(idx) + ', PORT_MASTER_' + str(idx) + ')\n')
-            TEST.write('    master1.waitForReplInit(m' + str(idx) + '_agmt)\n')
-
-        # Hubs
-        consumers_inited = False
-        for idx in range(hubs):
-            idx += 1
-            TEST.write('    master1.agreement.init(SUFFIX, HOST_HUB_' +
-                   str(idx) + ', PORT_HUB_' + str(idx) + ')\n')
-            TEST.write('    master1.waitForReplInit(h' + str(idx) + '_agmt)\n')
-            for idx in range(consumers):
-                if consumers_inited:
-                    continue
-                idx += 1
-                TEST.write('    hub1.agreement.init(SUFFIX, HOST_CONSUMER_' +
-                           str(idx) + ', PORT_CONSUMER_' + str(idx) + ')\n')
-                TEST.write('    hub1.waitForReplInit(c' + str(idx) + '_agmt)\n')
-            consumers_inited = True
-
-        # Consumers (master -> consumer)
-        if hubs == 0:
-            for idx in range(consumers):
-                idx += 1
-                TEST.write('    master1.agreement.init(SUFFIX, HOST_CONSUMER_' +
-                           str(idx) + ', PORT_CONSUMER_' + str(idx) + ')\n')
-                TEST.write('    master1.waitForReplInit(c' + str(idx) + '_agmt)\n')
-
-        TEST.write('\n')
-
-        #
-        # Write replicaton check
-        #
-        if agmt_count > 0:
-            # Find the lowest replica type in the deployment(consumer -> master)
-            if consumers > 0:
-                replica = 'consumer1'
-            elif hubs > 0:
-                replica = 'hub1'
-            else:
-                replica = 'master2'
-            TEST.write('    # Check replication is working...\n')
-            TEST.write('    if master1.testReplication(DEFAULT_SUFFIX, ' + replica + '):\n')
-            TEST.write("        log.info('Replication is working.')\n")
-            TEST.write('    else:\n')
-            TEST.write("        log.fatal('Replication is not working.')\n")
-            TEST.write('        assert False\n')
-            TEST.write('\n')
-
-        #
-        # Write the finals steps for replication
-        #
-        TEST.write('    # Clear out the tmp dir\n')
-        TEST.write('    master1.clearTmpDir(__file__)\n\n')
-        TEST.write('    return TopologyReplication(master1')
-        for idx in range(masters):
-            idx += 1
-            if idx == 1:
-                continue
-            TEST.write(', master' + str(idx))
-        for idx in range(hubs):
-            TEST.write(', hub' + str(idx + 1))
-        for idx in range(consumers):
-            TEST.write(', consumer' + str(idx + 1))
-        TEST.write(')\n')
-    else:
-        #
-        # Standalone servers
-        #
-
-        # Args for the standalone instance
-        for idx in range(instances):
-            idx += 1
-            if idx == 1:
-                idx = ''
-            else:
-                idx = str(idx)
-            TEST.write('    # Creating standalone instance ' + idx + '...\n')
-            TEST.write('    standalone' + idx + ' = DirSrv(verbose=False)\n')
-            TEST.write('    args_instance[SER_HOST] = HOST_STANDALONE' + idx + '\n')
-            TEST.write('    args_instance[SER_PORT] = PORT_STANDALONE' + idx + '\n')
-            TEST.write('    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE' + idx + '\n')
-            TEST.write('    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX\n')
-            TEST.write('    args_standalone' + idx + ' = args_instance.copy()\n')
-            TEST.write('    standalone' + idx + '.allocate(args_standalone' + idx + ')\n')
-
-            # Get the status of the instance and restart it if it exists
-            TEST.write('    instance_standalone' + idx + ' = standalone' + idx + '.exists()\n')
-
-            # Remove the instance
-            TEST.write('    if instance_standalone' + idx + ':\n')
-            TEST.write('        standalone' + idx + '.delete()\n')
-
-            # Create and open the instance
-            TEST.write('    standalone' + idx + '.create()\n')
-            TEST.write('    standalone' + idx + '.open()\n\n')
-
-        TEST.write('    # Clear out the tmp dir\n')
-        TEST.write('    standalone.clearTmpDir(__file__)\n')
-        TEST.write('\n')
-        TEST.write('    return TopologyStandalone(standalone')
-        for idx in range(instances):
-            idx += 1
-            if idx == 1:
-                continue
-            TEST.write(', standalone' + str(idx))
-        TEST.write(')\n')
-
-    TEST.write('\n\n')
-
-    #
-    # Write the test function
-    #
-    TEST.write('def test_ticket' + ticket + '(topology):\n')
-    TEST.write("    '''\n")
-    if repl_deployment:
-        TEST.write('    Write your replication testcase here.\n\n')
-        TEST.write('    To access each DirSrv instance use:  topology.master1, topology.master2,\n' +
-                   '        ..., topology.hub1, ..., topology.consumer1, ...\n')
-    else:
-        TEST.write('    Write your testcase here...\n')
-
-    TEST.write("    '''\n\n")
-    TEST.write("    log.info('Test complete')\n")
-    TEST.write("\n\n")
-
-    #
-    # Write the final function here - delete each instance
-    #
-    TEST.write('def test_ticket' + ticket + '_final(topology):\n')
-    if repl_deployment:
-        for idx in range(masters):
-            idx += 1
-            TEST.write('    topology.master' + str(idx) + '.delete()\n')
-        for idx in range(hubs):
-            idx += 1
-            TEST.write('    topology.hub' + str(idx) + '.delete()\n')
-        for idx in range(consumers):
-            idx += 1
-            TEST.write('    topology.consumer' + str(idx) + '.delete()\n')
-    else:
-        for idx in range(instances):
-            idx += 1
-            if idx == 1:
-                idx = ''
-            else:
-                idx = str(idx)
-            TEST.write('    topology.standalone' + idx + '.delete()\n')
-
-    TEST.write("    log.info('Testcase PASSED')\n")
-    TEST.write('\n\n')
-
-    #
-    # Write the main function
-    #
-    TEST.write('def run_isolated():\n')
-    TEST.write('    global installation1_prefix\n')
-    TEST.write('    installation1_prefix = None\n\n')
-    TEST.write('    topo = topology(True)\n')
-    TEST.write('    test_ticket' + ticket + '(topo)\n')
-    TEST.write('    test_ticket' + ticket + '_final(topo)\n')
-    TEST.write('\n\n')
-
-    TEST.write("if __name__ == '__main__':\n")
-    TEST.write('    run_isolated()\n\n')
-
-    #
-    # Done, close things up
-    #
-    TEST.close()
-    print('Created: ' + filename)
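
For reference, the generator deleted above (like its create_test.py replacement) builds the emitted test script purely by string concatenation. Below is a minimal, self-contained sketch of that pattern, reduced to just the final "return TopologyReplication(...)" line; the helper name and the instance counts in the example are hypothetical, not taken from the commit.

    #!/usr/bin/python
    # Sketch of the string-building pattern the generator uses for the final
    # "return TopologyReplication(...)" line.  Counts below are example values.
    def build_topology_return(masters, hubs, consumers):
        line = '    return TopologyReplication(master1'
        for idx in range(2, masters + 1):      # master1 is always present
            line += ', master' + str(idx)
        for idx in range(1, hubs + 1):
            line += ', hub' + str(idx)
        for idx in range(1, consumers + 1):
            line += ', consumer' + str(idx)
        return line + ')\n'

    if __name__ == '__main__':
        print(build_topology_return(2, 1, 2))  # hypothetical 2 masters, 1 hub, 2 consumers
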
diff --git a/dirsrvtests/tickets/ticket365_test.py b/dirsrvtests/tickets/ticket365_test.py
new file mode 100644
index 0000000..6c2db2d
--- /dev/null
+++ b/dirsrvtests/tickets/ticket365_test.py
@@ -0,0 +1,161 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+installation1_prefix = None
+
+
+class TopologyStandalone(object):
+    def __init__(self, standalone):
+        standalone.open()
+        self.standalone = standalone
+
+
+ at pytest.fixture(scope="module")
+def topology(request):
+    global installation1_prefix
+    if installation1_prefix:
+        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+
+    # Creating standalone instance ...
+    standalone = DirSrv(verbose=False)
+    args_instance[SER_HOST] = HOST_STANDALONE
+    args_instance[SER_PORT] = PORT_STANDALONE
+    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+    args_standalone = args_instance.copy()
+    standalone.allocate(args_standalone)
+    instance_standalone = standalone.exists()
+    if instance_standalone:
+        standalone.delete()
+    standalone.create()
+    standalone.open()
+
+    # Clear out the tmp dir
+    standalone.clearTmpDir(__file__)
+
+    return TopologyStandalone(standalone)
+
+
+def test_ticket365(topology):
+    '''
+    Test that nsslapd-auditlog-logging-hide-unhashed-pw correctly controls
+    whether the unhashed password is written to the audit log.
+
+    Related: nsslapd-unhashed-pw-switch (ticket 561) - on, off, nolog
+    '''
+
+    USER_DN = 'uid=test_entry,' + DEFAULT_SUFFIX
+
+    #
+    # Add the test entry
+    #
+    try:
+        topology.standalone.add_s(Entry((USER_DN, {
+                          'objectclass': 'top extensibleObject'.split(),
+                          'uid': 'test_entry',
+                          'userpassword': 'password'
+                          })))
+    except ldap.LDAPError, e:
+        log.error('Failed to add test user: error ' + e.message['desc'])
+        assert False
+
+    #
+    # Enable the audit log
+    #
+    try:
+        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-auditlog-logging-enabled', 'on')])
+    except ldap.LDAPError, e:
+        log.fatal('Failed to enable audit log, error: ' + e.message['desc'])
+        assert False
+    #
+    # Allow the unhashed password to be written to audit log
+    #
+    try:
+        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
+                                     'nsslapd-auditlog-logging-hide-unhashed-pw', 'off')])
+    except ldap.LDAPError, e:
+        log.fatal('Failed to enable writing unhashed password to audit log, error: ' + e.message['desc'])
+        assert False
+
+    #
+    # Set new password, and check the audit log
+    #
+    try:
+        topology.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE, 'userpassword', 'mypassword')])
+    except ldap.LDAPError, e:
+        log.fatal('Failed to set user password, error: ' + e.message['desc'])
+        assert False
+
+    # Check audit log
+    if not topology.standalone.searchAuditLog('unhashed#user#password: mypassword'):
+        log.fatal('failed to find unhashed password in auditlog')
+        assert False
+
+    #
+    # Hide unhashed password in audit log
+    #
+    try:
+        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-auditlog-logging-hide-unhashed-pw', 'on')])
+    except ldap.LDAPError, e:
+        log.fatal('Failed to deny writing unhashed password to audit log, error: ' + e.message['desc'])
+        assert False
+
+    #
+    # Modify password, and check the audit log
+    #
+    try:
+        topology.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE, 'userpassword', 'hidepassword')])
+    except ldap.LDAPError, e:
+        log.fatal('Failed to modify user password, error: ' + e.message['desc'])
+        assert False
+
+    # Check audit log
+    if topology.standalone.searchAuditLog('unhashed#user#password: hidepassword'):
+        log.fatal('Found unhashed password in auditlog')
+        assert False
+
+    log.info('Test complete')
+
+
+def test_ticket365_final(topology):
+    topology.standalone.delete()
+    log.info('Testcase PASSED')
+
+
+def run_isolated():
+    global installation1_prefix
+    installation1_prefix = None
+
+    topo = topology(True)
+    test_ticket365(topo)
+    test_ticket365_final(topo)
+
+
+if __name__ == '__main__':
+    run_isolated()
+
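
The pass/fail decision in ticket365_test.py hinges on lib389's searchAuditLog() finding the 'unhashed#user#password: <value>' marker. As a rough, self-contained sketch of the same check done with a plain file scan (the helper name and the audit log path are hypothetical examples; the real path comes from the server's configuration):

    # Hedged sketch: scan an audit log for the unhashed-password marker.
    import re

    def unhashed_pw_logged(auditlog_path, password):
        pattern = re.compile(r'unhashed#user#password: ' + re.escape(password))
        with open(auditlog_path) as logfile:
            return any(pattern.search(line) for line in logfile)

    # e.g. expect True with hide-unhashed-pw set to "off", False with "on":
    # unhashed_pw_logged('/var/log/dirsrv/slapd-standalone/audit', 'mypassword')
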
diff --git a/dirsrvtests/tickets/ticket47384_test.py b/dirsrvtests/tickets/ticket47384_test.py
new file mode 100644
index 0000000..7325e18
--- /dev/null
+++ b/dirsrvtests/tickets/ticket47384_test.py
@@ -0,0 +1,159 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import pytest
+import shutil
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from lib389.utils import *
+
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+installation1_prefix = None
+
+
+class TopologyStandalone(object):
+    def __init__(self, standalone):
+        standalone.open()
+        self.standalone = standalone
+
+
+ at pytest.fixture(scope="module")
+def topology(request):
+    global installation1_prefix
+    if installation1_prefix:
+        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+
+    # Creating standalone instance ...
+    standalone = DirSrv(verbose=False)
+    args_instance[SER_HOST] = HOST_STANDALONE
+    args_instance[SER_PORT] = PORT_STANDALONE
+    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+    args_standalone = args_instance.copy()
+    standalone.allocate(args_standalone)
+    instance_standalone = standalone.exists()
+    if instance_standalone:
+        standalone.delete()
+    standalone.create()
+    standalone.open()
+
+    # Clear out the tmp dir
+    standalone.clearTmpDir(__file__)
+
+    return TopologyStandalone(standalone)
+
+
+def test_ticket47384(topology):
+    '''
+    Test pluginpath validation: relative and absolute paths
+
+    With the inclusion of ticket 47601 - we do allow plugin paths
+    outside the default location
+    '''
+    PLUGIN_DN = 'cn=%s,cn=plugins,cn=config' % PLUGIN_WHOAMI
+    tmp_dir = topology.standalone.getDir(__file__, TMP_DIR)
+    plugin_dir = get_plugin_dir(topology.standalone.prefix)
+
+    # Copy the library to our tmp directory
+    try:
+        shutil.copy('%s/libwhoami-plugin.so' % plugin_dir, tmp_dir)
+    except IOError, e:
+        log.fatal('Failed to copy libwhoami-plugin.so to the tmp directory, error: '
+                  + e.strerror)
+        assert False
+    try:
+        shutil.copy('%s/libwhoami-plugin.la' % plugin_dir, tmp_dir)
+    except IOError, e:
+        log.fatal('Failed to copy libwhoami-plugin.la to the tmp directory, error: '
+                  + e.strerror)
+        assert False
+
+    #
+    # Test adding valid plugin paths
+    #
+    # Try using the absolute path to the current library
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
+                                     'nsslapd-pluginPath', '%s/libwhoami-plugin' % plugin_dir)])
+    except ldap.LDAPError, e:
+        log.error('Failed to set valid plugin path (%s): error (%s)' %
+                  ('%s/libwhoami-plugin' % plugin_dir, e.message['desc']))
+        assert False
+
+    # Try using new remote location
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
+                                     'nsslapd-pluginPath', '%s/libwhoami-plugin' % tmp_dir)])
+    except ldap.LDAPError, e:
+        log.error('Failed to set valid plugin path (%s): error (%s)' %
+                  ('%s/libwhoami-plugin' % tmp_dir, e.message['desc']))
+        assert False
+
+    # Set plugin path back to the default
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
+                                     'nsslapd-pluginPath', 'libwhoami-plugin')])
+    except ldap.LDAPError, e:
+        log.error('Failed to set valid relative plugin path (%s): error (%s)' %
+                  ('libwhoami-plugin', e.message['desc']))
+        assert False
+
+    #
+    # Test invalid path (no library present)
+    #
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
+                                     'nsslapd-pluginPath', '/bin/libwhoami-plugin')])
+        # No exception?! This is an error
+        log.error('Invalid plugin path was incorrectly accepted by the server!')
+        assert False
+    except ldap.UNWILLING_TO_PERFORM:
+        # Correct, operation should be rejected
+        pass
+    except ldap.LDAPError, e:
+        log.error('Failed to set invalid plugin path (%s): error (%s)' %
+                  ('/bin/libwhoami-plugin', e.message['desc']))
+
+    #
+    # Test invalid relative path (no library present)
+    #
+    try:
+        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
+                                     'nsslapd-pluginPath', '../libwhoami-plugin')])
+        # No exception?! This is an error
+        log.error('Invalid plugin path was incorrectly accepted by the server!')
+        assert False
+    except ldap.UNWILLING_TO_PERFORM:
+        # Correct, operation should be rejected
+        pass
+    except ldap.LDAPError, e:
+        log.error('Failed to set invalid plugin path (%s): error (%s)' %
+                  ('../libwhoami-plugin', e.message['desc']))
+
+    log.info('Test complete')
+
+
+def test_ticket47384_final(topology):
+    topology.standalone.delete()
+    log.info('Testcase PASSED')
+
+
+def run_isolated():
+    global installation1_prefix
+    installation1_prefix = None
+
+    topo = topology(True)
+    test_ticket47384(topo)
+    test_ticket47384_final(topo)
+
+
+if __name__ == '__main__':
+    run_isolated()
+
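
Both "invalid path" cases in ticket47384_test.py follow the same shape: attempt the modify and accept only UNWILLING_TO_PERFORM as the outcome. A hedged sketch of that pattern factored into a reusable helper (the helper name and logging setup are illustrative, not part of the commit):

    # Hedged sketch of the rejection pattern used twice in the test above.
    import ldap
    import logging

    log = logging.getLogger(__name__)

    def expect_pluginpath_rejected(conn, plugin_dn, bad_path):
        # The server should refuse an nsslapd-pluginPath that points at no library.
        try:
            conn.modify_s(plugin_dn, [(ldap.MOD_REPLACE, 'nsslapd-pluginPath', bad_path)])
        except ldap.UNWILLING_TO_PERFORM:
            return True   # rejected, as expected
        except ldap.LDAPError, e:
            log.error('Unexpected error setting %s: %s' % (bad_path, e.message['desc']))
            return False
        log.error('Invalid plugin path %s was accepted by the server!' % bad_path)
        return False
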
diff --git a/dirsrvtests/tickets/ticket47953_test.py b/dirsrvtests/tickets/ticket47953_test.py
index 4e1ec60..77497a4 100644
--- a/dirsrvtests/tickets/ticket47953_test.py
+++ b/dirsrvtests/tickets/ticket47953_test.py
@@ -72,7 +72,7 @@ def test_ticket47953(topology):
     #
     # Import an invalid ldif
     #
-    ldif_file = topology.standalone.getDir(__file__, DATA_DIR) + "ticket47953.ldif"
+    ldif_file = topology.standalone.getDir(__file__, DATA_DIR) + "ticket47953/ticket47953.ldif"
     importTask = Tasks(topology.standalone)
     args = {TASK_WAIT: True}
     try:
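
For context on the hunk above: ticket47953_test.py only needed its ldif path updated to the new per-ticket data subdirectory; the import itself is driven through lib389's Tasks API. The actual call sits outside this hunk, so the following is only a hedged sketch of how such a blocking import is typically issued (the helper name is illustrative, and it assumes lib389's Tasks.importLDIF(suffix, benamebase, input_file, args) interface); the ldif is deliberately broken, so the task is expected to fail.

    # Hedged sketch - not the test's actual import call, which lies outside this hunk.
    from lib389.tasks import Tasks
    from lib389._constants import *
    from lib389.properties import *

    def run_import(instance, ldif_file):
        import_task = Tasks(instance)
        args = {TASK_WAIT: True}          # block until the import task finishes
        return import_task.importLDIF(DEFAULT_SUFFIX, None, ldif_file, args)
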




More information about the 389-commits mailing list