dirsrvtests/create_test.py dirsrvtests/tests
by Simon Pichugin
dirsrvtests/create_test.py | 890 ++++------
dirsrvtests/tests/suites/acct_usability_plugin/acct_usability_test.py | 84
dirsrvtests/tests/suites/acctpolicy_plugin/accpol_test.py | 136 -
dirsrvtests/tests/suites/acctpolicy_plugin/acctpolicy_test.py | 81
dirsrvtests/tests/suites/acl/acl_test.py | 863 ++++-----
dirsrvtests/tests/suites/attr_encryption/attr_encrypt_test.py | 84
dirsrvtests/tests/suites/attr_uniqueness_plugin/attr_uniqueness_test.py | 153 -
dirsrvtests/tests/suites/automember_plugin/automember_test.py | 84
dirsrvtests/tests/suites/basic/basic_test.py | 318 +--
dirsrvtests/tests/suites/betxns/betxn_test.py | 111 -
dirsrvtests/tests/suites/chaining_plugin/chaining_test.py | 81
dirsrvtests/tests/suites/clu/clu_test.py | 49
dirsrvtests/tests/suites/clu/db2ldif_test.py | 72
dirsrvtests/tests/suites/collation_plugin/collatation_test.py | 81
dirsrvtests/tests/suites/config/config_test.py | 246 --
dirsrvtests/tests/suites/cos_plugin/cos_test.py | 81
dirsrvtests/tests/suites/deref_plugin/deref_test.py | 81
dirsrvtests/tests/suites/disk_monitoring/disk_monitor_test.py | 81
dirsrvtests/tests/suites/distrib_plugin/distrib_test.py | 81
dirsrvtests/tests/suites/dna_plugin/dna_test.py | 124 -
dirsrvtests/tests/suites/ds_logs/ds_logs_test.py | 81
dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py | 2
dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py | 2
dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py | 191 --
dirsrvtests/tests/suites/filter/filter_test.py | 57
dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py | 123 -
dirsrvtests/tests/suites/get_effective_rights/ger_test.py | 81
dirsrvtests/tests/suites/gssapi_repl/gssapi_repl_test.py | 130 -
dirsrvtests/tests/suites/ldapi/ldapi_test.py | 81
dirsrvtests/tests/suites/linkedattrs_plugin/linked_attrs_test.py | 81
dirsrvtests/tests/suites/mapping_tree/mapping_tree_test.py | 81
dirsrvtests/tests/suites/memberof_plugin/memberof_test.py | 139 -
dirsrvtests/tests/suites/memory_leaks/range_search_test.py | 98 -
dirsrvtests/tests/suites/monitor/monitor_test.py | 81
dirsrvtests/tests/suites/paged_results/paged_results_test.py | 632 +++----
dirsrvtests/tests/suites/pam_passthru_plugin/pam_test.py | 81
dirsrvtests/tests/suites/passthru_plugin/passthru_test.py | 81
dirsrvtests/tests/suites/password/password_test.py | 70
dirsrvtests/tests/suites/password/pwdAdmin_test.py | 143 -
dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py | 217 +-
dirsrvtests/tests/suites/password/pwdPolicy_inherit_global_test.py | 199 --
dirsrvtests/tests/suites/password/pwdPolicy_syntax_test.py | 95 -
dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py | 234 +-
dirsrvtests/tests/suites/password/pwd_algo_test.py | 114 -
dirsrvtests/tests/suites/password/pwp_history_test.py | 141 -
dirsrvtests/tests/suites/posix_winsync_plugin/posix_winsync_test.py | 81
dirsrvtests/tests/suites/psearch/psearch_test.py | 81
dirsrvtests/tests/suites/referint_plugin/referint_test.py | 81
dirsrvtests/tests/suites/replication/cleanallruv_test.py | 684 ++-----
dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py | 197 --
dirsrvtests/tests/suites/replsync_plugin/repl_sync_test.py | 81
dirsrvtests/tests/suites/resource_limits/res_limits_test.py | 81
dirsrvtests/tests/suites/retrocl_plugin/retrocl_test.py | 81
dirsrvtests/tests/suites/reverpwd_plugin/reverpwd_test.py | 81
dirsrvtests/tests/suites/roles_plugin/roles_test.py | 81
dirsrvtests/tests/suites/rootdn_plugin/rootdn_plugin_test.py | 188 --
dirsrvtests/tests/suites/sasl/sasl_test.py | 81
dirsrvtests/tests/suites/schema/test_schema.py | 42
dirsrvtests/tests/suites/schema_reload_plugin/schema_reload_test.py | 81
dirsrvtests/tests/suites/snmp/snmp_test.py | 81
dirsrvtests/tests/suites/ssl/ssl_test.py | 81
dirsrvtests/tests/suites/syntax_plugin/syntax_test.py | 81
dirsrvtests/tests/suites/usn_plugin/usn_test.py | 81
dirsrvtests/tests/suites/views_plugin/views_test.py | 81
dirsrvtests/tests/suites/vlv/vlv_test.py | 81
dirsrvtests/tests/suites/whoami_plugin/whoami_test.py | 81
dirsrvtests/tests/tickets/finalizer.py | 2
dirsrvtests/tests/tickets/ticket47313_test.py | 2
dirsrvtests/tests/tickets/ticket47384_test.py | 2
dirsrvtests/tests/tickets/ticket47431_test.py | 2
dirsrvtests/tests/tickets/ticket47462_test.py | 2
dirsrvtests/tests/tickets/ticket47490_test.py | 2
dirsrvtests/tests/tickets/ticket47553_test.py | 2
dirsrvtests/tests/tickets/ticket47560_test.py | 2
dirsrvtests/tests/tickets/ticket47573_test.py | 2
dirsrvtests/tests/tickets/ticket47619_test.py | 2
dirsrvtests/tests/tickets/ticket47640_test.py | 2
dirsrvtests/tests/tickets/ticket47653MMR_test.py | 2
dirsrvtests/tests/tickets/ticket47653_test.py | 2
dirsrvtests/tests/tickets/ticket47669_test.py | 2
dirsrvtests/tests/tickets/ticket47676_test.py | 2
dirsrvtests/tests/tickets/ticket47714_test.py | 2
dirsrvtests/tests/tickets/ticket47721_test.py | 2
dirsrvtests/tests/tickets/ticket47781_test.py | 2
dirsrvtests/tests/tickets/ticket47787_test.py | 2
dirsrvtests/tests/tickets/ticket47808_test.py | 2
dirsrvtests/tests/tickets/ticket47815_test.py | 2
dirsrvtests/tests/tickets/ticket47819_test.py | 2
dirsrvtests/tests/tickets/ticket47823_test.py | 2
dirsrvtests/tests/tickets/ticket47828_test.py | 2
dirsrvtests/tests/tickets/ticket47829_test.py | 2
dirsrvtests/tests/tickets/ticket47833_test.py | 2
dirsrvtests/tests/tickets/ticket47838_test.py | 2
dirsrvtests/tests/tickets/ticket47869MMR_test.py | 2
dirsrvtests/tests/tickets/ticket47871_test.py | 2
dirsrvtests/tests/tickets/ticket47900_test.py | 2
dirsrvtests/tests/tickets/ticket47910_test.py | 2
dirsrvtests/tests/tickets/ticket47920_test.py | 2
dirsrvtests/tests/tickets/ticket47921_test.py | 2
dirsrvtests/tests/tickets/ticket47927_test.py | 2
dirsrvtests/tests/tickets/ticket47937_test.py | 2
dirsrvtests/tests/tickets/ticket47950_test.py | 2
dirsrvtests/tests/tickets/ticket47953_test.py | 2
dirsrvtests/tests/tickets/ticket47963_test.py | 2
dirsrvtests/tests/tickets/ticket47966_test.py | 2
dirsrvtests/tests/tickets/ticket47970_test.py | 2
dirsrvtests/tests/tickets/ticket47973_test.py | 2
dirsrvtests/tests/tickets/ticket47980_test.py | 2
dirsrvtests/tests/tickets/ticket47981_test.py | 2
dirsrvtests/tests/tickets/ticket47988_test.py | 2
dirsrvtests/tests/tickets/ticket48005_test.py | 2
dirsrvtests/tests/tickets/ticket48026_test.py | 2
dirsrvtests/tests/tickets/ticket48109_test.py | 2
dirsrvtests/tests/tickets/ticket48170_test.py | 2
dirsrvtests/tests/tickets/ticket48226_test.py | 2
dirsrvtests/tests/tickets/ticket48228_test.py | 2
dirsrvtests/tests/tickets/ticket48252_test.py | 2
dirsrvtests/tests/tickets/ticket48265_test.py | 2
dirsrvtests/tests/tickets/ticket48294_test.py | 2
dirsrvtests/tests/tickets/ticket48295_test.py | 2
dirsrvtests/tests/tickets/ticket48366_test.py | 2
dirsrvtests/tests/tickets/ticket48759_test.py | 2
dirsrvtests/tests/tickets/ticket48891_test.py | 2
dirsrvtests/tests/tickets/ticket48906_test.py | 2
dirsrvtests/tests/tickets/ticket548_test.py | 2
125 files changed, 2575 insertions(+), 7047 deletions(-)
New commits:
commit 82030d144e4a691beb1b3ce159d5ce1dc8095695
Author: Simon Pichugin <spichugi(a)redhat.com>
Date: Fri Nov 18 10:17:27 2016 +0100
Ticket 49055 - Clean up test suites
Description: Add all topology fixture imports
for all suites and refactor them accordingly.
Fix PEP8 and some logic issues.
Set 2016 date in licenses.
Fix create_test.py according to the new changes plus PEP8.
For now we have:
- topology_st - with topology_st.standalone inst;
- topology_m2 and topology_m4 - with dicts
topology_mN.ms["masterX"]
topology_mN.ms["masterX_agmts"]["mX_mY"]
Using this setup we can easily run through masters
and agreements in loops.
https://fedorahosted.org/389/ticket/49055
Reviewed by: wibrown (Thanks!)
diff --git a/dirsrvtests/create_test.py b/dirsrvtests/create_test.py
index 3898279..0eb3b01 100755
--- a/dirsrvtests/create_test.py
+++ b/dirsrvtests/create_test.py
@@ -13,15 +13,16 @@ import optparse
"""This script generates a template test script that handles the
non-interesting parts of a test script:
-- topology,
-- test (to be completed by the user),
-- final,
-- and run-isolated function
+- topology fixture (only for tickets),
+ for suites we have predefined fixtures in lib389/topologies.py
+- test function (to be completed by the user),
+- run-isolated function
"""
def displayUsage():
"""Display the usage"""
+
print ('\nUsage:\ncreate_ticket.py -t|--ticket <ticket number> ' +
'-s|--suite <suite name> ' +
'[ i|--instances <number of standalone instances> ' +
@@ -33,6 +34,9 @@ def displayUsage():
'can add mulitple standalone instances(maximum 10). However, you' +
' can not mix "-i" with the replication options(-m, -h , -c). ' +
'There is a maximum of 10 masters, 10 hubs, and 10 consumers.')
+ print('If "-s|--suite" option was chosen, then no topology would be added ' +
+ 'to the test script. You can find predefined fixtures in the lib389/topologies.py ' +
+ 'and usem them or write new one if you have special case.')
exit(1)
@@ -41,6 +45,7 @@ def writeFinalizer():
def writeInstanceOp(action):
"""Write instance finializer action"""
+
if repl_deployment:
for idx in range(masters):
idx += 1
@@ -64,16 +69,15 @@ def writeFinalizer():
TEST.write(' standalone' + idx + '.' + action +
'()\n')
- TEST.write(' def fin():\n')
- TEST.write(' """')
- TEST.write('If we are debugging just stop the instances, ' +
- 'otherwise remove them\n')
- TEST.write(' """\n')
+ TEST.write('\n def fin():\n')
+ TEST.write(' """If we are debugging just stop the instances,\n' +
+ ' otherwise remove them\n')
+ TEST.write(' """\n\n')
TEST.write(' if DEBUGGING:\n')
writeInstanceOp('stop')
TEST.write(' else:\n')
writeInstanceOp('delete')
- TEST.write(' request.addfinalizer(fin)')
+ TEST.write('\n request.addfinalizer(fin)')
TEST.write('\n\n')
@@ -158,14 +162,11 @@ if len(sys.argv) > 0:
instances = int(args.inst)
filename = args.filename
- #
# Create/open the new test script file
- #
if not filename:
if ticket:
filename = 'ticket' + ticket + '_test.py'
else:
- # suite
filename = suite + '_test.py'
try:
@@ -174,9 +175,7 @@ if len(sys.argv) > 0:
print("Can\'t open file:", filename)
exit(1)
- #
# Write the imports
- #
TEST.write('import os\nimport sys\nimport time\nimport ldap\n' +
'import logging\nimport pytest\n')
TEST.write('from lib389 import DirSrv, Entry, tools, tasks\nfrom ' +
@@ -184,306 +183,238 @@ if len(sys.argv) > 0:
'import *\nfrom lib389.properties import *\n' +
'from lib389.tasks import *\nfrom lib389.utils import *\n\n')
- #
- # Set the logger and other settings
- #
- TEST.write('DEBUGGING = False\n\n')
- TEST.write('if DEBUGGING:\n')
- TEST.write(' logging.getLogger(__name__).setLevel(logging.DEBUG)\n')
- TEST.write('else:\n')
- TEST.write(' logging.getLogger(__name__).setLevel(logging.INFO)\n')
- TEST.write('log = logging.getLogger(__name__)\n\n\n')
-
- #
- # Write the replication or standalone classes
- #
- repl_deployment = False
- if masters + hubs + consumers > 0:
- #
- # Write the replication class
- #
- repl_deployment = True
-
- TEST.write('class TopologyReplication(object):\n')
- TEST.write(' """The Replication Topology Class"""\n')
- TEST.write(' def __init__(self')
- for idx in range(masters):
- TEST.write(', master' + str(idx + 1))
- for idx in range(hubs):
- TEST.write(', hub' + str(idx + 1))
- for idx in range(consumers):
- TEST.write(', consumer' + str(idx + 1))
- TEST.write('):\n')
- TEST.write(' """Init"""\n')
-
- for idx in range(masters):
- TEST.write(' master' + str(idx + 1) + '.open()\n')
- TEST.write(' self.master' + str(idx + 1) + ' = master' +
- str(idx + 1) + '\n')
- for idx in range(hubs):
- TEST.write(' hub' + str(idx + 1) + '.open()\n')
- TEST.write(' self.hub' + str(idx + 1) + ' = hub' +
- str(idx + 1) + '\n')
- for idx in range(consumers):
- TEST.write(' consumer' + str(idx + 1) + '.open()\n')
- TEST.write(' self.consumer' + str(idx + 1) + ' = consumer' +
- str(idx + 1) + '\n')
- TEST.write('\n\n')
- else:
- #
- # Write the standalone class
- #
- TEST.write('class TopologyStandalone(object):\n')
- TEST.write(' """The DS Topology Class"""\n')
- TEST.write(' def __init__(self')
- for idx in range(instances):
- idx += 1
- if idx == 1:
- idx = ''
- else:
- idx = str(idx)
- TEST.write(', standalone' + idx)
- TEST.write('):\n')
- TEST.write(' """Init"""\n')
- for idx in range(instances):
- idx += 1
- if idx == 1:
- idx = ''
- else:
- idx = str(idx)
- TEST.write(' standalone' + idx + '.open()\n')
- TEST.write(' self.standalone' + idx + ' = standalone' +
- idx + '\n')
- TEST.write('\n\n')
-
- #
- # Write the 'topology function'
- #
- TEST.write('@pytest.fixture(scope="module")\n')
- TEST.write('def topology(request):\n')
-
- if repl_deployment:
- #
- # Create the replication instances
- #
- TEST.write(' """Create Replication Deployment"""\n\n')
- for idx in range(masters):
- idx = str(idx + 1)
- TEST.write(' # Creating master ' + idx + '...\n')
- TEST.write(' if DEBUGGING:\n')
- TEST.write(' master' + idx + ' = DirSrv(verbose=True)\n')
- TEST.write(' else:\n')
- TEST.write(' master' + idx + ' = DirSrv(verbose=False)\n')
- TEST.write(' args_instance[SER_HOST] = HOST_MASTER_' + idx +
- '\n')
- TEST.write(' args_instance[SER_PORT] = PORT_MASTER_' + idx +
- '\n')
- TEST.write(' args_instance[SER_SERVERID_PROP] = ' +
- 'SERVERID_MASTER_' + idx + '\n')
- TEST.write(' args_instance[SER_CREATION_SUFFIX] = ' +
- 'DEFAULT_SUFFIX\n')
- TEST.write(' args_master = args_instance.copy()\n')
- TEST.write(' master' + idx + '.allocate(args_master)\n')
- TEST.write(' instance_master' + idx + ' = master' + idx +
- '.exists()\n')
- TEST.write(' if instance_master' + idx + ':\n')
- TEST.write(' master' + idx + '.delete()\n')
- TEST.write(' master' + idx + '.create()\n')
- TEST.write(' master' + idx + '.open()\n')
- TEST.write(' master' + idx + '.replica.enableReplication' +
- '(suffix=SUFFIX, role=REPLICAROLE_MASTER, ' +
- 'replicaId=REPLICAID_MASTER_' + idx + ')\n\n')
-
- for idx in range(hubs):
- idx = str(idx + 1)
- TEST.write(' # Creating hub ' + idx + '...\n')
- TEST.write(' if DEBUGGING:\n')
- TEST.write(' hub' + idx + ' = DirSrv(verbose=True)\n')
- TEST.write(' else:\n')
- TEST.write(' hub' + idx + ' = DirSrv(verbose=False)\n')
- TEST.write(' args_instance[SER_HOST] = HOST_HUB_' + idx + '\n')
- TEST.write(' args_instance[SER_PORT] = PORT_HUB_' + idx + '\n')
- TEST.write(' args_instance[SER_SERVERID_PROP] = SERVERID_HUB_' +
- idx + '\n')
- TEST.write(' args_instance[SER_CREATION_SUFFIX] = ' +
- 'DEFAULT_SUFFIX\n')
- TEST.write(' args_hub = args_instance.copy()\n')
- TEST.write(' hub' + idx + '.allocate(args_hub)\n')
- TEST.write(' instance_hub' + idx + ' = hub' + idx +
- '.exists()\n')
- TEST.write(' if instance_hub' + idx + ':\n')
- TEST.write(' hub' + idx + '.delete()\n')
- TEST.write(' hub' + idx + '.create()\n')
- TEST.write(' hub' + idx + '.open()\n')
- TEST.write(' hub' + idx + '.replica.enableReplication' +
- '(suffix=SUFFIX, role=REPLICAROLE_HUB, ' +
- 'replicaId=REPLICAID_HUB_' + idx + ')\n\n')
-
- for idx in range(consumers):
- idx = str(idx + 1)
- TEST.write(' # Creating consumer ' + idx + '...\n')
- TEST.write(' if DEBUGGING:\n')
- TEST.write(' consumer' + idx + ' = DirSrv(verbose=True)\n')
- TEST.write(' else:\n')
- TEST.write(' consumer' + idx + ' = DirSrv(verbose=False)\n')
- TEST.write(' args_instance[SER_HOST] = HOST_CONSUMER_' + idx +
- '\n')
- TEST.write(' args_instance[SER_PORT] = PORT_CONSUMER_' + idx +
- '\n')
- TEST.write(' args_instance[SER_SERVERID_PROP] = ' +
- 'SERVERID_CONSUMER_' + idx + '\n')
- TEST.write(' args_instance[SER_CREATION_SUFFIX] = ' +
- 'DEFAULT_SUFFIX\n')
- TEST.write(' args_consumer = args_instance.copy()\n')
- TEST.write(' consumer' + idx + '.allocate(args_consumer)\n')
- TEST.write(' instance_consumer' + idx + ' = consumer' + idx +
- '.exists()\n')
- TEST.write(' if instance_consumer' + idx + ':\n')
- TEST.write(' consumer' + idx + '.delete()\n')
- TEST.write(' consumer' + idx + '.create()\n')
- TEST.write(' consumer' + idx + '.open()\n')
- TEST.write(' consumer' + idx + '.replica.enableReplication' +
- '(suffix=SUFFIX, role=REPLICAROLE_CONSUMER, ' +
- 'replicaId=CONSUMER_REPLICAID)\n\n')
-
- writeFinalizer()
-
- #
- # Create the master agreements
- #
- TEST.write(' #\n')
- TEST.write(' # Create all the agreements\n')
- TEST.write(' #\n')
- agmt_count = 0
- for idx in range(masters):
- master_idx = idx + 1
+ # Add topology function for a ticket only.
+ # Suites have presetuped fixtures in lib389/topologies.py
+ if ticket:
+ TEST.write('DEBUGGING = False\n\n')
+ TEST.write('if DEBUGGING:\n')
+ TEST.write(' logging.getLogger(__name__).setLevel(logging.DEBUG)\n')
+ TEST.write('else:\n')
+ TEST.write(' logging.getLogger(__name__).setLevel(logging.INFO)\n')
+ TEST.write('log = logging.getLogger(__name__)\n\n\n')
+
+ # Write the replication or standalone classes
+ repl_deployment = False
+
+ if masters + hubs + consumers > 0:
+ repl_deployment = True
+
+ TEST.write('class TopologyReplication(object):\n')
+ TEST.write(' def __init__(self')
for idx in range(masters):
- #
- # Create agreements with the other masters (master -> master)
- #
- idx += 1
- if master_idx == idx:
- # skip ourselves
- continue
- TEST.write(' # Creating agreement from master ' +
- str(master_idx) + ' to master ' + str(idx) + '\n')
- TEST.write(" properties = {RA_NAME: " +
- "'meTo_' + master" + str(idx) +
- ".host + ':' + str(master" + str(idx) +
- ".port),\n")
- TEST.write(" RA_BINDDN: " +
- "defaultProperties[REPLICATION_BIND_DN],\n")
- TEST.write(" RA_BINDPW: " +
- "defaultProperties[REPLICATION_BIND_PW],\n")
- TEST.write(" RA_METHOD: " +
- "defaultProperties[REPLICATION_BIND_METHOD],\n")
- TEST.write(" RA_TRANSPORT_PROT: " +
- "defaultProperties[REPLICATION_TRANSPORT]}\n")
- TEST.write(' m' + str(master_idx) + '_m' + str(idx) +
- '_agmt = master' + str(master_idx) +
- '.agreement.create(suffix=SUFFIX, host=master' +
- str(idx) + '.host, port=master' + str(idx) +
- '.port, properties=properties)\n')
- TEST.write(' if not m' + str(master_idx) + '_m' + str(idx) +
- '_agmt:\n')
- TEST.write(' log.fatal("Fail to create a master -> ' +
- 'master replica agreement")\n')
- TEST.write(' sys.exit(1)\n')
- TEST.write(' log.debug("%s created" % m' + str(master_idx) +
- '_m' + str(idx) + '_agmt)\n\n')
- agmt_count += 1
+ TEST.write(', master' + str(idx + 1))
+ for idx in range(hubs):
+ TEST.write(', hub' + str(idx + 1))
+ for idx in range(consumers):
+ TEST.write(', consumer' + str(idx + 1))
+ TEST.write('):\n')
+ for idx in range(masters):
+ TEST.write(' master' + str(idx + 1) + '.open()\n')
+ TEST.write(' self.master' + str(idx + 1) + ' = master' +
+ str(idx + 1) + '\n')
for idx in range(hubs):
- idx += 1
- #
- # Create agmts from each master to each hub (master -> hub)
- #
- TEST.write(' # Creating agreement from master ' +
- str(master_idx) + ' to hub ' + str(idx) + '\n')
- TEST.write(" properties = {RA_NAME: " +
- "'meTo_' + hub" + str(idx) +
- ".host + ':' + str(hub" + str(idx) +
- ".port),\n")
- TEST.write(" RA_BINDDN: " +
- "defaultProperties[REPLICATION_BIND_DN],\n")
- TEST.write(" RA_BINDPW: " +
- "defaultProperties[REPLICATION_BIND_PW],\n")
- TEST.write(" RA_METHOD: " +
- "defaultProperties[REPLICATION_BIND_METHOD],\n")
- TEST.write(" RA_TRANSPORT_PROT: " +
- "defaultProperties[REPLICATION_TRANSPORT]}\n")
- TEST.write(' m' + str(master_idx) + '_h' + str(idx) +
- '_agmt = master' + str(master_idx) +
- '.agreement.create(suffix=SUFFIX, host=hub' +
- str(idx) + '.host, port=hub' + str(idx) +
- '.port, properties=properties)\n')
- TEST.write(' if not m' + str(master_idx) + '_h' + str(idx) +
- '_agmt:\n')
- TEST.write(' log.fatal("Fail to create a master -> ' +
- 'hub replica agreement")\n')
- TEST.write(' sys.exit(1)\n')
- TEST.write(' log.debug("%s created" % m' + str(master_idx) +
- '_h' + str(idx) + '_agmt)\n\n')
- agmt_count += 1
-
- #
- # Create the hub agreements
- #
- for idx in range(hubs):
- hub_idx = idx + 1
- #
- # Add agreements from each hub to each consumer (hub -> consumer)
- #
+ TEST.write(' hub' + str(idx + 1) + '.open()\n')
+ TEST.write(' self.hub' + str(idx + 1) + ' = hub' +
+ str(idx + 1) + '\n')
for idx in range(consumers):
+ TEST.write(' consumer' + str(idx + 1) + '.open()\n')
+ TEST.write(' self.consumer' + str(idx + 1) + ' = consumer' +
+ str(idx + 1) + '\n')
+ TEST.write('\n\n')
+ else:
+ TEST.write('class TopologyStandalone(object):\n')
+ TEST.write(' def __init__(self')
+ for idx in range(instances):
idx += 1
- #
- # Create agreements from each hub to each consumer
- #
- TEST.write(' # Creating agreement from hub ' + str(hub_idx)
- + ' to consumer ' + str(idx) + '\n')
- TEST.write(" properties = {RA_NAME: " +
- "'meTo_' + consumer" + str(idx) +
- ".host + ':' + str(consumer" + str(idx) +
- ".port),\n")
- TEST.write(" RA_BINDDN: " +
- "defaultProperties[REPLICATION_BIND_DN],\n")
- TEST.write(" RA_BINDPW: " +
- "defaultProperties[REPLICATION_BIND_PW],\n")
- TEST.write(" RA_METHOD: " +
- "defaultProperties[REPLICATION_BIND_METHOD],\n")
- TEST.write(" RA_TRANSPORT_PROT: " +
- "defaultProperties[REPLICATION_TRANSPORT]}\n")
- TEST.write(' h' + str(hub_idx) + '_c' + str(idx) +
- '_agmt = hub' + str(hub_idx) +
- '.agreement.create(suffix=SUFFIX, host=consumer' +
- str(idx) + '.host, port=consumer' + str(idx) +
- '.port, properties=properties)\n')
- TEST.write(' if not h' + str(hub_idx) + '_c' + str(idx) +
- '_agmt:\n')
- TEST.write(' log.fatal("Fail to create a hub -> ' +
- 'consumer replica agreement")\n')
- TEST.write(' sys.exit(1)\n')
- TEST.write(' log.debug("%s created" % h' + str(hub_idx) +
- '_c' + str(idx) + '_agmt)\n\n')
- agmt_count += 1
-
- if hubs == 0:
- #
- # No Hubs, see if there are any consumers to create agreements to
- #
+ if idx == 1:
+ idx = ''
+ else:
+ idx = str(idx)
+ TEST.write(', standalone' + idx)
+ TEST.write('):\n')
+
+ for idx in range(instances):
+ idx += 1
+ if idx == 1:
+ idx = ''
+ else:
+ idx = str(idx)
+ TEST.write(' standalone' + idx + '.open()\n')
+ TEST.write(' self.standalone' + idx + ' = standalone' +
+ idx + '\n')
+ TEST.write('\n\n')
+
+ # Write the 'topology function'
+ TEST.write('@pytest.fixture(scope="module")\n')
+ TEST.write('def topology(request):\n')
+
+ if repl_deployment:
+ TEST.write(' """Create Replication Deployment"""\n')
+ for idx in range(masters):
+ idx = str(idx + 1)
+ TEST.write('\n # Creating master ' + idx + '...\n')
+ TEST.write(' if DEBUGGING:\n')
+ TEST.write(' master' + idx + ' = DirSrv(verbose=True)\n')
+ TEST.write(' else:\n')
+ TEST.write(' master' + idx + ' = DirSrv(verbose=False)\n')
+ TEST.write(' args_instance[SER_HOST] = HOST_MASTER_' + idx +
+ '\n')
+ TEST.write(' args_instance[SER_PORT] = PORT_MASTER_' + idx +
+ '\n')
+ TEST.write(' args_instance[SER_SERVERID_PROP] = ' +
+ 'SERVERID_MASTER_' + idx + '\n')
+ TEST.write(' args_instance[SER_CREATION_SUFFIX] = ' +
+ 'DEFAULT_SUFFIX\n')
+ TEST.write(' args_master = args_instance.copy()\n')
+ TEST.write(' master' + idx + '.allocate(args_master)\n')
+ TEST.write(' instance_master' + idx + ' = master' + idx +
+ '.exists()\n')
+ TEST.write(' if instance_master' + idx + ':\n')
+ TEST.write(' master' + idx + '.delete()\n')
+ TEST.write(' master' + idx + '.create()\n')
+ TEST.write(' master' + idx + '.open()\n')
+ TEST.write(' master' + idx + '.replica.enableReplication' +
+ '(suffix=SUFFIX, role=REPLICAROLE_MASTER, ' +
+ 'replicaId=REPLICAID_MASTER_' + idx + ')\n')
+
+ for idx in range(hubs):
+ idx = str(idx + 1)
+ TEST.write('\n # Creating hub ' + idx + '...\n')
+ TEST.write(' if DEBUGGING:\n')
+ TEST.write(' hub' + idx + ' = DirSrv(verbose=True)\n')
+ TEST.write(' else:\n')
+ TEST.write(' hub' + idx + ' = DirSrv(verbose=False)\n')
+ TEST.write(' args_instance[SER_HOST] = HOST_HUB_' + idx + '\n')
+ TEST.write(' args_instance[SER_PORT] = PORT_HUB_' + idx + '\n')
+ TEST.write(' args_instance[SER_SERVERID_PROP] = SERVERID_HUB_' +
+ idx + '\n')
+ TEST.write(' args_instance[SER_CREATION_SUFFIX] = ' +
+ 'DEFAULT_SUFFIX\n')
+ TEST.write(' args_hub = args_instance.copy()\n')
+ TEST.write(' hub' + idx + '.allocate(args_hub)\n')
+ TEST.write(' instance_hub' + idx + ' = hub' + idx +
+ '.exists()\n')
+ TEST.write(' if instance_hub' + idx + ':\n')
+ TEST.write(' hub' + idx + '.delete()\n')
+ TEST.write(' hub' + idx + '.create()\n')
+ TEST.write(' hub' + idx + '.open()\n')
+ TEST.write(' hub' + idx + '.replica.enableReplication' +
+ '(suffix=SUFFIX, role=REPLICAROLE_HUB, ' +
+ 'replicaId=REPLICAID_HUB_' + idx + ')\n')
+
+ for idx in range(consumers):
+ idx = str(idx + 1)
+ TEST.write('\n # Creating consumer ' + idx + '...\n')
+ TEST.write(' if DEBUGGING:\n')
+ TEST.write(' consumer' + idx + ' = DirSrv(verbose=True)\n')
+ TEST.write(' else:\n')
+ TEST.write(' consumer' + idx + ' = DirSrv(verbose=False)\n')
+ TEST.write(' args_instance[SER_HOST] = HOST_CONSUMER_' + idx +
+ '\n')
+ TEST.write(' args_instance[SER_PORT] = PORT_CONSUMER_' + idx +
+ '\n')
+ TEST.write(' args_instance[SER_SERVERID_PROP] = ' +
+ 'SERVERID_CONSUMER_' + idx + '\n')
+ TEST.write(' args_instance[SER_CREATION_SUFFIX] = ' +
+ 'DEFAULT_SUFFIX\n')
+ TEST.write(' args_consumer = args_instance.copy()\n')
+ TEST.write(' consumer' + idx + '.allocate(args_consumer)\n')
+ TEST.write(' instance_consumer' + idx + ' = consumer' + idx +
+ '.exists()\n')
+ TEST.write(' if instance_consumer' + idx + ':\n')
+ TEST.write(' consumer' + idx + '.delete()\n')
+ TEST.write(' consumer' + idx + '.create()\n')
+ TEST.write(' consumer' + idx + '.open()\n')
+ TEST.write(' consumer' + idx + '.replica.enableReplication' +
+ '(suffix=SUFFIX, role=REPLICAROLE_CONSUMER, ' +
+ 'replicaId=CONSUMER_REPLICAID)\n')
+
+ writeFinalizer()
+
+ # Create the master agreements
+ TEST.write(' # Create all the agreements\n')
+ agmt_count = 0
+
for idx in range(masters):
master_idx = idx + 1
- #
- # Create agreements with the consumers (master -> consumer)
- #
+
+ # Create agreements with the other masters (master -> master)
+ for idx in range(masters):
+ idx += 1
+
+ # Skip ourselves
+ if master_idx == idx:
+ continue
+
+ TEST.write('\n # Creating agreement from master ' +
+ str(master_idx) + ' to master ' + str(idx) + '\n')
+ TEST.write(" properties = {RA_NAME: " +
+ "'meTo_' + master" + str(idx) +
+ ".host + ':' + str(master" + str(idx) +
+ ".port),\n")
+ TEST.write(" RA_BINDDN: " +
+ "defaultProperties[REPLICATION_BIND_DN],\n")
+ TEST.write(" RA_BINDPW: " +
+ "defaultProperties[REPLICATION_BIND_PW],\n")
+ TEST.write(" RA_METHOD: " +
+ "defaultProperties[REPLICATION_BIND_METHOD],\n")
+ TEST.write(" RA_TRANSPORT_PROT: " +
+ "defaultProperties[REPLICATION_TRANSPORT]}\n")
+ TEST.write(' m' + str(master_idx) + '_m' + str(idx) +
+ '_agmt = master' + str(master_idx) +
+ '.agreement.create(suffix=SUFFIX, host=master' +
+ str(idx) + '.host, port=master' + str(idx) +
+ '.port, properties=properties)\n')
+ TEST.write(' if not m' + str(master_idx) + '_m' + str(idx) +
+ '_agmt:\n')
+ TEST.write(' log.fatal("Fail to create a master -> ' +
+ 'master replica agreement")\n')
+ TEST.write(' sys.exit(1)\n')
+ TEST.write(' log.debug("%s created" % m' + str(master_idx) +
+ '_m' + str(idx) + '_agmt)\n')
+ agmt_count += 1
+
+ # Create agmts from each master to each hub (master -> hub)
+ for idx in range(hubs):
+ idx += 1
+ TEST.write('\n # Creating agreement from master ' +
+ str(master_idx) + ' to hub ' + str(idx) + '\n')
+ TEST.write(" properties = {RA_NAME: " +
+ "'meTo_' + hub" + str(idx) +
+ ".host + ':' + str(hub" + str(idx) +
+ ".port),\n")
+ TEST.write(" RA_BINDDN: " +
+ "defaultProperties[REPLICATION_BIND_DN],\n")
+ TEST.write(" RA_BINDPW: " +
+ "defaultProperties[REPLICATION_BIND_PW],\n")
+ TEST.write(" RA_METHOD: " +
+ "defaultProperties[REPLICATION_BIND_METHOD],\n")
+ TEST.write(" RA_TRANSPORT_PROT: " +
+ "defaultProperties[REPLICATION_TRANSPORT]}\n")
+ TEST.write(' m' + str(master_idx) + '_h' + str(idx) +
+ '_agmt = master' + str(master_idx) +
+ '.agreement.create(suffix=SUFFIX, host=hub' +
+ str(idx) + '.host, port=hub' + str(idx) +
+ '.port, properties=properties)\n')
+ TEST.write(' if not m' + str(master_idx) + '_h' + str(idx) +
+ '_agmt:\n')
+ TEST.write(' log.fatal("Fail to create a master -> ' +
+ 'hub replica agreement")\n')
+ TEST.write(' sys.exit(1)\n')
+ TEST.write(' log.debug("%s created" % m' + str(master_idx) +
+ '_h' + str(idx) + '_agmt)\n')
+ agmt_count += 1
+
+ # Create the hub agreements
+ for idx in range(hubs):
+ hub_idx = idx + 1
+
+ # Add agreements from each hub to each consumer (hub -> consumer)
for idx in range(consumers):
idx += 1
- #
- # Create agreements from each master to each consumer
- #
- TEST.write(' # Creating agreement from master ' +
- str(master_idx) + ' to consumer ' + str(idx) +
- '\n')
+ TEST.write('\n # Creating agreement from hub ' + str(hub_idx)
+ + ' to consumer ' + str(idx) + '\n')
TEST.write(" properties = {RA_NAME: " +
"'meTo_' + consumer" + str(idx) +
".host + ':' + str(consumer" + str(idx) +
@@ -496,172 +427,195 @@ if len(sys.argv) > 0:
"defaultProperties[REPLICATION_BIND_METHOD],\n")
TEST.write(" RA_TRANSPORT_PROT: " +
"defaultProperties[REPLICATION_TRANSPORT]}\n")
- TEST.write(' m' + str(master_idx) + '_c' + str(idx) +
- '_agmt = master' + str(master_idx) +
- '.agreement.create(suffix=SUFFIX, ' +
- 'host=consumer' + str(idx) +
- '.host, port=consumer' + str(idx) +
+ TEST.write(' h' + str(hub_idx) + '_c' + str(idx) +
+ '_agmt = hub' + str(hub_idx) +
+ '.agreement.create(suffix=SUFFIX, host=consumer' +
+ str(idx) + '.host, port=consumer' + str(idx) +
'.port, properties=properties)\n')
- TEST.write(' if not m' + str(master_idx) + '_c' +
- str(idx) + '_agmt:\n')
+ TEST.write(' if not h' + str(hub_idx) + '_c' + str(idx) +
+ '_agmt:\n')
TEST.write(' log.fatal("Fail to create a hub -> ' +
'consumer replica agreement")\n')
TEST.write(' sys.exit(1)\n')
- TEST.write(' log.debug("%s created" % m' +
- str(master_idx) + '_c' + str(idx) +
- '_agmt)\n\n')
+ TEST.write(' log.debug("%s created" % h' + str(hub_idx) +
+ '_c' + str(idx) + '_agmt)\n')
agmt_count += 1
- #
- # Add sleep that allows all the agreemnts to get situated
- #
- TEST.write(' # Allow the replicas to get situated with the new ' +
- 'agreements...\n')
- TEST.write(' time.sleep(5)\n\n')
-
- #
- # Write the replication initializations
- #
- TEST.write(' #\n')
- TEST.write(' # Initialize all the agreements\n')
- TEST.write(' #\n')
-
- # Masters
- for idx in range(masters):
- idx += 1
- if idx == 1:
- continue
- TEST.write(' master1.agreement.init(SUFFIX, HOST_MASTER_' +
- str(idx) + ', PORT_MASTER_' + str(idx) + ')\n')
- TEST.write(' master1.waitForReplInit(m1_m' + str(idx) +
- '_agmt)\n')
-
- # Hubs
- consumers_inited = False
- for idx in range(hubs):
- idx += 1
- TEST.write(' master1.agreement.init(SUFFIX, HOST_HUB_' +
- str(idx) + ', PORT_HUB_' + str(idx) + ')\n')
- TEST.write(' master1.waitForReplInit(m1_h' + str(idx) +
- '_agmt)\n')
- for idx in range(consumers):
- if consumers_inited:
- continue
+ # No Hubs, see if there are any consumers to create agreements to
+ if hubs == 0:
+
+ # Create agreements with the consumers (master -> consumer)
+ for idx in range(masters):
+ master_idx = idx + 1
+
+ for idx in range(consumers):
+ idx += 1
+ TEST.write('\n # Creating agreement from master ' +
+ str(master_idx) + ' to consumer ' + str(idx) +
+ '\n')
+ TEST.write(" properties = {RA_NAME: " +
+ "'meTo_' + consumer" + str(idx) +
+ ".host + ':' + str(consumer" + str(idx) +
+ ".port),\n")
+ TEST.write(" RA_BINDDN: " +
+ "defaultProperties[REPLICATION_BIND_DN],\n")
+ TEST.write(" RA_BINDPW: " +
+ "defaultProperties[REPLICATION_BIND_PW],\n")
+ TEST.write(" RA_METHOD: " +
+ "defaultProperties[REPLICATION_BIND_METHOD],\n")
+ TEST.write(" RA_TRANSPORT_PROT: " +
+ "defaultProperties[REPLICATION_TRANSPORT]}\n")
+ TEST.write(' m' + str(master_idx) + '_c' + str(idx) +
+ '_agmt = master' + str(master_idx) +
+ '.agreement.create(suffix=SUFFIX, ' +
+ 'host=consumer' + str(idx) +
+ '.host, port=consumer' + str(idx) +
+ '.port, properties=properties)\n')
+ TEST.write(' if not m' + str(master_idx) + '_c' +
+ str(idx) + '_agmt:\n')
+ TEST.write(' log.fatal("Fail to create a hub -> ' +
+ 'consumer replica agreement")\n')
+ TEST.write(' sys.exit(1)\n')
+ TEST.write(' log.debug("%s created" % m' +
+ str(master_idx) + '_c' + str(idx) +
+ '_agmt)\n')
+ agmt_count += 1
+
+ # Add sleep that allows all the agreements to get situated
+ TEST.write('\n # Allow the replicas to get situated with the new ' +
+ 'agreements...\n')
+ TEST.write(' time.sleep(5)\n')
+
+ # Write the replication initializations
+ TEST.write('\n # Initialize all the agreements\n')
+
+ # Masters
+ for idx in range(masters):
idx += 1
- TEST.write(' hub1.agreement.init(SUFFIX, HOST_CONSUMER_' +
- str(idx) + ', PORT_CONSUMER_' + str(idx) + ')\n')
- TEST.write(' hub1.waitForReplInit(h1_c' + str(idx) +
+ if idx == 1:
+ continue
+ TEST.write(' master1.agreement.init(SUFFIX, HOST_MASTER_' +
+ str(idx) + ', PORT_MASTER_' + str(idx) + ')\n')
+ TEST.write(' master1.waitForReplInit(m1_m' + str(idx) +
'_agmt)\n')
- consumers_inited = True
- # Consumers (master -> consumer)
- if hubs == 0:
- for idx in range(consumers):
+ # Hubs
+ consumers_inited = False
+ for idx in range(hubs):
idx += 1
- TEST.write(' master1.agreement.init(SUFFIX, ' +
- 'HOST_CONSUMER_' + str(idx) + ', PORT_CONSUMER_' +
- str(idx) + ')\n')
- TEST.write(' master1.waitForReplInit(m1_c' + str(idx) +
+ TEST.write(' master1.agreement.init(SUFFIX, HOST_HUB_' +
+ str(idx) + ', PORT_HUB_' + str(idx) + ')\n')
+ TEST.write(' master1.waitForReplInit(m1_h' + str(idx) +
'_agmt)\n')
+ for idx in range(consumers):
+ if consumers_inited:
+ continue
+ idx += 1
+ TEST.write(' hub1.agreement.init(SUFFIX, HOST_CONSUMER_' +
+ str(idx) + ', PORT_CONSUMER_' + str(idx) + ')\n')
+ TEST.write(' hub1.waitForReplInit(h1_c' + str(idx) +
+ '_agmt)\n')
+ consumers_inited = True
+
+ # Consumers (master -> consumer)
+ if hubs == 0:
+ for idx in range(consumers):
+ idx += 1
+ TEST.write(' master1.agreement.init(SUFFIX, ' +
+ 'HOST_CONSUMER_' + str(idx) + ', PORT_CONSUMER_' +
+ str(idx) + ')\n')
+ TEST.write(' master1.waitForReplInit(m1_c' + str(idx) +
+ '_agmt)\n')
- TEST.write('\n')
-
- #
- # Write replicaton check
- #
- if agmt_count > 0:
- # Find the lowest replica type (consumer -> master)
- if consumers > 0:
- replica = 'consumer1'
- elif hubs > 0:
- replica = 'hub1'
- else:
- replica = 'master2'
- TEST.write(' # Check replication is working...\n')
- TEST.write(' if master1.testReplication(DEFAULT_SUFFIX, ' +
- replica + '):\n')
- TEST.write(" log.info('Replication is working.')\n")
- TEST.write(' else:\n')
- TEST.write(" log.fatal('Replication is not working.')\n")
- TEST.write(' assert False\n')
TEST.write('\n')
- #
- # Write the finals steps for replication
- #
- TEST.write(' # Clear out the tmp dir\n')
- TEST.write(' master1.clearTmpDir(__file__)\n\n')
- TEST.write(' return TopologyReplication(master1')
- for idx in range(masters):
- idx += 1
- if idx == 1:
- continue
- TEST.write(', master' + str(idx))
- for idx in range(hubs):
- TEST.write(', hub' + str(idx + 1))
- for idx in range(consumers):
- TEST.write(', consumer' + str(idx + 1))
- TEST.write(')\n')
- else:
- #
+    # Write replication check
+ if agmt_count > 0:
+ # Find the lowest replica type (consumer -> master)
+ if consumers > 0:
+ replica = 'consumer1'
+ elif hubs > 0:
+ replica = 'hub1'
+ else:
+ replica = 'master2'
+ TEST.write(' # Check replication is working...\n')
+ TEST.write(' if master1.testReplication(DEFAULT_SUFFIX, ' +
+ replica + '):\n')
+ TEST.write(" log.info('Replication is working.')\n")
+ TEST.write(' else:\n')
+ TEST.write(" log.fatal('Replication is not working.')\n")
+ TEST.write(' assert False\n')
+ TEST.write('\n')
+
+    # Write the final steps for replication
+ TEST.write(' # Clear out the tmp dir\n')
+ TEST.write(' master1.clearTmpDir(__file__)\n\n')
+ TEST.write(' return TopologyReplication(master1')
+
+ for idx in range(masters):
+ idx += 1
+ if idx == 1:
+ continue
+ TEST.write(', master' + str(idx))
+ for idx in range(hubs):
+ TEST.write(', hub' + str(idx + 1))
+ for idx in range(consumers):
+ TEST.write(', consumer' + str(idx + 1))
+ TEST.write(')\n\n')
+
# Standalone servers
- #
-
- # Args for the standalone instance
- TEST.write(' """Create DS Deployment"""\n\n')
- for idx in range(instances):
- idx += 1
- if idx == 1:
- idx = ''
- else:
- idx = str(idx)
- TEST.write(' # Creating standalone instance ' + idx + '...\n')
- TEST.write(' if DEBUGGING:\n')
- TEST.write(' standalone' + idx +
- ' = DirSrv(verbose=True)\n')
- TEST.write(' else:\n')
- TEST.write(' standalone' + idx +
- ' = DirSrv(verbose=False)\n')
- TEST.write(' args_instance[SER_HOST] = HOST_STANDALONE' +
- idx + '\n')
- TEST.write(' args_instance[SER_PORT] = PORT_STANDALONE' +
- idx + '\n')
- TEST.write(' args_instance[SER_SERVERID_PROP] = ' +
- 'SERVERID_STANDALONE' + idx + '\n')
- TEST.write(' args_instance[SER_CREATION_SUFFIX] = ' +
- 'DEFAULT_SUFFIX\n')
- TEST.write(' args_standalone' + idx + ' = args_instance.copy' +
- '()\n')
- TEST.write(' standalone' + idx + '.allocate(args_standalone' +
- idx + ')\n')
-
- # Get the status of the instance and restart it if it exists
- TEST.write(' instance_standalone' + idx + ' = standalone' +
- idx + '.exists()\n')
-
- # Remove the instance
- TEST.write(' if instance_standalone' + idx + ':\n')
- TEST.write(' standalone' + idx + '.delete()\n')
-
- # Create and open the instance
- TEST.write(' standalone' + idx + '.create()\n')
- TEST.write(' standalone' + idx + '.open()\n\n')
-
- writeFinalizer()
-
- TEST.write(' return TopologyStandalone(standalone')
- for idx in range(instances):
- idx += 1
- if idx == 1:
- continue
- TEST.write(', standalone' + str(idx))
- TEST.write(')\n')
+ else:
+ TEST.write(' """Create DS Deployment"""\n')
+ for idx in range(instances):
+ idx += 1
+ if idx == 1:
+ idx = ''
+ else:
+ idx = str(idx)
+ TEST.write('\n # Creating standalone instance ' + idx + '...\n')
+ TEST.write(' if DEBUGGING:\n')
+ TEST.write(' standalone' + idx +
+ ' = DirSrv(verbose=True)\n')
+ TEST.write(' else:\n')
+ TEST.write(' standalone' + idx +
+ ' = DirSrv(verbose=False)\n')
+ TEST.write(' args_instance[SER_HOST] = HOST_STANDALONE' +
+ idx + '\n')
+ TEST.write(' args_instance[SER_PORT] = PORT_STANDALONE' +
+ idx + '\n')
+ TEST.write(' args_instance[SER_SERVERID_PROP] = ' +
+ 'SERVERID_STANDALONE' + idx + '\n')
+ TEST.write(' args_instance[SER_CREATION_SUFFIX] = ' +
+ 'DEFAULT_SUFFIX\n')
+ TEST.write(' args_standalone' + idx + ' = args_instance.copy' +
+ '()\n')
+ TEST.write(' standalone' + idx + '.allocate(args_standalone' +
+ idx + ')\n')
- TEST.write('\n\n')
+ # Get the status of the instance and restart it if it exists
+ TEST.write(' instance_standalone' + idx + ' = standalone' +
+ idx + '.exists()\n')
+
+ # Remove the instance
+ TEST.write(' if instance_standalone' + idx + ':\n')
+ TEST.write(' standalone' + idx + '.delete()\n')
+
+ # Create and open the instance
+ TEST.write(' standalone' + idx + '.create()\n')
+ TEST.write(' standalone' + idx + '.open()\n')
+
+ writeFinalizer()
+
+ TEST.write(' return TopologyStandalone(standalone')
+ for idx in range(instances):
+ idx += 1
+ if idx == 1:
+ continue
+ TEST.write(', standalone' + str(idx))
+ TEST.write(')\n\n')
+ TEST.write('\n')
- #
# Write the test function
- #
if ticket:
TEST.write('def test_ticket' + ticket + '(topology):\n')
if repl_deployment:
@@ -678,33 +632,35 @@ if len(sys.argv) > 0:
TEST.write(' Also, if you need any testcase initialization,\n')
TEST.write(' please, write additional fixture for that' +
'(include finalizer).\n')
- TEST.write('\n """\n\n')
-
+ TEST.write(' """\n\n')
else:
- # Write the first initial empty test function
- TEST.write('def test_' + suite + '(topology):\n')
+ TEST.write('def test_something(topology_XX):\n')
TEST.write(' """Write a single test here...\n\n')
TEST.write(' Also, if you need any test suite initialization,\n')
- TEST.write(' please, write additional fixture for that(include ' +
- 'finalizer).\n """\n')
+ TEST.write(' please, write additional fixture for that(include finalizer).\n' +
+ ' Topology for suites are predefined in lib389/topologies.py.\n\n'
+ ' Choose one of the options:\n'
+ ' 1) topology_st for standalone\n'
+ ' topology.standalone\n'
+ ' 2) topology_m2 for two masters\n'
+ ' topology.ms["master{1,2}"]\n'
+ ' each master has agreements\n'
+ ' topology.ms["master{1,2}_agmts"][m{1,2}_m{2,1}]\n'
+ ' 3) topology_m4 for four masters\n'
+ ' the same as topology_m2 but has more masters and agreements\n'
+ ' """\n\n')
TEST.write(' if DEBUGGING:\n')
TEST.write(' # Add debugging steps(if any)...\n')
- TEST.write(' pass\n\n')
- TEST.write(" log.info('Test PASSED')\n")
- TEST.write('\n\n')
+ TEST.write(' pass\n\n\n')
- #
# Write the main function
- #
TEST.write("if __name__ == '__main__':\n")
TEST.write(' # Run isolated\n')
TEST.write(' # -s for DEBUG mode\n')
TEST.write(' CURRENT_FILE = os.path.realpath(__file__)\n')
TEST.write(' pytest.main("-s %s" % CURRENT_FILE)\n\n')
- #
# Done, close things up
- #
TEST.close()
print('Created: ' + filename)
diff --git a/dirsrvtests/tests/suites/acct_usability_plugin/acct_usability_test.py b/dirsrvtests/tests/suites/acct_usability_plugin/acct_usability_test.py
deleted file mode 100644
index b7ac869..0000000
--- a/dirsrvtests/tests/suites/acct_usability_plugin/acct_usability_test.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_acct_usability_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_acct_usability_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/acctpolicy_plugin/accpol_test.py b/dirsrvtests/tests/suites/acctpolicy_plugin/accpol_test.py
index 22acc39..a005f3f 100644
--- a/dirsrvtests/tests/suites/acctpolicy_plugin/accpol_test.py
+++ b/dirsrvtests/tests/suites/acctpolicy_plugin/accpol_test.py
@@ -6,16 +6,14 @@ import logging
import pytest
import ldif
import ldap.modlist as modlist
-from ldif import LDIFParser,LDIFWriter
+from ldif import LDIFParser, LDIFWriter
from lib389 import DirSrv, Entry, tools, tasks
from lib389.tools import DirSrvTools
from lib389._constants import *
from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
+from lib389.topologies import topology_st
LOCAL_CONFIG = 'cn=AccountPolicy1,ou=people,dc=example,dc=com'
TEMPLT_COS = 'cn=TempltCoS,ou=people,dc=example,dc=com'
@@ -32,79 +30,45 @@ USR_DN = 'uid={},{},{}'.format(USR_RDN, SUBTREE, SUFFIX)
USER_PW = 'Secret1234'
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
@pytest.fixture(scope="module")
-def topology(request):
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- log.info("Instance detected")
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- return TopologyStandalone(standalone)
-
-
-@pytest.fixture(scope="module")
-def accpolicy_local(topology):
+def accpolicy_local(topology_st):
"""Configure account policy plugin based
on LDIF file and restart the server.
"""
log.info('Enabling account policy plugin and restarting the server')
try:
- topology.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY)
- topology.standalone.modify_s(ACCPOL_DN, [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', CONFIG_DN)])
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes')])
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'stateattrname', 'lastLoginTime')])
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'altstateattrname', 'createTimestamp')])
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'specattrname', 'acctPolicySubentry')])
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'limitattrname', 'accountInactivityLimit')])
+ topology_st.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY)
+ topology_st.standalone.modify_s(ACCPOL_DN, [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', CONFIG_DN)])
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes')])
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'stateattrname', 'lastLoginTime')])
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'altstateattrname', 'createTimestamp')])
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'specattrname', 'acctPolicySubentry')])
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'limitattrname', 'accountInactivityLimit')])
except ldap.LDAPError as e:
- log.error("Failed to modify account policy plugin attrs attrs")
+ log.error("Failed to modify account policy plugin attrs attrs")
raise
log.info("Adding Local account policy plugin configuration entries")
try:
- topology.standalone.add_s(Entry((LOCAL_CONFIG, {
- 'objectclass': ['top', 'ldapsubentry', 'extensibleObject', 'accountpolicy'],
- 'accountInactivityLimit': '15'})))
- topology.standalone.add_s(Entry((TEMPLT_COS, {
- 'objectclass': ['top', 'ldapsubentry', 'extensibleObject', 'cosTemplate'],
- 'acctPolicySubentry': LOCAL_CONFIG})))
- topology.standalone.add_s(Entry((DEFN_COS, {
- 'objectclass': ['top', 'ldapsubentry', 'cosSuperDefinition', 'cosPointerDefinition'],
- 'cosTemplateDn': TEMPLT_COS,
- 'cosAttribute': 'acctPolicySubentry default operational-default'})))
+ topology_st.standalone.add_s(Entry((LOCAL_CONFIG, {
+ 'objectclass': ['top', 'ldapsubentry', 'extensibleObject', 'accountpolicy'],
+ 'accountInactivityLimit': '15'})))
+ topology_st.standalone.add_s(Entry((TEMPLT_COS, {
+ 'objectclass': ['top', 'ldapsubentry', 'extensibleObject', 'cosTemplate'],
+ 'acctPolicySubentry': LOCAL_CONFIG})))
+ topology_st.standalone.add_s(Entry((DEFN_COS, {
+ 'objectclass': ['top', 'ldapsubentry', 'cosSuperDefinition', 'cosPointerDefinition'],
+ 'cosTemplateDn': TEMPLT_COS,
+ 'cosAttribute': 'acctPolicySubentry default operational-default'})))
except ldap.LDAPError as e:
- log.error('Failed to add entry ({}, {}, {}):'.format(LOCAL_CONFIG, TEMPLT_COS, DEFN_COS))
- raise
- topology.standalone.restart(timeout=10)
+ log.error('Failed to add entry ({}, {}, {}):'.format(LOCAL_CONFIG, TEMPLT_COS, DEFN_COS))
+ raise
+ topology_st.standalone.restart(timeout=10)
@pytest.fixture(scope="module")
-def users(topology, request):
+def users(topology_st, request):
"""Add users to the given SUFFIX and SUBTREE."""
log.info('Adding {} {} users to {} SUBTREE {} SUFFIX'.format(NOF_USERS, USR_NAME, SUBTREE, SUFFIX))
@@ -112,33 +76,35 @@ def users(topology, request):
USR_RDN = '{}{}'.format(USR_NAME, NUM)
USR_DN = 'uid={},{},{}'.format(USR_RDN, SUBTREE, SUFFIX)
try:
- topology.standalone.add_s(Entry((USR_DN, {
- 'objectclass': 'top person'.split(),
- 'objectclass': 'inetorgperson',
- 'cn': USR_RDN,
- 'sn': USR_RDN,
- 'userpassword': 'Secret1234',
-                'mail': '{}@redhat.com'.format(USR_RDN)})))
+ topology_st.standalone.add_s(Entry((USR_DN, {
+ 'objectclass': 'top person'.split(),
+ 'objectclass': 'inetorgperson',
+ 'cn': USR_RDN,
+ 'sn': USR_RDN,
+ 'userpassword': 'Secret1234',
+                'mail': '{}@redhat.com'.format(USR_RDN)})))
except ldap.LDAPError as e:
log.error('Failed to add {} user: error {}'.format(USR_DN, e.message['desc']))
raise
def fin():
- log.info('Deleting {} {} users from {} {}'.format(NOF_USERS, USR_NAME, SUBTREE, SUFFIX))
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- for NUM in range(1, NOF_USERS):
- USR_RDN = '{}{}'.format(USR_NAME, NUM)
- USR_DN = 'uid={},{},{}'.format(USR_RDN, SUBTREE, SUFFIX)
- try:
- topology.standalone.delete_s(USR_DN)
- except ldap.LDAPError as e:
- log.error('Failed to delete {} :error- {}'.format(USR_DN, e.message['desc']))
- raise
+ log.info('Deleting {} {} users from {} {}'.format(NOF_USERS, USR_NAME, SUBTREE, SUFFIX))
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ for NUM in range(1, NOF_USERS):
+ USR_RDN = '{}{}'.format(USR_NAME, NUM)
+ USR_DN = 'uid={},{},{}'.format(USR_RDN, SUBTREE, SUFFIX)
+ try:
+ topology_st.standalone.delete_s(USR_DN)
+ except ldap.LDAPError as e:
+ log.error('Failed to delete {} :error- {}'.format(USR_DN, e.message['desc']))
+ raise
+
request.addfinalizer(fin)
-def test_inact_plugin(topology, accpolicy_local, users):
- """Verify if user account is inactivated when accountInactivityLimit is exceeded. User is created in the default SUFFIX.
+def test_inact_plugin(topology_st, accpolicy_local, users):
+ """Verify if user account is inactivated when accountInactivityLimit is exceeded.
+ User is created in the default SUFFIX.
:Feature: Account Policy Plugin
@@ -162,21 +128,21 @@ def test_inact_plugin(topology, accpolicy_local, users):
USR_RDN = '{}{}'.format(USR_NAME, NUM)
USR_DN = 'uid={},{},{}'.format(USR_RDN, SUBTREE, SUFFIX)
try:
- topology.standalone.simple_bind_s(USR_DN, USER_PW)
+ topology_st.standalone.simple_bind_s(USR_DN, USER_PW)
except ldap.LDAPError as e:
log.error('Checking if {} is inactivated: error {}'.format(USR_DN, e.message['desc']))
raise
-
+
USR_DN = 'uid={}1,{},{}'.format(USR_NAME, SUBTREE, SUFFIX)
log.info("Sleeping for 4 more secs to check if {} is inactivated, expected error 19".format(USR_DN))
time.sleep(4)
with pytest.raises(ldap.CONSTRAINT_VIOLATION) as e:
- topology.standalone.simple_bind_s(USR_DN, USER_PW)
+ topology_st.standalone.simple_bind_s(USR_DN, USER_PW)
USR_DN = 'uid={}2,{},{}'.format(USR_NAME, SUBTREE, SUFFIX)
log.info("Checking if {} is not inactivated, expected value 0".format(USR_DN))
try:
- topology.standalone.simple_bind_s(USR_DN, USER_PW)
+ topology_st.standalone.simple_bind_s(USR_DN, USER_PW)
except ldap.LDAPError as e:
log.error('Checking if {} is inactivated : error {}'.format(USR_DN, e.message['desc']))
raise
@@ -186,7 +152,7 @@ def test_inact_plugin(topology, accpolicy_local, users):
USR_DN = 'uid={},{},{}'.format(USR_RDN, SUBTREE, SUFFIX)
log.info("Checking if {} is inactivated, expected error 19".format(USR_DN))
with pytest.raises(ldap.CONSTRAINT_VIOLATION) as e:
- topology.standalone.simple_bind_s(USR_DN, USER_PW)
+ topology_st.standalone.simple_bind_s(USR_DN, USER_PW)
if __name__ == '__main__':
diff --git a/dirsrvtests/tests/suites/acctpolicy_plugin/acctpolicy_test.py b/dirsrvtests/tests/suites/acctpolicy_plugin/acctpolicy_test.py
deleted file mode 100644
index 14c6851..0000000
--- a/dirsrvtests/tests/suites/acctpolicy_plugin/acctpolicy_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_acctpolicy_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_acctpolicy_(topology):
- '''
- Write a single test here...
- '''
- log.info('acctpolicy test suite PASSED')
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/acl/acl_test.py b/dirsrvtests/tests/suites/acl/acl_test.py
index f42a584..cb58352 100644
--- a/dirsrvtests/tests/suites/acl/acl_test.py
+++ b/dirsrvtests/tests/suites/acl/acl_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -18,38 +18,33 @@ from lib389._constants import *
from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_m2
from ldap.controls.simple import GetEffectiveRightsControl
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-#
-# important part. We can deploy Master1 and Master2 on different versions
-#
-installation1_prefix = None
-installation2_prefix = None
-
TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX
-STAGING_CN = "staged user"
-PRODUCTION_CN = "accounts"
-EXCEPT_CN = "excepts"
+STAGING_CN = "staged user"
+PRODUCTION_CN = "accounts"
+EXCEPT_CN = "excepts"
-STAGING_DN = "cn=%s,%s" % (STAGING_CN, SUFFIX)
+STAGING_DN = "cn=%s,%s" % (STAGING_CN, SUFFIX)
PRODUCTION_DN = "cn=%s,%s" % (PRODUCTION_CN, SUFFIX)
PROD_EXCEPT_DN = "cn=%s,%s" % (EXCEPT_CN, PRODUCTION_DN)
-STAGING_PATTERN = "cn=%s*,%s" % (STAGING_CN[:2], SUFFIX)
+STAGING_PATTERN = "cn=%s*,%s" % (STAGING_CN[:2], SUFFIX)
PRODUCTION_PATTERN = "cn=%s*,%s" % (PRODUCTION_CN[:2], SUFFIX)
-BAD_STAGING_PATTERN = "cn=bad*,%s" % (SUFFIX)
+BAD_STAGING_PATTERN = "cn=bad*,%s" % (SUFFIX)
BAD_PRODUCTION_PATTERN = "cn=bad*,%s" % (SUFFIX)
-BIND_CN = "bind_entry"
-BIND_DN = "cn=%s,%s" % (BIND_CN, SUFFIX)
-BIND_PW = "password"
+BIND_CN = "bind_entry"
+BIND_DN = "cn=%s,%s" % (BIND_CN, SUFFIX)
+BIND_PW = "password"
-NEW_ACCOUNT = "new_account"
-MAX_ACCOUNTS = 20
+NEW_ACCOUNT = "new_account"
+MAX_ACCOUNTS = 20
CONFIG_MODDN_ACI_ATTR = "nsslapd-moddn-aci"
@@ -61,127 +56,7 @@ SRC_ENTRY_DN = "cn=%s,%s" % (SRC_ENTRY_CN, SUFFIX)
DST_ENTRY_DN = "cn=%s,%s" % (DST_ENTRY_CN, SUFFIX)
-class TopologyMaster1Master2(object):
- def __init__(self, master1, master2):
- master1.open()
- self.master1 = master1
-
- master2.open()
- self.master2 = master2
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- """This fixture is used to create a replicated topology for the 'module'.
- The replicated topology is MASTER1 <-> Master2.
- """
-
- global installation1_prefix
- global installation2_prefix
-
- # allocate master1 on a given deployement
- master1 = DirSrv(verbose=False)
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Args for the master1 instance
- args_instance[SER_HOST] = HOST_MASTER_1
- args_instance[SER_PORT] = PORT_MASTER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
- args_master = args_instance.copy()
- master1.allocate(args_master)
-
- # allocate master1 on a given deployement
- master2 = DirSrv(verbose=False)
- if installation2_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation2_prefix
-
- # Args for the consumer instance
- args_instance[SER_HOST] = HOST_MASTER_2
- args_instance[SER_PORT] = PORT_MASTER_2
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
- args_master = args_instance.copy()
- master2.allocate(args_master)
-
- # Get the status of the instance and restart it if it exists
- instance_master1 = master1.exists()
- instance_master2 = master2.exists()
-
- # Remove all the instances
- if instance_master1:
- master1.delete()
- if instance_master2:
- master2.delete()
-
- # Create the instances
- master1.create()
- master1.open()
- master2.create()
- master2.open()
-
- #
- # Now prepare the Master-Consumer topology
- #
- # First Enable replication
- master1.replica.enableReplication(suffix=SUFFIX,
- role=REPLICAROLE_MASTER,
- replicaId=REPLICAID_MASTER_1)
- master2.replica.enableReplication(suffix=SUFFIX,
- role=REPLICAROLE_MASTER,
- replicaId=REPLICAID_MASTER_2)
-
- # Initialize the supplier->consumer
-
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- repl_agreement = master1.agreement.create(suffix=SUFFIX,
- host=master2.host,
- port=master2.port,
- properties=properties)
-
- if not repl_agreement:
- log.fatal("Fail to create a replica agreement")
- sys.exit(1)
-
- log.debug("%s created" % repl_agreement)
-
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- master2.agreement.create(suffix=SUFFIX,
- host=master1.host,
- port=master1.port,
- properties=properties)
-
- master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
- master1.waitForReplInit(repl_agreement)
-
- # Check replication is working fine
- if master1.testReplication(DEFAULT_SUFFIX, master2):
- log.info('Replication is working.')
- else:
- log.fatal('Replication is not working.')
- assert False
-
- def fin():
- master1.delete()
- master2.delete()
- request.addfinalizer(fin)
-
- # clear the tmp directory
- master1.clearTmpDir(__file__)
-
- # Here we have two instances master and consumer
- # with replication working.
- return TopologyMaster1Master2(master1, master2)
-
-
-def add_attr(topology, attr_name):
+def add_attr(topology_m2, attr_name):
"""Adds attribute to the schema"""
ATTR_VALUE = """(NAME '%s' \
@@ -190,7 +65,7 @@ def add_attr(topology, attr_name):
mod = [(ldap.MOD_ADD, 'attributeTypes', ATTR_VALUE)]
try:
- topology.master1.modify_s(DN_SCHEMA, mod)
+ topology_m2.ms["master1"].modify_s(DN_SCHEMA, mod)
except ldap.LDAPError as e:
log.fatal('Failed to add attr (%s): error (%s)' % (attr_name,
e.message['desc']))
@@ -198,7 +73,7 @@ def add_attr(topology, attr_name):
@pytest.fixture(params=["lang-ja", "binary", "phonetic"])
-def aci_with_attr_subtype(request, topology):
+def aci_with_attr_subtype(request, topology_m2):
"""Adds and deletes an ACI in the DEFAULT_SUFFIX"""
TARGET_ATTR = 'protectedOperation'
@@ -207,10 +82,10 @@ def aci_with_attr_subtype(request, topology):
log.info("========Executing test with '%s' subtype========" % SUBTYPE)
log.info(" Add a target attribute")
- add_attr(topology, TARGET_ATTR)
+ add_attr(topology_m2, TARGET_ATTR)
log.info(" Add a user attribute")
- add_attr(topology, USER_ATTR)
+ add_attr(topology_m2, USER_ATTR)
ACI_TARGET = '(targetattr=%s;%s)' % (TARGET_ATTR, SUBTYPE)
ACI_ALLOW = '(version 3.0; acl "test aci for subtypes"; allow (read) '
@@ -220,26 +95,27 @@ def aci_with_attr_subtype(request, topology):
log.info(" Add an ACI with attribute subtype")
mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)]
try:
- topology.master1.modify_s(DEFAULT_SUFFIX, mod)
+ topology_m2.ms["master1"].modify_s(DEFAULT_SUFFIX, mod)
except ldap.LDAPError as e:
log.fatal('Failed to add ACI: error (%s)' % (e.message['desc']))
assert False
def fin():
log.info(" Finally, delete an ACI with the '%s' subtype" %
- SUBTYPE)
+ SUBTYPE)
mod = [(ldap.MOD_DELETE, 'aci', ACI_BODY)]
try:
- topology.master1.modify_s(DEFAULT_SUFFIX, mod)
+ topology_m2.ms["master1"].modify_s(DEFAULT_SUFFIX, mod)
except ldap.LDAPError as e:
log.fatal('Failed to delete ACI: error (%s)' % (e.message['desc']))
assert False
+
request.addfinalizer(fin)
return ACI_BODY
-def test_aci_attr_subtype_targetattr(topology, aci_with_attr_subtype):
+def test_aci_attr_subtype_targetattr(topology_m2, aci_with_attr_subtype):
"""Checks, that ACIs allow attribute subtypes in the targetattr keyword
Test description:
@@ -254,9 +130,9 @@ def test_aci_attr_subtype_targetattr(topology, aci_with_attr_subtype):
log.info(" Search for the added attribute")
try:
- entries = topology.master1.search_s(DEFAULT_SUFFIX,
- ldap.SCOPE_BASE,
- '(objectclass=*)', ['aci'])
+ entries = topology_m2.ms["master1"].search_s(DEFAULT_SUFFIX,
+ ldap.SCOPE_BASE,
+ '(objectclass=*)', ['aci'])
entry = str(entries[0])
assert aci_with_attr_subtype in entry
log.info(" The added attribute was found")
@@ -266,98 +142,97 @@ def test_aci_attr_subtype_targetattr(topology, aci_with_attr_subtype):
assert False
-def _bind_manager(topology):
- topology.master1.log.info("Bind as %s " % DN_DM)
- topology.master1.simple_bind_s(DN_DM, PASSWORD)
+def _bind_manager(topology_m2):
+ topology_m2.ms["master1"].log.info("Bind as %s " % DN_DM)
+ topology_m2.ms["master1"].simple_bind_s(DN_DM, PASSWORD)
-def _bind_normal(topology):
+def _bind_normal(topology_m2):
# bind as bind_entry
- topology.master1.log.info("Bind as %s" % BIND_DN)
- topology.master1.simple_bind_s(BIND_DN, BIND_PW)
+ topology_m2.ms["master1"].log.info("Bind as %s" % BIND_DN)
+ topology_m2.ms["master1"].simple_bind_s(BIND_DN, BIND_PW)
-def _moddn_aci_deny_tree(topology, mod_type=None,
+def _moddn_aci_deny_tree(topology_m2, mod_type=None,
target_from=STAGING_DN, target_to=PROD_EXCEPT_DN):
"""It denies the access moddn_to in cn=except,cn=accounts,SUFFIX"""
assert mod_type is not None
ACI_TARGET_FROM = ""
- ACI_TARGET_TO = ""
+ ACI_TARGET_TO = ""
if target_from:
ACI_TARGET_FROM = "(target_from = \"ldap:///%s\")" % (target_from)
if target_to:
- ACI_TARGET_TO = "(target_to = \"ldap:///%s\")" % (target_to)
+ ACI_TARGET_TO = "(target_to = \"ldap:///%s\")" % (target_to)
- ACI_ALLOW = "(version 3.0; acl \"Deny MODDN to prod_except\"; deny (moddn)"
- ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN
- ACI_BODY = ACI_TARGET_TO + ACI_TARGET_FROM + ACI_ALLOW + ACI_SUBJECT
+ ACI_ALLOW = "(version 3.0; acl \"Deny MODDN to prod_except\"; deny (moddn)"
+ ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN
+ ACI_BODY = ACI_TARGET_TO + ACI_TARGET_FROM + ACI_ALLOW + ACI_SUBJECT
mod = [(mod_type, 'aci', ACI_BODY)]
- #topology.master1.modify_s(SUFFIX, mod)
- topology.master1.log.info("Add a DENY aci under %s " % PROD_EXCEPT_DN)
- topology.master1.modify_s(PROD_EXCEPT_DN, mod)
+ # topology_m2.ms["master1"].modify_s(SUFFIX, mod)
+ topology_m2.ms["master1"].log.info("Add a DENY aci under %s " % PROD_EXCEPT_DN)
+ topology_m2.ms["master1"].modify_s(PROD_EXCEPT_DN, mod)
-def _write_aci_staging(topology, mod_type=None):
+def _write_aci_staging(topology_m2, mod_type=None):
assert mod_type is not None
ACI_TARGET = "(targetattr= \"cn\")(target=\"ldap:///cn=*,%s\")" % STAGING_DN
- ACI_ALLOW = "(version 3.0; acl \"write staging entries\"; allow (write)"
- ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN
- ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT
+ ACI_ALLOW = "(version 3.0; acl \"write staging entries\"; allow (write)"
+ ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN
+ ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT
mod = [(mod_type, 'aci', ACI_BODY)]
- topology.master1.modify_s(SUFFIX, mod)
+ topology_m2.ms["master1"].modify_s(SUFFIX, mod)
-def _write_aci_production(topology, mod_type=None):
+def _write_aci_production(topology_m2, mod_type=None):
assert mod_type is not None
ACI_TARGET = "(targetattr= \"cn\")(target=\"ldap:///cn=*,%s\")" % PRODUCTION_DN
- ACI_ALLOW = "(version 3.0; acl \"write production entries\"; allow (write)"
- ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN
- ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT
+ ACI_ALLOW = "(version 3.0; acl \"write production entries\"; allow (write)"
+ ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN
+ ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT
mod = [(mod_type, 'aci', ACI_BODY)]
- topology.master1.modify_s(SUFFIX, mod)
+ topology_m2.ms["master1"].modify_s(SUFFIX, mod)
-def _moddn_aci_staging_to_production(topology, mod_type=None,
+def _moddn_aci_staging_to_production(topology_m2, mod_type=None,
target_from=STAGING_DN, target_to=PRODUCTION_DN):
assert mod_type is not None
-
ACI_TARGET_FROM = ""
- ACI_TARGET_TO = ""
+ ACI_TARGET_TO = ""
if target_from:
ACI_TARGET_FROM = "(target_from = \"ldap:///%s\")" % (target_from)
if target_to:
- ACI_TARGET_TO = "(target_to = \"ldap:///%s\")" % (target_to)
+ ACI_TARGET_TO = "(target_to = \"ldap:///%s\")" % (target_to)
- ACI_ALLOW = "(version 3.0; acl \"MODDN from staging to production\"; allow (moddn)"
- ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN
- ACI_BODY = ACI_TARGET_FROM + ACI_TARGET_TO + ACI_ALLOW + ACI_SUBJECT
+ ACI_ALLOW = "(version 3.0; acl \"MODDN from staging to production\"; allow (moddn)"
+ ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN
+ ACI_BODY = ACI_TARGET_FROM + ACI_TARGET_TO + ACI_ALLOW + ACI_SUBJECT
mod = [(mod_type, 'aci', ACI_BODY)]
- topology.master1.modify_s(SUFFIX, mod)
+ topology_m2.ms["master1"].modify_s(SUFFIX, mod)
- _write_aci_staging(topology, mod_type=mod_type)
+ _write_aci_staging(topology_m2, mod_type=mod_type)
-def _moddn_aci_from_production_to_staging(topology, mod_type=None):
+def _moddn_aci_from_production_to_staging(topology_m2, mod_type=None):
assert mod_type is not None
- ACI_TARGET = "(target_from = \"ldap:///%s\") (target_to = \"ldap:///%s\")" % (
- PRODUCTION_DN, STAGING_DN)
- ACI_ALLOW = "(version 3.0; acl \"MODDN from production to staging\"; allow (moddn)"
- ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN
- ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT
+ ACI_TARGET = "(target_from = \"ldap:///%s\") (target_to = \"ldap:///%s\")" % (
+ PRODUCTION_DN, STAGING_DN)
+ ACI_ALLOW = "(version 3.0; acl \"MODDN from production to staging\"; allow (moddn)"
+ ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN
+ ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT
mod = [(mod_type, 'aci', ACI_BODY)]
- topology.master1.modify_s(SUFFIX, mod)
+ topology_m2.ms["master1"].modify_s(SUFFIX, mod)
- _write_aci_production(topology, mod_type=mod_type)
+ _write_aci_production(topology_m2, mod_type=mod_type)
@pytest.fixture(scope="module")
-def moddn_setup(topology):
+def moddn_setup(topology_m2):
"""Creates
- a staging DIT
- a production DIT
@@ -365,94 +240,94 @@ def moddn_setup(topology):
- enable ACL logging (commented for performance reason)
"""
- topology.master1.log.info("\n\n######## INITIALIZATION ########\n")
+ topology_m2.ms["master1"].log.info("\n\n######## INITIALIZATION ########\n")
# entry used to bind with
- topology.master1.log.info("Add %s" % BIND_DN)
- topology.master1.add_s(Entry((BIND_DN, {
- 'objectclass': "top person".split(),
- 'sn': BIND_CN,
- 'cn': BIND_CN,
- 'userpassword': BIND_PW})))
+ topology_m2.ms["master1"].log.info("Add %s" % BIND_DN)
+ topology_m2.ms["master1"].add_s(Entry((BIND_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': BIND_CN,
+ 'cn': BIND_CN,
+ 'userpassword': BIND_PW})))
# DIT for staging
- topology.master1.log.info("Add %s" % STAGING_DN)
- topology.master1.add_s(Entry((STAGING_DN, {
- 'objectclass': "top organizationalRole".split(),
- 'cn': STAGING_CN,
- 'description': "staging DIT"})))
+ topology_m2.ms["master1"].log.info("Add %s" % STAGING_DN)
+ topology_m2.ms["master1"].add_s(Entry((STAGING_DN, {
+ 'objectclass': "top organizationalRole".split(),
+ 'cn': STAGING_CN,
+ 'description': "staging DIT"})))
# DIT for production
- topology.master1.log.info("Add %s" % PRODUCTION_DN)
- topology.master1.add_s(Entry((PRODUCTION_DN, {
- 'objectclass': "top organizationalRole".split(),
- 'cn': PRODUCTION_CN,
- 'description': "production DIT"})))
+ topology_m2.ms["master1"].log.info("Add %s" % PRODUCTION_DN)
+ topology_m2.ms["master1"].add_s(Entry((PRODUCTION_DN, {
+ 'objectclass': "top organizationalRole".split(),
+ 'cn': PRODUCTION_CN,
+ 'description': "production DIT"})))
# DIT for production/except
- topology.master1.log.info("Add %s" % PROD_EXCEPT_DN)
- topology.master1.add_s(Entry((PROD_EXCEPT_DN, {
- 'objectclass': "top organizationalRole".split(),
- 'cn': EXCEPT_CN,
- 'description': "production except DIT"})))
+ topology_m2.ms["master1"].log.info("Add %s" % PROD_EXCEPT_DN)
+ topology_m2.ms["master1"].add_s(Entry((PROD_EXCEPT_DN, {
+ 'objectclass': "top organizationalRole".split(),
+ 'cn': EXCEPT_CN,
+ 'description': "production except DIT"})))
# enable acl error logging
- #mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '128')]
- #topology.master1.modify_s(DN_CONFIG, mod)
- #topology.master2.modify_s(DN_CONFIG, mod)
+ # mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '128')]
+ # topology_m2.ms["master1"].modify_s(DN_CONFIG, mod)
+ # topology_m2.ms["master2"].modify_s(DN_CONFIG, mod)
# add dummy entries in the staging DIT
for cpt in range(MAX_ACCOUNTS):
name = "%s%d" % (NEW_ACCOUNT, cpt)
- topology.master1.add_s(Entry(("cn=%s,%s" % (name, STAGING_DN), {
- 'objectclass': "top person".split(),
- 'sn': name,
- 'cn': name})))
+ topology_m2.ms["master1"].add_s(Entry(("cn=%s,%s" % (name, STAGING_DN), {
+ 'objectclass': "top person".split(),
+ 'sn': name,
+ 'cn': name})))
-def test_mode_default_add_deny(topology, moddn_setup):
+def test_mode_default_add_deny(topology_m2, moddn_setup):
"""This test case checks
that the ADD operation fails (no ADD aci on production)
"""
- topology.master1.log.info("\n\n######## mode moddn_aci : ADD (should fail) ########\n")
+ topology_m2.ms["master1"].log.info("\n\n######## mode moddn_aci : ADD (should fail) ########\n")
- _bind_normal(topology)
+ _bind_normal(topology_m2)
#
# First try to add an entry in production => INSUFFICIENT_ACCESS
#
try:
- topology.master1.log.info("Try to add %s" % PRODUCTION_DN)
+ topology_m2.ms["master1"].log.info("Try to add %s" % PRODUCTION_DN)
name = "%s%d" % (NEW_ACCOUNT, 0)
- topology.master1.add_s(Entry(("cn=%s,%s" % (name, PRODUCTION_DN), {
- 'objectclass': "top person".split(),
- 'sn': name,
- 'cn': name})))
+ topology_m2.ms["master1"].add_s(Entry(("cn=%s,%s" % (name, PRODUCTION_DN), {
+ 'objectclass': "top person".split(),
+ 'sn': name,
+ 'cn': name})))
assert 0 # this is an error, we should not be allowed to add an entry in production
except Exception as e:
- topology.master1.log.info("Exception (expected): %s" % type(e).__name__)
+ topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__)
assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
-def test_mode_default_delete_deny(topology, moddn_setup):
+def test_mode_default_delete_deny(topology_m2, moddn_setup):
"""This test case checks
that the DEL operation fails (no 'delete' aci on production)
"""
- topology.master1.log.info("\n\n######## DELETE (should fail) ########\n")
+ topology_m2.ms["master1"].log.info("\n\n######## DELETE (should fail) ########\n")
- _bind_normal(topology)
+ _bind_normal(topology_m2)
#
# Second try to delete an entry in staging => INSUFFICIENT_ACCESS
#
try:
- topology.master1.log.info("Try to delete %s" % STAGING_DN)
+ topology_m2.ms["master1"].log.info("Try to delete %s" % STAGING_DN)
name = "%s%d" % (NEW_ACCOUNT, 0)
- topology.master1.delete_s("cn=%s,%s" % (name, STAGING_DN))
+ topology_m2.ms["master1"].delete_s("cn=%s,%s" % (name, STAGING_DN))
assert 0 # this is an error, we should not be allowed to add an entry in production
except Exception as e:
- topology.master1.log.info("Exception (expected): %s" % type(e).__name__)
+ topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__)
assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
@@ -466,17 +341,17 @@ def test_mode_default_delete_deny(topology, moddn_setup):
(6, None, PRODUCTION_PATTERN, False),
(7, STAGING_PATTERN, None, False),
(8, None, None, False)])
-def test_moddn_staging_prod(topology, moddn_setup,
+def test_moddn_staging_prod(topology_m2, moddn_setup,
index, tfrom, tto, failure):
"""This test case MOVE entry NEW_ACCOUNT0 from staging to prod
target_to/target_from: equality filter
"""
- topology.master1.log.info("\n\n######## MOVE staging -> Prod (%s) ########\n" % index)
- _bind_normal(topology)
+ topology_m2.ms["master1"].log.info("\n\n######## MOVE staging -> Prod (%s) ########\n" % index)
+ _bind_normal(topology_m2)
old_rdn = "cn=%s%s" % (NEW_ACCOUNT, index)
- old_dn = "%s,%s" % (old_rdn, STAGING_DN)
+ old_dn = "%s,%s" % (old_rdn, STAGING_DN)
new_rdn = old_rdn
new_superior = PRODUCTION_DN
@@ -484,39 +359,39 @@ def test_moddn_staging_prod(topology, moddn_setup,
# Try to rename without the apropriate ACI => INSUFFICIENT_ACCESS
#
try:
- topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
- topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior)
+ topology_m2.ms["master1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
+ topology_m2.ms["master1"].rename_s(old_dn, new_rdn, newsuperior=new_superior)
assert 0
except AssertionError:
- topology.master1.log.info("Exception (not really expected exception but that is fine as it fails to rename)")
+ topology_m2.ms["master1"].log.info(
+ "Exception (not really expected exception but that is fine as it fails to rename)")
except Exception as e:
- topology.master1.log.info("Exception (expected): %s" % type(e).__name__)
+ topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__)
assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
-
# successfull MOD with the ACI
- topology.master1.log.info("\n\n######## MOVE to and from equality filter ########\n")
- _bind_manager(topology)
- _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD,
+ topology_m2.ms["master1"].log.info("\n\n######## MOVE to and from equality filter ########\n")
+ _bind_manager(topology_m2)
+ _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_ADD,
target_from=tfrom, target_to=tto)
- _bind_normal(topology)
+ _bind_normal(topology_m2)
try:
- topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
- topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior)
+ topology_m2.ms["master1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
+ topology_m2.ms["master1"].rename_s(old_dn, new_rdn, newsuperior=new_superior)
except Exception as e:
- topology.master1.log.info("Exception (expected): %s" % type(e).__name__)
+ topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__)
if failure:
assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
# successfull MOD with the both ACI
- _bind_manager(topology)
- _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE,
+ _bind_manager(topology_m2)
+ _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_DELETE,
target_from=tfrom, target_to=tto)
- _bind_normal(topology)
+ _bind_normal(topology_m2)
-def test_moddn_staging_prod_9(topology, moddn_setup):
+def test_moddn_staging_prod_9(topology_m2, moddn_setup):
"""This test case disable the 'moddn' right so a MODDN requires a 'add' right
to be successfull.
It fails to MOVE entry NEW_ACCOUNT9 from staging to prod.
@@ -529,11 +404,11 @@ def test_moddn_staging_prod_9(topology, moddn_setup):
Then it succeeds to MOVE NEW_ACCOUNT10 from staging to prod.
"""
- topology.master1.log.info("\n\n######## MOVE staging -> Prod (9) ########\n")
+ topology_m2.ms["master1"].log.info("\n\n######## MOVE staging -> Prod (9) ########\n")
- _bind_normal(topology)
+ _bind_normal(topology_m2)
old_rdn = "cn=%s9" % NEW_ACCOUNT
- old_dn = "%s,%s" % (old_rdn, STAGING_DN)
+ old_dn = "%s,%s" % (old_rdn, STAGING_DN)
new_rdn = old_rdn
new_superior = PRODUCTION_DN
@@ -541,83 +416,85 @@ def test_moddn_staging_prod_9(topology, moddn_setup):
# Try to rename without the apropriate ACI => INSUFFICIENT_ACCESS
#
try:
- topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
- topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior)
+ topology_m2.ms["master1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
+ topology_m2.ms["master1"].rename_s(old_dn, new_rdn, newsuperior=new_superior)
assert 0
except AssertionError:
- topology.master1.log.info("Exception (not really expected exception but that is fine as it fails to rename)")
+ topology_m2.ms["master1"].log.info(
+ "Exception (not really expected exception but that is fine as it fails to rename)")
except Exception as e:
- topology.master1.log.info("Exception (expected): %s" % type(e).__name__)
+ topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__)
assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
#############
# Now do tests with no support of moddn aci
#############
- topology.master1.log.info("Disable the moddn right")
- _bind_manager(topology)
+ topology_m2.ms["master1"].log.info("Disable the moddn right")
+ _bind_manager(topology_m2)
mod = [(ldap.MOD_REPLACE, CONFIG_MODDN_ACI_ATTR, 'off')]
- topology.master1.modify_s(DN_CONFIG, mod)
+ topology_m2.ms["master1"].modify_s(DN_CONFIG, mod)
# Add the moddn aci that will not be evaluated because of the config flag
- topology.master1.log.info("\n\n######## MOVE to and from equality filter ########\n")
- _bind_manager(topology)
- _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD,
+ topology_m2.ms["master1"].log.info("\n\n######## MOVE to and from equality filter ########\n")
+ _bind_manager(topology_m2)
+ _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_ADD,
target_from=STAGING_DN, target_to=PRODUCTION_DN)
- _bind_normal(topology)
+ _bind_normal(topology_m2)
# It will fail because it will test the ADD right
try:
- topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
- topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior)
+ topology_m2.ms["master1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
+ topology_m2.ms["master1"].rename_s(old_dn, new_rdn, newsuperior=new_superior)
assert 0
except AssertionError:
- topology.master1.log.info("Exception (not really expected exception but that is fine as it fails to rename)")
+ topology_m2.ms["master1"].log.info(
+ "Exception (not really expected exception but that is fine as it fails to rename)")
except Exception as e:
- topology.master1.log.info("Exception (expected): %s" % type(e).__name__)
+ topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__)
assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
# remove the moddn aci
- _bind_manager(topology)
- _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE,
+ _bind_manager(topology_m2)
+ _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_DELETE,
target_from=STAGING_DN, target_to=PRODUCTION_DN)
- _bind_normal(topology)
+ _bind_normal(topology_m2)
#
# add the 'add' right to the production DN
# Then do a successfull moddn
#
- ACI_ALLOW = "(version 3.0; acl \"ADD rights to allow moddn\"; allow (add)"
- ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN
- ACI_BODY = ACI_ALLOW + ACI_SUBJECT
+ ACI_ALLOW = "(version 3.0; acl \"ADD rights to allow moddn\"; allow (add)"
+ ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN
+ ACI_BODY = ACI_ALLOW + ACI_SUBJECT
- _bind_manager(topology)
+ _bind_manager(topology_m2)
mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)]
- topology.master1.modify_s(PRODUCTION_DN, mod)
- _write_aci_staging(topology, mod_type=ldap.MOD_ADD)
- _bind_normal(topology)
+ topology_m2.ms["master1"].modify_s(PRODUCTION_DN, mod)
+ _write_aci_staging(topology_m2, mod_type=ldap.MOD_ADD)
+ _bind_normal(topology_m2)
- topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
- topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior)
+ topology_m2.ms["master1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
+ topology_m2.ms["master1"].rename_s(old_dn, new_rdn, newsuperior=new_superior)
- _bind_manager(topology)
+ _bind_manager(topology_m2)
mod = [(ldap.MOD_DELETE, 'aci', ACI_BODY)]
- topology.master1.modify_s(PRODUCTION_DN, mod)
- _write_aci_staging(topology, mod_type=ldap.MOD_DELETE)
- _bind_normal(topology)
+ topology_m2.ms["master1"].modify_s(PRODUCTION_DN, mod)
+ _write_aci_staging(topology_m2, mod_type=ldap.MOD_DELETE)
+ _bind_normal(topology_m2)
#############
# Now do tests with support of moddn aci
#############
- topology.master1.log.info("Enable the moddn right")
- _bind_manager(topology)
+ topology_m2.ms["master1"].log.info("Enable the moddn right")
+ _bind_manager(topology_m2)
mod = [(ldap.MOD_REPLACE, CONFIG_MODDN_ACI_ATTR, 'on')]
- topology.master1.modify_s(DN_CONFIG, mod)
+ topology_m2.ms["master1"].modify_s(DN_CONFIG, mod)
- topology.master1.log.info("\n\n######## MOVE staging -> Prod (10) ########\n")
+ topology_m2.ms["master1"].log.info("\n\n######## MOVE staging -> Prod (10) ########\n")
- _bind_normal(topology)
+ _bind_normal(topology_m2)
old_rdn = "cn=%s10" % NEW_ACCOUNT
- old_dn = "%s,%s" % (old_rdn, STAGING_DN)
+ old_dn = "%s,%s" % (old_rdn, STAGING_DN)
new_rdn = old_rdn
new_superior = PRODUCTION_DN
@@ -625,73 +502,75 @@ def test_moddn_staging_prod_9(topology, moddn_setup):
# Try to rename without the apropriate ACI => INSUFFICIENT_ACCESS
#
try:
- topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
- topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior)
+ topology_m2.ms["master1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
+ topology_m2.ms["master1"].rename_s(old_dn, new_rdn, newsuperior=new_superior)
assert 0
except AssertionError:
- topology.master1.log.info("Exception (not really expected exception but that is fine as it fails to rename)")
+ topology_m2.ms["master1"].log.info(
+ "Exception (not really expected exception but that is fine as it fails to rename)")
except Exception as e:
- topology.master1.log.info("Exception (expected): %s" % type(e).__name__)
+ topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__)
assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
#
# add the 'add' right to the production DN
# Then do a failing moddn
#
- ACI_ALLOW = "(version 3.0; acl \"ADD rights to allow moddn\"; allow (add)"
- ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN
- ACI_BODY = ACI_ALLOW + ACI_SUBJECT
+ ACI_ALLOW = "(version 3.0; acl \"ADD rights to allow moddn\"; allow (add)"
+ ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN
+ ACI_BODY = ACI_ALLOW + ACI_SUBJECT
- _bind_manager(topology)
+ _bind_manager(topology_m2)
mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)]
- topology.master1.modify_s(PRODUCTION_DN, mod)
- _write_aci_staging(topology, mod_type=ldap.MOD_ADD)
- _bind_normal(topology)
+ topology_m2.ms["master1"].modify_s(PRODUCTION_DN, mod)
+ _write_aci_staging(topology_m2, mod_type=ldap.MOD_ADD)
+ _bind_normal(topology_m2)
try:
- topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
- topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior)
+ topology_m2.ms["master1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
+ topology_m2.ms["master1"].rename_s(old_dn, new_rdn, newsuperior=new_superior)
assert 0
except AssertionError:
- topology.master1.log.info("Exception (not really expected exception but that is fine as it fails to rename)")
+ topology_m2.ms["master1"].log.info(
+ "Exception (not really expected exception but that is fine as it fails to rename)")
except Exception as e:
- topology.master1.log.info("Exception (expected): %s" % type(e).__name__)
+ topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__)
assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
- _bind_manager(topology)
+ _bind_manager(topology_m2)
mod = [(ldap.MOD_DELETE, 'aci', ACI_BODY)]
- topology.master1.modify_s(PRODUCTION_DN, mod)
- _write_aci_staging(topology, mod_type=ldap.MOD_DELETE)
- _bind_normal(topology)
+ topology_m2.ms["master1"].modify_s(PRODUCTION_DN, mod)
+ _write_aci_staging(topology_m2, mod_type=ldap.MOD_DELETE)
+ _bind_normal(topology_m2)
# Add the moddn aci that will be evaluated because of the config flag
- topology.master1.log.info("\n\n######## MOVE to and from equality filter ########\n")
- _bind_manager(topology)
- _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD,
+ topology_m2.ms["master1"].log.info("\n\n######## MOVE to and from equality filter ########\n")
+ _bind_manager(topology_m2)
+ _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_ADD,
target_from=STAGING_DN, target_to=PRODUCTION_DN)
- _bind_normal(topology)
+ _bind_normal(topology_m2)
- topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
- topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior)
+ topology_m2.ms["master1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
+ topology_m2.ms["master1"].rename_s(old_dn, new_rdn, newsuperior=new_superior)
# remove the moddn aci
- _bind_manager(topology)
- _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE,
+ _bind_manager(topology_m2)
+ _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_DELETE,
target_from=STAGING_DN, target_to=PRODUCTION_DN)
- _bind_normal(topology)
+ _bind_normal(topology_m2)
-def test_moddn_prod_staging(topology, moddn_setup):
+def test_moddn_prod_staging(topology_m2, moddn_setup):
"""This test checks that we can move ACCOUNT11 from staging to prod
but not move back ACCOUNT11 from prod to staging
"""
- topology.master1.log.info("\n\n######## MOVE staging -> Prod (11) ########\n")
+ topology_m2.ms["master1"].log.info("\n\n######## MOVE staging -> Prod (11) ########\n")
- _bind_normal(topology)
+ _bind_normal(topology_m2)
old_rdn = "cn=%s11" % NEW_ACCOUNT
- old_dn = "%s,%s" % (old_rdn, STAGING_DN)
+ old_dn = "%s,%s" % (old_rdn, STAGING_DN)
new_rdn = old_rdn
new_superior = PRODUCTION_DN
@@ -699,71 +578,73 @@ def test_moddn_prod_staging(topology, moddn_setup):
# Try to rename without the apropriate ACI => INSUFFICIENT_ACCESS
#
try:
- topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
- topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior)
+ topology_m2.ms["master1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
+ topology_m2.ms["master1"].rename_s(old_dn, new_rdn, newsuperior=new_superior)
assert 0
except AssertionError:
- topology.master1.log.info("Exception (not really expected exception but that is fine as it fails to rename)")
+ topology_m2.ms["master1"].log.info(
+ "Exception (not really expected exception but that is fine as it fails to rename)")
except Exception as e:
- topology.master1.log.info("Exception (expected): %s" % type(e).__name__)
+ topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__)
assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
# successfull MOD with the ACI
- topology.master1.log.info("\n\n######## MOVE to and from equality filter ########\n")
- _bind_manager(topology)
- _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD,
+ topology_m2.ms["master1"].log.info("\n\n######## MOVE to and from equality filter ########\n")
+ _bind_manager(topology_m2)
+ _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_ADD,
target_from=STAGING_DN, target_to=PRODUCTION_DN)
- _bind_normal(topology)
+ _bind_normal(topology_m2)
- topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
- topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior)
+ topology_m2.ms["master1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
+ topology_m2.ms["master1"].rename_s(old_dn, new_rdn, newsuperior=new_superior)
# Now check we can not move back the entry to staging
old_rdn = "cn=%s11" % NEW_ACCOUNT
- old_dn = "%s,%s" % (old_rdn, PRODUCTION_DN)
+ old_dn = "%s,%s" % (old_rdn, PRODUCTION_DN)
new_rdn = old_rdn
new_superior = STAGING_DN
# add the write right because we want to check the moddn
- _bind_manager(topology)
- _write_aci_production(topology, mod_type=ldap.MOD_ADD)
- _bind_normal(topology)
+ _bind_manager(topology_m2)
+ _write_aci_production(topology_m2, mod_type=ldap.MOD_ADD)
+ _bind_normal(topology_m2)
try:
- topology.master1.log.info("Try to move back MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
- topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior)
+ topology_m2.ms["master1"].log.info("Try to move back MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
+ topology_m2.ms["master1"].rename_s(old_dn, new_rdn, newsuperior=new_superior)
assert 0
except AssertionError:
- topology.master1.log.info("Exception (not really expected exception but that is fine as it fails to rename)")
+ topology_m2.ms["master1"].log.info(
+ "Exception (not really expected exception but that is fine as it fails to rename)")
except Exception as e:
- topology.master1.log.info("Exception (expected): %s" % type(e).__name__)
+ topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__)
assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
- _bind_manager(topology)
- _write_aci_production(topology, mod_type=ldap.MOD_DELETE)
- _bind_normal(topology)
+ _bind_manager(topology_m2)
+ _write_aci_production(topology_m2, mod_type=ldap.MOD_DELETE)
+ _bind_normal(topology_m2)
# successfull MOD with the both ACI
- _bind_manager(topology)
- _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE,
+ _bind_manager(topology_m2)
+ _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_DELETE,
target_from=STAGING_DN, target_to=PRODUCTION_DN)
- _bind_normal(topology)
+ _bind_normal(topology_m2)
-def test_check_repl_M2_to_M1(topology, moddn_setup):
+def test_check_repl_M2_to_M1(topology_m2, moddn_setup):
"""Checks that replication is still working M2->M1, using ACCOUNT12"""
- topology.master1.log.info("Bind as %s (M2)" % DN_DM)
- topology.master2.simple_bind_s(DN_DM, PASSWORD)
+ topology_m2.ms["master1"].log.info("Bind as %s (M2)" % DN_DM)
+ topology_m2.ms["master2"].simple_bind_s(DN_DM, PASSWORD)
rdn = "cn=%s12" % NEW_ACCOUNT
- dn = "%s,%s" % (rdn, STAGING_DN)
+ dn = "%s,%s" % (rdn, STAGING_DN)
# First wait for the ACCOUNT19 entry being replicated on M2
loop = 0
while loop <= 10:
try:
- ent = topology.master2.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
+ ent = topology_m2.ms["master2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
break
except ldap.NO_SUCH_OBJECT:
time.sleep(1)
@@ -773,12 +654,12 @@ def test_check_repl_M2_to_M1(topology, moddn_setup):
attribute = 'description'
tested_value = 'Hello world'
mod = [(ldap.MOD_ADD, attribute, tested_value)]
- topology.master1.log.info("Update (M2) %s (%s)" % (dn, attribute))
- topology.master2.modify_s(dn, mod)
+ topology_m2.ms["master1"].log.info("Update (M2) %s (%s)" % (dn, attribute))
+ topology_m2.ms["master2"].modify_s(dn, mod)
loop = 0
while loop <= 10:
- ent = topology.master1.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
+ ent = topology_m2.ms["master1"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
assert ent is not None
if ent.hasAttr(attribute) and (ent.getValue(attribute) == tested_value):
break
@@ -786,19 +667,19 @@ def test_check_repl_M2_to_M1(topology, moddn_setup):
time.sleep(1)
loop += 1
assert loop < 10
- topology.master1.log.info("Update %s (%s) replicated on M1" % (dn, attribute))
+ topology_m2.ms["master1"].log.info("Update %s (%s) replicated on M1" % (dn, attribute))
-def test_moddn_staging_prod_except(topology, moddn_setup):
+def test_moddn_staging_prod_except(topology_m2, moddn_setup):
"""This test case MOVE entry NEW_ACCOUNT13 from staging to prod
but fails to move entry NEW_ACCOUNT14 from staging to prod_except
"""
- topology.master1.log.info("\n\n######## MOVE staging -> Prod (13) ########\n")
- _bind_normal(topology)
+ topology_m2.ms["master1"].log.info("\n\n######## MOVE staging -> Prod (13) ########\n")
+ _bind_normal(topology_m2)
old_rdn = "cn=%s13" % NEW_ACCOUNT
- old_dn = "%s,%s" % (old_rdn, STAGING_DN)
+ old_dn = "%s,%s" % (old_rdn, STAGING_DN)
new_rdn = old_rdn
new_superior = PRODUCTION_DN
@@ -806,250 +687,252 @@ def test_moddn_staging_prod_except(topology, moddn_setup):
# Try to rename without the apropriate ACI => INSUFFICIENT_ACCESS
#
try:
- topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
- topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior)
+ topology_m2.ms["master1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
+ topology_m2.ms["master1"].rename_s(old_dn, new_rdn, newsuperior=new_superior)
assert 0
except AssertionError:
- topology.master1.log.info("Exception (not really expected exception but that is fine as it fails to rename)")
+ topology_m2.ms["master1"].log.info(
+ "Exception (not really expected exception but that is fine as it fails to rename)")
except Exception as e:
- topology.master1.log.info("Exception (expected): %s" % type(e).__name__)
+ topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__)
assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
# successfull MOD with the ACI
- topology.master1.log.info("\n\n######## MOVE to and from equality filter ########\n")
- _bind_manager(topology)
- _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD,
+ topology_m2.ms["master1"].log.info("\n\n######## MOVE to and from equality filter ########\n")
+ _bind_manager(topology_m2)
+ _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_ADD,
target_from=STAGING_DN, target_to=PRODUCTION_DN)
- _moddn_aci_deny_tree(topology, mod_type=ldap.MOD_ADD)
- _bind_normal(topology)
+ _moddn_aci_deny_tree(topology_m2, mod_type=ldap.MOD_ADD)
+ _bind_normal(topology_m2)
- topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
- topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior)
+ topology_m2.ms["master1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
+ topology_m2.ms["master1"].rename_s(old_dn, new_rdn, newsuperior=new_superior)
#
# Now try to move an entry under except
#
- topology.master1.log.info("\n\n######## MOVE staging -> Prod/Except (14) ########\n")
+ topology_m2.ms["master1"].log.info("\n\n######## MOVE staging -> Prod/Except (14) ########\n")
old_rdn = "cn=%s14" % NEW_ACCOUNT
- old_dn = "%s,%s" % (old_rdn, STAGING_DN)
+ old_dn = "%s,%s" % (old_rdn, STAGING_DN)
new_rdn = old_rdn
new_superior = PROD_EXCEPT_DN
try:
- topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
- topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior)
+ topology_m2.ms["master1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
+ topology_m2.ms["master1"].rename_s(old_dn, new_rdn, newsuperior=new_superior)
assert 0
except AssertionError:
- topology.master1.log.info("Exception (not really expected exception but that is fine as it fails to rename)")
+ topology_m2.ms["master1"].log.info(
+ "Exception (not really expected exception but that is fine as it fails to rename)")
except Exception as e:
- topology.master1.log.info("Exception (expected): %s" % type(e).__name__)
+ topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__)
assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
# successfull MOD with the both ACI
- _bind_manager(topology)
- _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE,
+ _bind_manager(topology_m2)
+ _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_DELETE,
target_from=STAGING_DN, target_to=PRODUCTION_DN)
- _moddn_aci_deny_tree(topology, mod_type=ldap.MOD_DELETE)
- _bind_normal(topology)
+ _moddn_aci_deny_tree(topology_m2, mod_type=ldap.MOD_DELETE)
+ _bind_normal(topology_m2)
-def test_mode_default_ger_no_moddn(topology, moddn_setup):
- topology.master1.log.info("\n\n######## mode moddn_aci : GER no moddn ########\n")
+def test_mode_default_ger_no_moddn(topology_m2, moddn_setup):
+ topology_m2.ms["master1"].log.info("\n\n######## mode moddn_aci : GER no moddn ########\n")
request_ctrl = GetEffectiveRightsControl(criticality=True, authzId="dn: " + BIND_DN)
- msg_id = topology.master1.search_ext(PRODUCTION_DN,
- ldap.SCOPE_SUBTREE,
- "objectclass=*",
- serverctrls=[request_ctrl])
- rtype, rdata, rmsgid, response_ctrl = topology.master1.result3(msg_id)
- #ger={}
+ msg_id = topology_m2.ms["master1"].search_ext(PRODUCTION_DN,
+ ldap.SCOPE_SUBTREE,
+ "objectclass=*",
+ serverctrls=[request_ctrl])
+ rtype, rdata, rmsgid, response_ctrl = topology_m2.ms["master1"].result3(msg_id)
+ # ger={}
value = ''
for dn, attrs in rdata:
- topology.master1.log.info("dn: %s" % dn)
+ topology_m2.ms["master1"].log.info("dn: %s" % dn)
value = attrs['entryLevelRights'][0]
- topology.master1.log.info("######## entryLevelRights: %r" % value)
+ topology_m2.ms["master1"].log.info("######## entryLevelRights: %r" % value)
assert 'n' not in value
-def test_mode_default_ger_with_moddn(topology, moddn_setup):
+def test_mode_default_ger_with_moddn(topology_m2, moddn_setup):
"""This test case adds the moddn aci and check ger contains 'n'"""
- topology.master1.log.info("\n\n######## mode moddn_aci: GER with moddn ########\n")
+ topology_m2.ms["master1"].log.info("\n\n######## mode moddn_aci: GER with moddn ########\n")
# successfull MOD with the ACI
- _bind_manager(topology)
- _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD,
+ _bind_manager(topology_m2)
+ _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_ADD,
target_from=STAGING_DN, target_to=PRODUCTION_DN)
- _bind_normal(topology)
+ _bind_normal(topology_m2)
request_ctrl = GetEffectiveRightsControl(criticality=True, authzId="dn: " + BIND_DN)
- msg_id = topology.master1.search_ext(PRODUCTION_DN,
- ldap.SCOPE_SUBTREE,
- "objectclass=*",
- serverctrls=[request_ctrl])
- rtype, rdata, rmsgid, response_ctrl = topology.master1.result3(msg_id)
- #ger={}
+ msg_id = topology_m2.ms["master1"].search_ext(PRODUCTION_DN,
+ ldap.SCOPE_SUBTREE,
+ "objectclass=*",
+ serverctrls=[request_ctrl])
+ rtype, rdata, rmsgid, response_ctrl = topology_m2.ms["master1"].result3(msg_id)
+ # ger={}
value = ''
for dn, attrs in rdata:
- topology.master1.log.info("dn: %s" % dn)
+ topology_m2.ms["master1"].log.info("dn: %s" % dn)
value = attrs['entryLevelRights'][0]
- topology.master1.log.info("######## entryLevelRights: %r" % value)
+ topology_m2.ms["master1"].log.info("######## entryLevelRights: %r" % value)
assert 'n' in value
# successfull MOD with the both ACI
- _bind_manager(topology)
- _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE,
+ _bind_manager(topology_m2)
+ _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_DELETE,
target_from=STAGING_DN, target_to=PRODUCTION_DN)
- _bind_normal(topology)
+ _bind_normal(topology_m2)
-def test_mode_switch_default_to_legacy(topology, moddn_setup):
+def test_mode_switch_default_to_legacy(topology_m2, moddn_setup):
"""This test switch the server from default mode to legacy"""
- topology.master1.log.info("\n\n######## Disable the moddn aci mod ########\n")
- _bind_manager(topology)
+ topology_m2.ms["master1"].log.info("\n\n######## Disable the moddn aci mod ########\n")
+ _bind_manager(topology_m2)
mod = [(ldap.MOD_REPLACE, CONFIG_MODDN_ACI_ATTR, 'off')]
- topology.master1.modify_s(DN_CONFIG, mod)
+ topology_m2.ms["master1"].modify_s(DN_CONFIG, mod)
-def test_mode_legacy_ger_no_moddn1(topology, moddn_setup):
- topology.master1.log.info("\n\n######## mode legacy 1: GER no moddn ########\n")
+def test_mode_legacy_ger_no_moddn1(topology_m2, moddn_setup):
+ topology_m2.ms["master1"].log.info("\n\n######## mode legacy 1: GER no moddn ########\n")
request_ctrl = GetEffectiveRightsControl(criticality=True, authzId="dn: " + BIND_DN)
- msg_id = topology.master1.search_ext(PRODUCTION_DN,
- ldap.SCOPE_SUBTREE,
- "objectclass=*",
- serverctrls=[request_ctrl])
- rtype, rdata, rmsgid, response_ctrl = topology.master1.result3(msg_id)
- #ger={}
+ msg_id = topology_m2.ms["master1"].search_ext(PRODUCTION_DN,
+ ldap.SCOPE_SUBTREE,
+ "objectclass=*",
+ serverctrls=[request_ctrl])
+ rtype, rdata, rmsgid, response_ctrl = topology_m2.ms["master1"].result3(msg_id)
+ # ger={}
value = ''
for dn, attrs in rdata:
- topology.master1.log.info("dn: %s" % dn)
+ topology_m2.ms["master1"].log.info("dn: %s" % dn)
value = attrs['entryLevelRights'][0]
- topology.master1.log.info("######## entryLevelRights: %r" % value)
+ topology_m2.ms["master1"].log.info("######## entryLevelRights: %r" % value)
assert 'n' not in value
-def test_mode_legacy_ger_no_moddn2(topology, moddn_setup):
- topology.master1.log.info("\n\n######## mode legacy 2: GER no moddn ########\n")
+def test_mode_legacy_ger_no_moddn2(topology_m2, moddn_setup):
+ topology_m2.ms["master1"].log.info("\n\n######## mode legacy 2: GER no moddn ########\n")
# successfull MOD with the ACI
- _bind_manager(topology)
- _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD,
+ _bind_manager(topology_m2)
+ _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_ADD,
target_from=STAGING_DN, target_to=PRODUCTION_DN)
- _bind_normal(topology)
+ _bind_normal(topology_m2)
request_ctrl = GetEffectiveRightsControl(criticality=True, authzId="dn: " + BIND_DN)
- msg_id = topology.master1.search_ext(PRODUCTION_DN,
- ldap.SCOPE_SUBTREE,
- "objectclass=*",
- serverctrls=[request_ctrl])
- rtype, rdata, rmsgid, response_ctrl = topology.master1.result3(msg_id)
- #ger={}
+ msg_id = topology_m2.ms["master1"].search_ext(PRODUCTION_DN,
+ ldap.SCOPE_SUBTREE,
+ "objectclass=*",
+ serverctrls=[request_ctrl])
+ rtype, rdata, rmsgid, response_ctrl = topology_m2.ms["master1"].result3(msg_id)
+ # ger={}
value = ''
for dn, attrs in rdata:
- topology.master1.log.info("dn: %s" % dn)
+ topology_m2.ms["master1"].log.info("dn: %s" % dn)
value = attrs['entryLevelRights'][0]
- topology.master1.log.info("######## entryLevelRights: %r" % value)
+ topology_m2.ms["master1"].log.info("######## entryLevelRights: %r" % value)
assert 'n' not in value
# successfull MOD with the both ACI
- _bind_manager(topology)
- _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE,
+ _bind_manager(topology_m2)
+ _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_DELETE,
target_from=STAGING_DN, target_to=PRODUCTION_DN)
- _bind_normal(topology)
+ _bind_normal(topology_m2)
-def test_mode_legacy_ger_with_moddn(topology, moddn_setup):
- topology.master1.log.info("\n\n######## mode legacy : GER with moddn ########\n")
+def test_mode_legacy_ger_with_moddn(topology_m2, moddn_setup):
+ topology_m2.ms["master1"].log.info("\n\n######## mode legacy : GER with moddn ########\n")
# being allowed to read/write the RDN attribute use to allow the RDN
ACI_TARGET = "(target = \"ldap:///%s\")(targetattr=\"cn\")" % (PRODUCTION_DN)
- ACI_ALLOW = "(version 3.0; acl \"MODDN production changing the RDN attribute\"; allow (read,search,write)"
- ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN
- ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT
+ ACI_ALLOW = "(version 3.0; acl \"MODDN production changing the RDN attribute\"; allow (read,search,write)"
+ ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN
+ ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT
# successfull MOD with the ACI
- _bind_manager(topology)
+ _bind_manager(topology_m2)
mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)]
- topology.master1.modify_s(SUFFIX, mod)
- _bind_normal(topology)
+ topology_m2.ms["master1"].modify_s(SUFFIX, mod)
+ _bind_normal(topology_m2)
request_ctrl = GetEffectiveRightsControl(criticality=True, authzId="dn: " + BIND_DN)
- msg_id = topology.master1.search_ext(PRODUCTION_DN,
- ldap.SCOPE_SUBTREE,
- "objectclass=*",
- serverctrls=[request_ctrl])
- rtype, rdata, rmsgid, response_ctrl = topology.master1.result3(msg_id)
- #ger={}
+ msg_id = topology_m2.ms["master1"].search_ext(PRODUCTION_DN,
+ ldap.SCOPE_SUBTREE,
+ "objectclass=*",
+ serverctrls=[request_ctrl])
+ rtype, rdata, rmsgid, response_ctrl = topology_m2.ms["master1"].result3(msg_id)
+ # ger={}
value = ''
for dn, attrs in rdata:
- topology.master1.log.info("dn: %s" % dn)
+ topology_m2.ms["master1"].log.info("dn: %s" % dn)
value = attrs['entryLevelRights'][0]
- topology.master1.log.info("######## entryLevelRights: %r" % value)
+ topology_m2.ms["master1"].log.info("######## entryLevelRights: %r" % value)
assert 'n' in value
# successfull MOD with the both ACI
- _bind_manager(topology)
+ _bind_manager(topology_m2)
mod = [(ldap.MOD_DELETE, 'aci', ACI_BODY)]
- topology.master1.modify_s(SUFFIX, mod)
- #_bind_normal(topology)
+ topology_m2.ms["master1"].modify_s(SUFFIX, mod)
+ # _bind_normal(topology_m2)
@pytest.fixture(scope="module")
-def rdn_write_setup(topology):
- topology.master1.log.info("\n\n######## Add entry tuser ########\n")
- topology.master1.add_s(Entry((SRC_ENTRY_DN, {
- 'objectclass': "top person".split(),
- 'sn': SRC_ENTRY_CN,
- 'cn': SRC_ENTRY_CN})))
+def rdn_write_setup(topology_m2):
+ topology_m2.ms["master1"].log.info("\n\n######## Add entry tuser ########\n")
+ topology_m2.ms["master1"].add_s(Entry((SRC_ENTRY_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': SRC_ENTRY_CN,
+ 'cn': SRC_ENTRY_CN})))
-def test_rdn_write_get_ger(topology, rdn_write_setup):
+def test_rdn_write_get_ger(topology_m2, rdn_write_setup):
ANONYMOUS_DN = ""
- topology.master1.log.info("\n\n######## GER rights for anonymous ########\n")
+ topology_m2.ms["master1"].log.info("\n\n######## GER rights for anonymous ########\n")
request_ctrl = GetEffectiveRightsControl(criticality=True,
authzId="dn:" + ANONYMOUS_DN)
- msg_id = topology.master1.search_ext(SUFFIX,
- ldap.SCOPE_SUBTREE,
- "objectclass=*",
- serverctrls=[request_ctrl])
- rtype, rdata, rmsgid, response_ctrl = topology.master1.result3(msg_id)
+ msg_id = topology_m2.ms["master1"].search_ext(SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ "objectclass=*",
+ serverctrls=[request_ctrl])
+ rtype, rdata, rmsgid, response_ctrl = topology_m2.ms["master1"].result3(msg_id)
value = ''
for dn, attrs in rdata:
- topology.master1.log.info("dn: %s" % dn)
+ topology_m2.ms["master1"].log.info("dn: %s" % dn)
for value in attrs['entryLevelRights']:
- topology.master1.log.info("######## entryLevelRights: %r" % value)
+ topology_m2.ms["master1"].log.info("######## entryLevelRights: %r" % value)
assert 'n' not in value
-def test_rdn_write_modrdn_anonymous(topology, rdn_write_setup):
+def test_rdn_write_modrdn_anonymous(topology_m2, rdn_write_setup):
ANONYMOUS_DN = ""
- topology.master1.close()
- topology.master1.binddn = ANONYMOUS_DN
- topology.master1.open()
- msg_id = topology.master1.search_ext("", ldap.SCOPE_BASE, "objectclass=*")
- rtype, rdata, rmsgid, response_ctrl = topology.master1.result3(msg_id)
+ topology_m2.ms["master1"].close()
+ topology_m2.ms["master1"].binddn = ANONYMOUS_DN
+ topology_m2.ms["master1"].open()
+ msg_id = topology_m2.ms["master1"].search_ext("", ldap.SCOPE_BASE, "objectclass=*")
+ rtype, rdata, rmsgid, response_ctrl = topology_m2.ms["master1"].result3(msg_id)
for dn, attrs in rdata:
- topology.master1.log.info("dn: %s" % dn)
+ topology_m2.ms["master1"].log.info("dn: %s" % dn)
for attr in attrs:
- topology.master1.log.info("######## %r: %r" % (attr, attrs[attr]))
+ topology_m2.ms["master1"].log.info("######## %r: %r" % (attr, attrs[attr]))
try:
- topology.master1.rename_s(SRC_ENTRY_DN, "cn=%s" % DST_ENTRY_CN, delold=True)
+ topology_m2.ms["master1"].rename_s(SRC_ENTRY_DN, "cn=%s" % DST_ENTRY_CN, delold=True)
except Exception as e:
- topology.master1.log.info("Exception (expected): %s" % type(e).__name__)
+ topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__)
isinstance(e, ldap.INSUFFICIENT_ACCESS)
try:
- topology.master1.getEntry(DST_ENTRY_DN, ldap.SCOPE_BASE, "objectclass=*")
+ topology_m2.ms["master1"].getEntry(DST_ENTRY_DN, ldap.SCOPE_BASE, "objectclass=*")
assert False
except Exception as e:
- topology.master1.log.info("The entry was not renamed (expected)")
+ topology_m2.ms["master1"].log.info("The entry was not renamed (expected)")
isinstance(e, ldap.NO_SUCH_OBJECT)
- _bind_manager(topology)
+ _bind_manager(topology_m2)
if __name__ == '__main__':
diff --git a/dirsrvtests/tests/suites/attr_encryption/attr_encrypt_test.py b/dirsrvtests/tests/suites/attr_encryption/attr_encrypt_test.py
deleted file mode 100644
index 6116aed..0000000
--- a/dirsrvtests/tests/suites/attr_encryption/attr_encrypt_test.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_attr_encrypt_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_attr_encrypt_(topology):
- '''
- Write a single test here...
- '''
- log.info('attr_encrypt test suite PASSED')
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/attr_uniqueness_plugin/attr_uniqueness_test.py b/dirsrvtests/tests/suites/attr_uniqueness_plugin/attr_uniqueness_test.py
index 6550f43..5da6fe3 100644
--- a/dirsrvtests/tests/suites/attr_uniqueness_plugin/attr_uniqueness_test.py
+++ b/dirsrvtests/tests/suites/attr_uniqueness_plugin/attr_uniqueness_test.py
@@ -15,72 +15,37 @@ from lib389._constants import *
from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX
USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
-def test_attr_uniqueness_init(topology):
+def test_attr_uniqueness_init(topology_st):
'''
Enable dynamic plugins - makes things easier
'''
try:
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
+ topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
except ldap.LDAPError as e:
ldap.fatal('Failed to enable dynamic plugin!' + e.message['desc'])
assert False
- topology.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS)
+ topology_st.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS)
-def test_attr_uniqueness(topology):
+def test_attr_uniqueness(topology_st):
log.info('Running test_attr_uniqueness...')
#
# Configure plugin
#
try:
- topology.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
- [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'uid')])
+ topology_st.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
+ [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'uid')])
except ldap.LDAPError as e:
log.fatal('test_attr_uniqueness: Failed to configure plugin for "uid": error ' + e.message['desc'])
@@ -88,25 +53,25 @@ def test_attr_uniqueness(topology):
# Add an entry
try:
- topology.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(),
- 'sn': '1',
- 'cn': 'user 1',
- 'uid': 'user1',
- 'mail': 'user1@example.com',
- 'mailAlternateAddress': 'user1@alt.example.com',
- 'userpassword': 'password'})))
+ topology_st.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(),
+ 'sn': '1',
+ 'cn': 'user 1',
+ 'uid': 'user1',
+ 'mail': 'user1@example.com',
+ 'mailAlternateAddress': 'user1@alt.example.com',
+ 'userpassword': 'password'})))
except ldap.LDAPError as e:
log.fatal('test_attr_uniqueness: Failed to add test user' + USER1_DN + ': error ' + e.message['desc'])
assert False
# Add an entry with a duplicate "uid"
try:
- topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
- 'sn': '2',
- 'cn': 'user 2',
- 'uid': 'user2',
- 'uid': 'user1',
- 'userpassword': 'password'})))
+ topology_st.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
+ 'sn': '2',
+ 'cn': 'user 2',
+ 'uid': 'user2',
+ 'uid': 'user1',
+ 'userpassword': 'password'})))
except ldap.CONSTRAINT_VIOLATION:
pass
else:
@@ -117,8 +82,8 @@ def test_attr_uniqueness(topology):
# Change config to use "mail" instead of "uid"
#
try:
- topology.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
- [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'mail')])
+ topology_st.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
+ [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'mail')])
except ldap.LDAPError as e:
log.fatal('test_attr_uniqueness: Failed to configure plugin for "mail": error ' + e.message['desc'])
@@ -128,12 +93,12 @@ def test_attr_uniqueness(topology):
# Test plugin - Add an entry, that has a duplicate "mail" value
#
try:
- topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
- 'sn': '2',
- 'cn': 'user 2',
- 'uid': 'user2',
- 'mail': 'user1@example.com',
- 'userpassword': 'password'})))
+ topology_st.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
+ 'sn': '2',
+ 'cn': 'user 2',
+ 'uid': 'user2',
+ 'mail': 'user1@example.com',
+ 'userpassword': 'password'})))
except ldap.CONSTRAINT_VIOLATION:
pass
else:
@@ -144,10 +109,10 @@ def test_attr_uniqueness(topology):
# Reconfigure plugin for mail and mailAlternateAddress
#
try:
- topology.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
- [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'mail'),
- (ldap.MOD_ADD, 'uniqueness-attribute-name',
- 'mailAlternateAddress')])
+ topology_st.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
+ [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'mail'),
+ (ldap.MOD_ADD, 'uniqueness-attribute-name',
+ 'mailAlternateAddress')])
except ldap.LDAPError as e:
log.error('test_attr_uniqueness: Failed to reconfigure plugin for "mail mailAlternateAddress": error ' +
e.message['desc'])
@@ -157,12 +122,12 @@ def test_attr_uniqueness(topology):
# Test plugin - Add an entry, that has a duplicate "mail" value
#
try:
- topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
- 'sn': '2',
- 'cn': 'user 2',
- 'uid': 'user2',
- 'mail': 'user1@example.com',
- 'userpassword': 'password'})))
+ topology_st.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
+ 'sn': '2',
+ 'cn': 'user 2',
+ 'uid': 'user2',
+ 'mail': 'user1@example.com',
+ 'userpassword': 'password'})))
except ldap.CONSTRAINT_VIOLATION:
pass
else:
@@ -173,12 +138,12 @@ def test_attr_uniqueness(topology):
# Test plugin - Add an entry, that has a duplicate "mailAlternateAddress" value
#
try:
- topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
- 'sn': '2',
- 'cn': 'user 2',
- 'uid': 'user2',
- 'mailAlternateAddress': 'user1@alt.example.com',
- 'userpassword': 'password'})))
+ topology_st.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
+ 'sn': '2',
+ 'cn': 'user 2',
+ 'uid': 'user2',
+ 'mailAlternateAddress': 'user1@alt.example.com',
+ 'userpassword': 'password'})))
except ldap.CONSTRAINT_VIOLATION:
pass
else:
@@ -189,12 +154,12 @@ def test_attr_uniqueness(topology):
# Test plugin - Add an entry, that has a duplicate "mail" value conflicting mailAlternateAddress
#
try:
- topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
- 'sn': '2',
- 'cn': 'user 2',
- 'uid': 'user2',
- 'mail': 'user1@alt.example.com',
- 'userpassword': 'password'})))
+ topology_st.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
+ 'sn': '2',
+ 'cn': 'user 2',
+ 'uid': 'user2',
+ 'mail': 'user1@alt.example.com',
+ 'userpassword': 'password'})))
except ldap.CONSTRAINT_VIOLATION:
pass
else:
@@ -205,12 +170,12 @@ def test_attr_uniqueness(topology):
# Test plugin - Add an entry, that has a duplicate "mailAlternateAddress" conflicting mail
#
try:
- topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
- 'sn': '2',
- 'cn': 'user 2',
- 'uid': 'user2',
- 'mailAlternateAddress': 'user1@example.com',
- 'userpassword': 'password'})))
+ topology_st.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
+ 'sn': '2',
+ 'cn': 'user 2',
+ 'uid': 'user2',
+ 'mailAlternateAddress': 'user1@example.com',
+ 'userpassword': 'password'})))
except ldap.CONSTRAINT_VIOLATION:
pass
else:
@@ -221,7 +186,7 @@ def test_attr_uniqueness(topology):
# Cleanup
#
try:
- topology.standalone.delete_s(USER1_DN)
+ topology_st.standalone.delete_s(USER1_DN)
except ldap.LDAPError as e:
log.fatal('test_attr_uniqueness: Failed to delete test entry: ' + e.message['desc'])
assert False
diff --git a/dirsrvtests/tests/suites/automember_plugin/automember_test.py b/dirsrvtests/tests/suites/automember_plugin/automember_test.py
deleted file mode 100644
index f3a1113..0000000
--- a/dirsrvtests/tests/suites/automember_plugin/automember_test.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_automember_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_automember_(topology):
- '''
- Write a single test here...
- '''
- log.info('automember test suite PASSED')
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py
index 209bab9..82cd8e5 100644
--- a/dirsrvtests/tests/suites/basic/basic_test.py
+++ b/dirsrvtests/tests/suites/basic/basic_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -21,6 +21,7 @@ from lib389._constants import *
from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
@@ -37,72 +38,32 @@ ROOTDSE_DEF_ATTR_LIST = ('namingContexts',
'vendorName',
'vendorVersion')
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
@pytest.fixture(scope="module")
-def topology(request):
- """This fixture is used to standalone topology for the 'module'."""
-
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-
-@pytest.fixture(scope="module")
-def import_example_ldif(topology):
+def import_example_ldif(topology_st):
"""Import the Example LDIF for the tests in this suite"""
log.info('Initializing the "basic" test suite')
- ldif = '%s/Example.ldif' % get_data_dir(topology.standalone.prefix)
- import_ldif = topology.standalone.get_ldif_dir() + "/Example.ldif"
+ ldif = '%s/Example.ldif' % get_data_dir(topology_st.standalone.prefix)
+ import_ldif = topology_st.standalone.get_ldif_dir() + "/Example.ldif"
shutil.copyfile(ldif, import_ldif)
try:
- topology.standalone.tasks.importLDIF(suffix=DEFAULT_SUFFIX,
- input_file=import_ldif,
- args={TASK_WAIT: True})
+ topology_st.standalone.tasks.importLDIF(suffix=DEFAULT_SUFFIX,
+ input_file=import_ldif,
+ args={TASK_WAIT: True})
except ValueError:
log.error('Online import failed')
assert False
@pytest.fixture(params=ROOTDSE_DEF_ATTR_LIST)
-def rootdse_attr(topology, request):
+def rootdse_attr(topology_st, request):
"""Adds an attr from the list
as the default attr to the rootDSE
"""
# Ensure the server is started and connected
- topology.standalone.start()
+ topology_st.standalone.start()
RETURN_DEFAULT_OPATTR = "nsslapd-return-default-opattr"
rootdse_attr_name = request.param
@@ -111,7 +72,7 @@ def rootdse_attr(topology, request):
rootdse_attr_name))
mod = [(ldap.MOD_ADD, RETURN_DEFAULT_OPATTR, rootdse_attr_name)]
try:
- topology.standalone.modify_s("", mod)
+ topology_st.standalone.modify_s("", mod)
except ldap.LDAPError as e:
log.fatal('Failed to add attr: error (%s)' % (e.message['desc']))
assert False
@@ -121,16 +82,17 @@ def rootdse_attr(topology, request):
rootdse_attr_name))
mod = [(ldap.MOD_DELETE, RETURN_DEFAULT_OPATTR, rootdse_attr_name)]
try:
- topology.standalone.modify_s("", mod)
+ topology_st.standalone.modify_s("", mod)
except ldap.LDAPError as e:
log.fatal('Failed to delete attr: error (%s)' % (e.message['desc']))
assert False
+
request.addfinalizer(fin)
return rootdse_attr_name
-def test_basic_ops(topology, import_example_ldif):
+def test_basic_ops(topology_st, import_example_ldif):
"""Test doing adds, mods, modrdns, and deletes"""
log.info('Running test_basic_ops...')
@@ -147,34 +109,34 @@ def test_basic_ops(topology, import_example_ldif):
# Adds
#
try:
- topology.standalone.add_s(Entry((USER1_DN,
- {'objectclass': "top extensibleObject".split(),
- 'sn': '1',
- 'cn': 'user1',
- 'uid': 'user1',
- 'userpassword': 'password'})))
+ topology_st.standalone.add_s(Entry((USER1_DN,
+ {'objectclass': "top extensibleObject".split(),
+ 'sn': '1',
+ 'cn': 'user1',
+ 'uid': 'user1',
+ 'userpassword': 'password'})))
except ldap.LDAPError as e:
log.error('Failed to add test user' + USER1_DN + ': error ' + e.message['desc'])
assert False
try:
- topology.standalone.add_s(Entry((USER2_DN,
- {'objectclass': "top extensibleObject".split(),
- 'sn': '2',
- 'cn': 'user2',
- 'uid': 'user2',
- 'userpassword': 'password'})))
+ topology_st.standalone.add_s(Entry((USER2_DN,
+ {'objectclass': "top extensibleObject".split(),
+ 'sn': '2',
+ 'cn': 'user2',
+ 'uid': 'user2',
+ 'userpassword': 'password'})))
except ldap.LDAPError as e:
log.error('Failed to add test user' + USER2_DN + ': error ' + e.message['desc'])
assert False
try:
- topology.standalone.add_s(Entry((USER3_DN,
- {'objectclass': "top extensibleObject".split(),
- 'sn': '3',
- 'cn': 'user3',
- 'uid': 'user3',
- 'userpassword': 'password'})))
+ topology_st.standalone.add_s(Entry((USER3_DN,
+ {'objectclass': "top extensibleObject".split(),
+ 'sn': '3',
+ 'cn': 'user3',
+ 'uid': 'user3',
+ 'userpassword': 'password'})))
except ldap.LDAPError as e:
log.error('Failed to add test user' + USER3_DN + ': error ' + e.message['desc'])
assert False
@@ -183,22 +145,22 @@ def test_basic_ops(topology, import_example_ldif):
# Mods
#
try:
- topology.standalone.modify_s(USER1_DN, [(ldap.MOD_ADD, 'description',
- 'New description')])
+ topology_st.standalone.modify_s(USER1_DN, [(ldap.MOD_ADD, 'description',
+ 'New description')])
except ldap.LDAPError as e:
log.error('Failed to add description: error ' + e.message['desc'])
assert False
try:
- topology.standalone.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'description',
- 'Modified description')])
+ topology_st.standalone.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'description',
+ 'Modified description')])
except ldap.LDAPError as e:
log.error('Failed to modify description: error ' + e.message['desc'])
assert False
try:
- topology.standalone.modify_s(USER1_DN, [(ldap.MOD_DELETE, 'description',
- None)])
+ topology_st.standalone.modify_s(USER1_DN, [(ldap.MOD_DELETE, 'description',
+ None)])
except ldap.LDAPError as e:
log.error('Failed to delete description: error ' + e.message['desc'])
assert False
@@ -207,21 +169,21 @@ def test_basic_ops(topology, import_example_ldif):
# Modrdns
#
try:
- topology.standalone.rename_s(USER1_DN, USER1_NEWDN, delold=1)
+ topology_st.standalone.rename_s(USER1_DN, USER1_NEWDN, delold=1)
except ldap.LDAPError as e:
log.error('Failed to modrdn user1: error ' + e.message['desc'])
assert False
try:
- topology.standalone.rename_s(USER2_DN, USER2_NEWDN, delold=0)
+ topology_st.standalone.rename_s(USER2_DN, USER2_NEWDN, delold=0)
except ldap.LDAPError as e:
log.error('Failed to modrdn user2: error ' + e.message['desc'])
assert False
# Modrdn - New superior
try:
- topology.standalone.rename_s(USER3_DN, USER3_NEWDN,
- newsuperior=NEW_SUPERIOR, delold=1)
+ topology_st.standalone.rename_s(USER3_DN, USER3_NEWDN,
+ newsuperior=NEW_SUPERIOR, delold=1)
except ldap.LDAPError as e:
log.error('Failed to modrdn(new superior) user3: error ' + e.message['desc'])
assert False
@@ -230,19 +192,19 @@ def test_basic_ops(topology, import_example_ldif):
# Deletes
#
try:
- topology.standalone.delete_s(USER1_RDN_DN)
+ topology_st.standalone.delete_s(USER1_RDN_DN)
except ldap.LDAPError as e:
log.error('Failed to delete test entry1: ' + e.message['desc'])
assert False
try:
- topology.standalone.delete_s(USER2_RDN_DN)
+ topology_st.standalone.delete_s(USER2_RDN_DN)
except ldap.LDAPError as e:
log.error('Failed to delete test entry2: ' + e.message['desc'])
assert False
try:
- topology.standalone.delete_s(USER3_RDN_DN)
+ topology_st.standalone.delete_s(USER3_RDN_DN)
except ldap.LDAPError as e:
log.error('Failed to delete test entry3: ' + e.message['desc'])
assert False
@@ -250,7 +212,7 @@ def test_basic_ops(topology, import_example_ldif):
log.info('test_basic_ops: PASSED')
-def test_basic_import_export(topology, import_example_ldif):
+def test_basic_import_export(topology_st, import_example_ldif):
"""Test online and offline LDIF imports & exports"""
log.info('Running test_basic_import_export...')
@@ -262,10 +224,10 @@ def test_basic_import_export(topology, import_example_ldif):
#
# Generate a test ldif (50k entries)
- ldif_dir = topology.standalone.get_ldif_dir()
+ ldif_dir = topology_st.standalone.get_ldif_dir()
import_ldif = ldif_dir + '/basic_import.ldif'
try:
- topology.standalone.buildLDIF(50000, import_ldif)
+ topology_st.standalone.buildLDIF(50000, import_ldif)
except OSError as e:
log.fatal('test_basic_import_export: failed to create test ldif,\
error: %s - %s' % (e.errno, e.strerror))
@@ -273,15 +235,15 @@ def test_basic_import_export(topology, import_example_ldif):
# Online
try:
- topology.standalone.tasks.importLDIF(suffix=DEFAULT_SUFFIX,
- input_file=import_ldif,
- args={TASK_WAIT: True})
+ topology_st.standalone.tasks.importLDIF(suffix=DEFAULT_SUFFIX,
+ input_file=import_ldif,
+ args={TASK_WAIT: True})
except ValueError:
log.fatal('test_basic_import_export: Online import failed')
assert False
# Offline
- if not topology.standalone.ldif2db(DEFAULT_BENAME, None, None, None, import_ldif):
+ if not topology_st.standalone.ldif2db(DEFAULT_BENAME, None, None, None, import_ldif):
log.fatal('test_basic_import_export: Offline import failed')
assert False
@@ -291,7 +253,7 @@ def test_basic_import_export(topology, import_example_ldif):
# Online export
export_ldif = ldif_dir + '/export.ldif'
- exportTask = Tasks(topology.standalone)
+ exportTask = Tasks(topology_st.standalone)
try:
args = {TASK_WAIT: True}
exportTask.exportLDIF(DEFAULT_SUFFIX, None, export_ldif, args)
@@ -300,21 +262,21 @@ def test_basic_import_export(topology, import_example_ldif):
assert False
# Offline export
- if not topology.standalone.db2ldif(DEFAULT_BENAME, (DEFAULT_SUFFIX,),
- None, None, None, export_ldif):
+ if not topology_st.standalone.db2ldif(DEFAULT_BENAME, (DEFAULT_SUFFIX,),
+ None, None, None, export_ldif):
log.fatal('test_basic_import_export: Failed to run offline db2ldif')
assert False
#
# Cleanup - Import the Example LDIF for the other tests in this suite
#
- ldif = '%s/Example.ldif' % get_data_dir(topology.standalone.prefix)
- import_ldif = topology.standalone.get_ldif_dir() + "/Example.ldif"
+ ldif = '%s/Example.ldif' % get_data_dir(topology_st.standalone.prefix)
+ import_ldif = topology_st.standalone.get_ldif_dir() + "/Example.ldif"
shutil.copyfile(ldif, import_ldif)
try:
- topology.standalone.tasks.importLDIF(suffix=DEFAULT_SUFFIX,
- input_file=import_ldif,
- args={TASK_WAIT: True})
+ topology_st.standalone.tasks.importLDIF(suffix=DEFAULT_SUFFIX,
+ input_file=import_ldif,
+ args={TASK_WAIT: True})
except ValueError:
log.fatal('test_basic_import_export: Online import failed')
assert False
@@ -322,43 +284,43 @@ def test_basic_import_export(topology, import_example_ldif):
log.info('test_basic_import_export: PASSED')
-def test_basic_backup(topology, import_example_ldif):
+def test_basic_backup(topology_st, import_example_ldif):
"""Test online and offline back and restore"""
log.info('Running test_basic_backup...')
- backup_dir = topology.standalone.get_bak_dir() + '/backup_test'
+ backup_dir = topology_st.standalone.get_bak_dir() + '/backup_test'
# Test online backup
try:
- topology.standalone.tasks.db2bak(backup_dir=backup_dir,
- args={TASK_WAIT: True})
+ topology_st.standalone.tasks.db2bak(backup_dir=backup_dir,
+ args={TASK_WAIT: True})
except ValueError:
log.fatal('test_basic_backup: Online backup failed')
assert False
# Test online restore
try:
- topology.standalone.tasks.bak2db(backup_dir=backup_dir,
- args={TASK_WAIT: True})
+ topology_st.standalone.tasks.bak2db(backup_dir=backup_dir,
+ args={TASK_WAIT: True})
except ValueError:
log.fatal('test_basic_backup: Online restore failed')
assert False
# Test offline backup
- if not topology.standalone.db2bak(backup_dir):
+ if not topology_st.standalone.db2bak(backup_dir):
log.fatal('test_basic_backup: Offline backup failed')
assert False
# Test offline restore
- if not topology.standalone.bak2db(backup_dir):
+ if not topology_st.standalone.bak2db(backup_dir):
log.fatal('test_basic_backup: Offline backup failed')
assert False
log.info('test_basic_backup: PASSED')
-def test_basic_acl(topology, import_example_ldif):
+def test_basic_acl(topology_st, import_example_ldif):
"""Run some basic access control(ACL) tests"""
log.info('Running test_basic_acl...')
@@ -370,24 +332,24 @@ def test_basic_acl(topology, import_example_ldif):
# Add two users
#
try:
- topology.standalone.add_s(Entry((USER1_DN,
- {'objectclass': "top extensibleObject".split(),
- 'sn': '1',
- 'cn': 'user 1',
- 'uid': 'user1',
- 'userpassword': PASSWORD})))
+ topology_st.standalone.add_s(Entry((USER1_DN,
+ {'objectclass': "top extensibleObject".split(),
+ 'sn': '1',
+ 'cn': 'user 1',
+ 'uid': 'user1',
+ 'userpassword': PASSWORD})))
except ldap.LDAPError as e:
log.fatal('test_basic_acl: Failed to add test user ' + USER1_DN
+ ': error ' + e.message['desc'])
assert False
try:
- topology.standalone.add_s(Entry((USER2_DN,
- {'objectclass': "top extensibleObject".split(),
- 'sn': '2',
- 'cn': 'user 2',
- 'uid': 'user2',
- 'userpassword': PASSWORD})))
+ topology_st.standalone.add_s(Entry((USER2_DN,
+ {'objectclass': "top extensibleObject".split(),
+ 'sn': '2',
+ 'cn': 'user 2',
+ 'uid': 'user2',
+ 'userpassword': PASSWORD})))
except ldap.LDAPError as e:
log.fatal('test_basic_acl: Failed to add test user ' + USER1_DN
+ ': error ' + e.message['desc'])
@@ -398,7 +360,7 @@ def test_basic_acl(topology, import_example_ldif):
# and also set the default anonymous access
#
try:
- topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', DENY_ACI)])
+ topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', DENY_ACI)])
except ldap.LDAPError as e:
log.fatal('test_basic_acl: Failed to add DENY ACI: error ' + e.message['desc'])
assert False
@@ -407,15 +369,15 @@ def test_basic_acl(topology, import_example_ldif):
# Make sure USER1_DN can not search anything, but USER2_dn can...
#
try:
- topology.standalone.simple_bind_s(USER1_DN, PASSWORD)
+ topology_st.standalone.simple_bind_s(USER1_DN, PASSWORD)
except ldap.LDAPError as e:
log.fatal('test_basic_acl: Failed to bind as user1, error: ' + e.message['desc'])
assert False
try:
- entries = topology.standalone.search_s(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- '(uid=*)')
+ entries = topology_st.standalone.search_s(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ '(uid=*)')
if entries:
log.fatal('test_basic_acl: User1 was incorrectly able to search the suffix!')
assert False
@@ -425,15 +387,15 @@ def test_basic_acl(topology, import_example_ldif):
# Now try user2... Also check that userpassword is stripped out
try:
- topology.standalone.simple_bind_s(USER2_DN, PASSWORD)
+ topology_st.standalone.simple_bind_s(USER2_DN, PASSWORD)
except ldap.LDAPError as e:
log.fatal('test_basic_acl: Failed to bind as user2, error: ' + e.message['desc'])
assert False
try:
- entries = topology.standalone.search_s(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- '(uid=user1)')
+ entries = topology_st.standalone.search_s(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ '(uid=user1)')
if not entries:
log.fatal('test_basic_acl: User1 incorrectly not able to search the suffix')
assert False
@@ -448,15 +410,15 @@ def test_basic_acl(topology, import_example_ldif):
# Make sure Root DN can also search (this also resets the bind dn to the
# Root DN for future operations)
try:
- topology.standalone.simple_bind_s(DN_DM, PW_DM)
+ topology_st.standalone.simple_bind_s(DN_DM, PW_DM)
except ldap.LDAPError as e:
log.fatal('test_basic_acl: Failed to bind as ROotDN, error: ' + e.message['desc'])
assert False
try:
- entries = topology.standalone.search_s(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- '(uid=*)')
+ entries = topology_st.standalone.search_s(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ '(uid=*)')
if not entries:
log.fatal('test_basic_acl: Root DN incorrectly not able to search the suffix')
assert False
@@ -468,19 +430,19 @@ def test_basic_acl(topology, import_example_ldif):
# Cleanup
#
try:
- topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', DENY_ACI)])
+ topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', DENY_ACI)])
except ldap.LDAPError as e:
log.fatal('test_basic_acl: Failed to delete DENY ACI: error ' + e.message['desc'])
assert False
try:
- topology.standalone.delete_s(USER1_DN)
+ topology_st.standalone.delete_s(USER1_DN)
except ldap.LDAPError as e:
log.fatal('test_basic_acl: Failed to delete test entry1: ' + e.message['desc'])
assert False
try:
- topology.standalone.delete_s(USER2_DN)
+ topology_st.standalone.delete_s(USER2_DN)
except ldap.LDAPError as e:
log.fatal('test_basic_acl: Failed to delete test entry2: ' + e.message['desc'])
assert False
@@ -488,7 +450,7 @@ def test_basic_acl(topology, import_example_ldif):
log.info('test_basic_acl: PASSED')
-def test_basic_searches(topology, import_example_ldif):
+def test_basic_searches(topology_st, import_example_ldif):
"""The search results are gathered from testing with Example.ldif"""
log.info('Running test_basic_searches...')
@@ -512,9 +474,9 @@ def test_basic_searches(topology, import_example_ldif):
for (search_filter, search_result) in filters:
try:
- entries = topology.standalone.search_s(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- search_filter)
+ entries = topology_st.standalone.search_s(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ search_filter)
if len(entries) != search_result:
log.fatal('test_basic_searches: An incorrect number of entries\
was returned from filter (%s): (%d) expected (%d)' %
@@ -527,7 +489,7 @@ def test_basic_searches(topology, import_example_ldif):
log.info('test_basic_searches: PASSED')
-def test_basic_referrals(topology, import_example_ldif):
+def test_basic_referrals(topology_st, import_example_ldif):
"""Set the server to referral mode,
and make sure we recive the referal error(10)
"""
@@ -540,17 +502,17 @@ def test_basic_referrals(topology, import_example_ldif):
# Set the referral, adn the backend state
#
try:
- topology.standalone.modify_s(SUFFIX_CONFIG,
- [(ldap.MOD_REPLACE,
- 'nsslapd-referral',
- 'ldap://localhost.localdomain:389/o%3dnetscaperoot')])
+ topology_st.standalone.modify_s(SUFFIX_CONFIG,
+ [(ldap.MOD_REPLACE,
+ 'nsslapd-referral',
+ 'ldap://localhost.localdomain:389/o%3dnetscaperoot')])
except ldap.LDAPError as e:
log.fatal('test_basic_referrals: Failed to set referral: error ' + e.message['desc'])
assert False
try:
- topology.standalone.modify_s(SUFFIX_CONFIG, [(ldap.MOD_REPLACE,
- 'nsslapd-state', 'Referral')])
+ topology_st.standalone.modify_s(SUFFIX_CONFIG, [(ldap.MOD_REPLACE,
+ 'nsslapd-state', 'Referral')])
except ldap.LDAPError as e:
log.fatal('test_basic_referrals: Failed to set backend state: error '
+ e.message['desc'])
@@ -559,9 +521,9 @@ def test_basic_referrals(topology, import_example_ldif):
#
# Test that a referral error is returned
#
- topology.standalone.set_option(ldap.OPT_REFERRALS, 0) # Do not follow referral
+ topology_st.standalone.set_option(ldap.OPT_REFERRALS, 0) # Do not follow referral
try:
- topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=top')
+ topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=top')
except ldap.REFERRAL:
pass
except ldap.LDAPError as e:
@@ -571,52 +533,52 @@ def test_basic_referrals(topology, import_example_ldif):
#
# Make sure server can restart in referral mode
#
- topology.standalone.restart(timeout=10)
+ topology_st.standalone.restart(timeout=10)
#
# Cleanup
#
try:
- topology.standalone.modify_s(SUFFIX_CONFIG, [(ldap.MOD_REPLACE,
- 'nsslapd-state', 'Backend')])
+ topology_st.standalone.modify_s(SUFFIX_CONFIG, [(ldap.MOD_REPLACE,
+ 'nsslapd-state', 'Backend')])
except ldap.LDAPError as e:
log.fatal('test_basic_referrals: Failed to set backend state: error '
+ e.message['desc'])
assert False
try:
- topology.standalone.modify_s(SUFFIX_CONFIG, [(ldap.MOD_DELETE,
- 'nsslapd-referral', None)])
+ topology_st.standalone.modify_s(SUFFIX_CONFIG, [(ldap.MOD_DELETE,
+ 'nsslapd-referral', None)])
except ldap.LDAPError as e:
log.fatal('test_basic_referrals: Failed to delete referral: error '
+ e.message['desc'])
assert False
- topology.standalone.set_option(ldap.OPT_REFERRALS, 1)
+ topology_st.standalone.set_option(ldap.OPT_REFERRALS, 1)
log.info('test_basic_referrals: PASSED')
-def test_basic_systemctl(topology, import_example_ldif):
+def test_basic_systemctl(topology_st, import_example_ldif):
"""Test systemctl/lib389 can stop and start the server. Also test that start reports an
error when the instance does not start. Only for RPM builds
"""
log.info('Running test_basic_systemctl...')
- config_dir = topology.standalone.get_config_dir()
+ config_dir = topology_st.standalone.get_config_dir()
#
# Stop the server
#
log.info('Stopping the server...')
- topology.standalone.stop()
+ topology_st.standalone.stop()
log.info('Stopped the server.')
#
# Start the server
#
log.info('Starting the server...')
- topology.standalone.start()
+ topology_st.standalone.start()
log.info('Started the server.')
#
@@ -624,21 +586,21 @@ def test_basic_systemctl(topology, import_example_ldif):
# and verify that systemctl detects the failed start
#
log.info('Stopping the server...')
- topology.standalone.stop()
+ topology_st.standalone.stop()
log.info('Stopped the server before breaking the dse.ldif.')
- shutil.copy(config_dir + '/dse.ldif', config_dir + '/dse.ldif.correct' )
+ shutil.copy(config_dir + '/dse.ldif', config_dir + '/dse.ldif.correct')
open(config_dir + '/dse.ldif', 'w').close()
# We need to kill the .bak file too, DS is just too smart!
open(config_dir + '/dse.ldif.bak', 'w').close()
log.info('Attempting to start the server with broken dse.ldif...')
try:
- topology.standalone.start()
+ topology_st.standalone.start()
except:
log.info('Server failed to start as expected')
log.info('Check the status...')
- assert(not topology.standalone.status())
+ assert (not topology_st.standalone.status())
log.info('Server failed to start as expected')
time.sleep(5)
@@ -646,29 +608,29 @@ def test_basic_systemctl(topology, import_example_ldif):
# Fix the dse.ldif, and make sure the server starts up,
# and systemctl correctly identifies the successful start
#
- shutil.copy(config_dir + '/dse.ldif.correct', config_dir + '/dse.ldif' )
+ shutil.copy(config_dir + '/dse.ldif.correct', config_dir + '/dse.ldif')
log.info('Starting the server with good dse.ldif...')
- topology.standalone.start()
+ topology_st.standalone.start()
log.info('Check the status...')
- assert(topology.standalone.status())
+ assert (topology_st.standalone.status())
log.info('Server started after fixing dse.ldif.')
log.info('test_basic_systemctl: PASSED')
-def test_basic_ldapagent(topology, import_example_ldif):
+def test_basic_ldapagent(topology_st, import_example_ldif):
"""Test that the ldap agent starts"""
log.info('Running test_basic_ldapagent...')
- var_dir = topology.standalone.get_local_state_dir()
- config_file = os.path.join(topology.standalone.get_sysconf_dir(), 'dirsrv/config/agent.conf')
- cmd = 'sudo %s %s' % (os.path.join(topology.standalone.get_sbin_dir(), 'ldap-agent'), config_file)
+ var_dir = topology_st.standalone.get_local_state_dir()
+ config_file = os.path.join(topology_st.standalone.get_sysconf_dir(), 'dirsrv/config/agent.conf')
+ cmd = 'sudo %s %s' % (os.path.join(topology_st.standalone.get_sbin_dir(), 'ldap-agent'), config_file)
agent_config_file = open(config_file, 'w')
agent_config_file.write('agentx-master ' + var_dir + '/agentx/master\n')
agent_config_file.write('agent-logdir ' + var_dir + '/log/dirsrv\n')
- agent_config_file.write('server slapd-' + topology.standalone.serverid + '\n')
+ agent_config_file.write('server slapd-' + topology_st.standalone.serverid + '\n')
agent_config_file.close()
rc = os.system(cmd)
@@ -688,37 +650,37 @@ def test_basic_ldapagent(topology, import_example_ldif):
log.info('test_basic_ldapagent: PASSED')
-def test_basic_dse(topology, import_example_ldif):
+def test_basic_dse(topology_st, import_example_ldif):
"""Test that the dse.ldif is not wipped out
after the process is killed (bug 910581)
"""
log.info('Running test_basic_dse...')
- dse_file = topology.standalone.confdir + '/dse.ldif'
+ dse_file = topology_st.standalone.confdir + '/dse.ldif'
pid = check_output(['pidof', '-s', 'ns-slapd'])
os.system('sudo kill -9 ' + pid)
if os.path.getsize(dse_file) == 0:
log.fatal('test_basic_dse: dse.ldif\'s content was incorrectly removed!')
assert False
- topology.standalone.start(timeout=60)
+ topology_st.standalone.start(timeout=60)
log.info('dse.ldif was not corrupted, and the server was restarted')
log.info('test_basic_dse: PASSED')
@pytest.mark.parametrize("rootdse_attr_name", ROOTDSE_DEF_ATTR_LIST)
-def test_def_rootdse_attr(topology, import_example_ldif, rootdse_attr_name):
+def test_def_rootdse_attr(topology_st, import_example_ldif, rootdse_attr_name):
"""Tests that operational attributes
are not returned by default in rootDSE searches
"""
- topology.standalone.start()
+ topology_st.standalone.start()
log.info(" Assert rootdse search hasn't %s attr" % rootdse_attr_name)
try:
- entries = topology.standalone.search_s("", ldap.SCOPE_BASE)
+ entries = topology_st.standalone.search_s("", ldap.SCOPE_BASE)
entry = str(entries[0])
assert rootdse_attr_name not in entry
@@ -727,14 +689,14 @@ def test_def_rootdse_attr(topology, import_example_ldif, rootdse_attr_name):
assert False
-def test_mod_def_rootdse_attr(topology, import_example_ldif, rootdse_attr):
+def test_mod_def_rootdse_attr(topology_st, import_example_ldif, rootdse_attr):
"""Tests that operational attributes are returned
by default in rootDSE searches after config modification
"""
log.info(" Assert rootdse search has %s attr" % rootdse_attr)
try:
- entries = topology.standalone.search_s("", ldap.SCOPE_BASE)
+ entries = topology_st.standalone.search_s("", ldap.SCOPE_BASE)
entry = str(entries[0])
assert rootdse_attr in entry
diff --git a/dirsrvtests/tests/suites/betxns/betxn_test.py b/dirsrvtests/tests/suites/betxns/betxn_test.py
index aa688de..40021e5 100644
--- a/dirsrvtests/tests/suites/betxns/betxn_test.py
+++ b/dirsrvtests/tests/suites/betxns/betxn_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -19,55 +19,22 @@ from lib389._constants import *
from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
+
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-def test_betxn_init(topology):
+def test_betxn_init(topology_st):
# First enable dynamic plugins - makes plugin testing much easier
try:
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
+ topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
except ldap.LDAPError as e:
ldap.error('Failed to enable dynamic plugin!' + e.message['desc'])
assert False
-def test_betxt_7bit(topology):
+def test_betxt_7bit(topology_st):
'''
Test that the 7-bit plugin correctly rejects an invlaid update
'''
@@ -79,22 +46,22 @@ def test_betxt_7bit(topology):
BAD_RDN = eight_bit_rdn.encode('utf-8')
# This plugin should on by default, but just in case...
- topology.standalone.plugins.enable(name=PLUGIN_7_BIT_CHECK)
+ topology_st.standalone.plugins.enable(name=PLUGIN_7_BIT_CHECK)
# Add our test user
try:
- topology.standalone.add_s(Entry((USER_DN, {'objectclass': "top extensibleObject".split(),
- 'sn': '1',
- 'cn': 'test 1',
- 'uid': 'test_entry',
- 'userpassword': 'password'})))
+ topology_st.standalone.add_s(Entry((USER_DN, {'objectclass': "top extensibleObject".split(),
+ 'sn': '1',
+ 'cn': 'test 1',
+ 'uid': 'test_entry',
+ 'userpassword': 'password'})))
except ldap.LDAPError as e:
log.error('Failed to add test user' + USER_DN + ': error ' + e.message['desc'])
assert False
# Attempt a modrdn, this should fail
try:
- topology.standalone.rename_s(USER_DN, BAD_RDN, delold=0)
+ topology_st.standalone.rename_s(USER_DN, BAD_RDN, delold=0)
log.fatal('test_betxt_7bit: Modrdn operation incorrectly succeeded')
assert False
except ldap.LDAPError as e:
@@ -102,7 +69,7 @@ def test_betxt_7bit(topology):
# Make sure the operation did not succeed, attempt to search for the new RDN
try:
- entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, BAD_RDN)
+ entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, BAD_RDN)
if entries:
log.fatal('test_betxt_7bit: Incorrectly found the entry using the invalid RDN')
assert False
@@ -114,7 +81,7 @@ def test_betxt_7bit(topology):
# Cleanup - remove the user
#
try:
- topology.standalone.delete_s(USER_DN)
+ topology_st.standalone.delete_s(USER_DN)
except ldap.LDAPError as e:
log.fatal('Failed to delete test entry: ' + e.message['desc'])
assert False
@@ -122,7 +89,7 @@ def test_betxt_7bit(topology):
log.info('test_betxt_7bit: PASSED')
-def test_betxn_attr_uniqueness(topology):
+def test_betxn_attr_uniqueness(topology_st):
'''
Test that we can not add two entries that have the same attr value that is
defined by the plugin.
@@ -133,15 +100,15 @@ def test_betxn_attr_uniqueness(topology):
USER1_DN = 'uid=test_entry1,' + DEFAULT_SUFFIX
USER2_DN = 'uid=test_entry2,' + DEFAULT_SUFFIX
- topology.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS)
+ topology_st.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS)
# Add the first entry
try:
- topology.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(),
- 'sn': '1',
- 'cn': 'test 1',
- 'uid': 'test_entry1',
- 'userpassword': 'password1'})))
+ topology_st.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(),
+ 'sn': '1',
+ 'cn': 'test 1',
+ 'uid': 'test_entry1',
+ 'userpassword': 'password1'})))
except ldap.LDAPError as e:
log.fatal('test_betxn_attr_uniqueness: Failed to add test user: ' +
USER1_DN + ', error ' + e.message['desc'])
@@ -149,12 +116,12 @@ def test_betxn_attr_uniqueness(topology):
# Add the second entry with a dupliate uid
try:
- topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
- 'sn': '2',
- 'cn': 'test 2',
- 'uid': 'test_entry2',
- 'uid': 'test_entry1', # Duplicate value
- 'userpassword': 'password2'})))
+ topology_st.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
+ 'sn': '2',
+ 'cn': 'test 2',
+ 'uid': 'test_entry2',
+ 'uid': 'test_entry1', # Duplicate value
+ 'userpassword': 'password2'})))
log.fatal('test_betxn_attr_uniqueness: The second entry was incorrectly added.')
assert False
except ldap.LDAPError as e:
@@ -164,10 +131,10 @@ def test_betxn_attr_uniqueness(topology):
#
# Cleanup - disable plugin, remove test entry
#
- topology.standalone.plugins.disable(name=PLUGIN_ATTR_UNIQUENESS)
+ topology_st.standalone.plugins.disable(name=PLUGIN_ATTR_UNIQUENESS)
try:
- topology.standalone.delete_s(USER1_DN)
+ topology_st.standalone.delete_s(USER1_DN)
except ldap.LDAPError as e:
log.fatal('test_betxn_attr_uniqueness: Failed to delete test entry1: ' +
e.message['desc'])
@@ -176,31 +143,31 @@ def test_betxn_attr_uniqueness(topology):
log.info('test_betxn_attr_uniqueness: PASSED')
-def test_betxn_memberof(topology):
+def test_betxn_memberof(topology_st):
ENTRY1_DN = 'cn=group1,' + DEFAULT_SUFFIX
ENTRY2_DN = 'cn=group2,' + DEFAULT_SUFFIX
PLUGIN_DN = 'cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config'
# Enable and configure memberOf plugin
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'member')])
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'member')])
except ldap.LDAPError as e:
log.fatal('test_betxn_memberof: Failed to update config(member): error ' + e.message['desc'])
assert False
# Add our test entries
try:
- topology.standalone.add_s(Entry((ENTRY1_DN, {'objectclass': "top groupofnames".split(),
- 'cn': 'group1'})))
+ topology_st.standalone.add_s(Entry((ENTRY1_DN, {'objectclass': "top groupofnames".split(),
+ 'cn': 'group1'})))
except ldap.LDAPError as e:
log.error('test_betxn_memberof: Failed to add group1:' +
ENTRY1_DN + ', error ' + e.message['desc'])
assert False
try:
- topology.standalone.add_s(Entry((ENTRY2_DN, {'objectclass': "top groupofnames".split(),
- 'cn': 'group1'})))
+ topology_st.standalone.add_s(Entry((ENTRY2_DN, {'objectclass': "top groupofnames".split(),
+ 'cn': 'group1'})))
except ldap.LDAPError as e:
log.error('test_betxn_memberof: Failed to add group2:' +
ENTRY2_DN + ', error ' + e.message['desc'])
@@ -212,7 +179,7 @@ def test_betxn_memberof(topology):
# Add group2 to group1 - it should fail with objectclass violation
try:
- topology.standalone.modify_s(ENTRY1_DN, [(ldap.MOD_REPLACE, 'member', ENTRY2_DN)])
+ topology_st.standalone.modify_s(ENTRY1_DN, [(ldap.MOD_REPLACE, 'member', ENTRY2_DN)])
log.fatal('test_betxn_memberof: Group2 was incorrectly allowed to be added to group1')
assert False
except ldap.LDAPError as e:
@@ -224,7 +191,7 @@ def test_betxn_memberof(topology):
# Add group2 to group1 - it should fail with objectclass violation
try:
- topology.standalone.modify_s(ENTRY1_DN, [(ldap.MOD_ADD, 'member', ENTRY2_DN)])
+ topology_st.standalone.modify_s(ENTRY1_DN, [(ldap.MOD_ADD, 'member', ENTRY2_DN)])
log.fatal('test_betxn_memberof: Group2 was incorrectly allowed to be added to group1')
assert False
except ldap.LDAPError as e:
diff --git a/dirsrvtests/tests/suites/chaining_plugin/chaining_test.py b/dirsrvtests/tests/suites/chaining_plugin/chaining_test.py
deleted file mode 100644
index dc7807b..0000000
--- a/dirsrvtests/tests/suites/chaining_plugin/chaining_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_chaining_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_chaining_(topology):
- '''
- Write a single test here...
- '''
- log.info('chaining test suite PASSED')
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/clu/clu_test.py b/dirsrvtests/tests/suites/clu/clu_test.py
index e8bccb6..abf477c 100644
--- a/dirsrvtests/tests/suites/clu/clu_test.py
+++ b/dirsrvtests/tests/suites/clu/clu_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -18,55 +18,18 @@ from lib389._constants import *
from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
+
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_clu_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_clu_pwdhash(topology):
- '''
- Test the pwdhash script
- '''
+def test_clu_pwdhash(topology_st):
+ """Test the pwdhash script"""
log.info('Running test_clu_pwdhash...')
- cmd = '%s -s ssha testpassword' % os.path.join(topology.standalone.get_bin_dir(), 'pwdhash')
+ cmd = '%s -s ssha testpassword' % os.path.join(topology_st.standalone.get_bin_dir(), 'pwdhash')
p = os.popen(cmd)
result = p.readline()
diff --git a/dirsrvtests/tests/suites/clu/db2ldif_test.py b/dirsrvtests/tests/suites/clu/db2ldif_test.py
deleted file mode 100644
index be4405a..0000000
--- a/dirsrvtests/tests/suites/clu/db2ldif_test.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_db2ldif_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/collation_plugin/collatation_test.py b/dirsrvtests/tests/suites/collation_plugin/collatation_test.py
deleted file mode 100644
index d915165..0000000
--- a/dirsrvtests/tests/suites/collation_plugin/collatation_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_collatation_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_collatation_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/config/config_test.py b/dirsrvtests/tests/suites/config/config_test.py
index 4670bb1..0323427 100644
--- a/dirsrvtests/tests/suites/config/config_test.py
+++ b/dirsrvtests/tests/suites/config/config_test.py
@@ -17,130 +17,14 @@ from lib389.tools import DirSrvTools
from lib389._constants import *
from lib389.properties import *
from lib389.tasks import *
+from lib389.topologies import topology_m2
-DEBUGGING = False
USER_DN = 'uid=test_user,%s' % DEFAULT_SUFFIX
-if DEBUGGING:
- logging.getLogger(__name__).setLevel(logging.DEBUG)
-else:
- logging.getLogger(__name__).setLevel(logging.INFO)
-
+logging.getLogger(__name__).setLevel(logging.INFO)
log = logging.getLogger(__name__)
-class TopologyReplication(object):
- """The Replication Topology Class"""
- def __init__(self, master1, master2):
- """Init"""
- master1.open()
- self.master1 = master1
- master2.open()
- self.master2 = master2
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- """Create Replication Deployment"""
-
- # Creating master 1...
- if DEBUGGING:
- master1 = DirSrv(verbose=True)
- else:
- master1 = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_MASTER_1
- args_instance[SER_PORT] = PORT_MASTER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master1.allocate(args_master)
- instance_master1 = master1.exists()
- if instance_master1:
- master1.delete()
- master1.create()
- master1.open()
- master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-
- # Creating master 2...
- if DEBUGGING:
- master2 = DirSrv(verbose=True)
- else:
- master2 = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_MASTER_2
- args_instance[SER_PORT] = PORT_MASTER_2
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master2.allocate(args_master)
- instance_master2 = master2.exists()
- if instance_master2:
- master2.delete()
- master2.create()
- master2.open()
- master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
- #
- # Create all the agreements
- #
- # Creating agreement from master 1 to master 2
- properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
- if not m1_m2_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m1_m2_agmt)
-
- # Creating agreement from master 2 to master 1
- properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
- if not m2_m1_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m2_m1_agmt)
-
- # Allow the replicas to get situated with the new agreements...
- time.sleep(5)
-
- #
- # Initialize all the agreements
- #
- master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
- master1.waitForReplInit(m1_m2_agmt)
-
- # Check replication is working...
- if master1.testReplication(DEFAULT_SUFFIX, master2):
- log.info('Replication is working.')
- else:
- log.fatal('Replication is not working.')
- raise
-
- def fin():
- """If we are debugging just stop the instances, otherwise remove
- them
- """
- if DEBUGGING:
- master1.stop()
- master2.stop()
- else:
- master1.delete()
- master2.delete()
-
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- master1.clearTmpDir(__file__)
-
- return TopologyReplication(master1, master2)
-
-
@pytest.fixture(scope="module")
def big_file():
TEMP_BIG_FILE = ''
@@ -154,18 +38,18 @@ def big_file():
@pytest.fixture
-def test_user(topology):
+def test_user(topology_m2):
"""Add and remove test user"""
try:
- topology.master1.add_s(Entry((USER_DN, {
- 'uid': 'test_user',
- 'givenName': 'test_user',
- 'objectclass': ['top', 'person',
- 'organizationalPerson',
- 'inetorgperson'],
- 'cn': 'test_user',
- 'sn': 'test_user'})))
+ topology_m2.ms["master1"].add_s(Entry((USER_DN, {
+ 'uid': 'test_user',
+ 'givenName': 'test_user',
+ 'objectclass': ['top', 'person',
+ 'organizationalPerson',
+ 'inetorgperson'],
+ 'cn': 'test_user',
+ 'sn': 'test_user'})))
time.sleep(1)
except ldap.LDAPError as e:
log.fatal('Failed to add user (%s): error (%s)' % (USER_DN,
@@ -174,16 +58,16 @@ def test_user(topology):
def fin():
try:
- topology.master1.delete_s(USER_DN)
+ topology_m2.ms["master1"].delete_s(USER_DN)
time.sleep(1)
except ldap.LDAPError as e:
log.fatal('Failed to delete user (%s): error (%s)' % (
- USER_DN,
- e.message['desc']))
+ USER_DN,
+ e.message['desc']))
raise
-def test_maxbersize_repl(topology, test_user, big_file):
+def test_maxbersize_repl(topology_m2, test_user, big_file):
"""maxbersize is ignored in the replicated operations.
:Feature: Config
@@ -201,27 +85,27 @@ def test_maxbersize_repl(topology, test_user, big_file):
"""
log.info("Set nsslapd-maxbersize: 20K to master2")
try:
- topology.master2.modify_s("cn=config", [(ldap.MOD_REPLACE,
- 'nsslapd-maxbersize', '20480')])
+ topology_m2.ms["master2"].modify_s("cn=config", [(ldap.MOD_REPLACE,
+ 'nsslapd-maxbersize', '20480')])
except ldap.LDAPError as e:
log.error('Failed to set nsslapd-maxbersize == 20480: error ' +
- e.message['desc'])
+ e.message['desc'])
raise
- topology.master2.restart(20)
+ topology_m2.ms["master2"].restart(20)
log.info('Try to add attribute with a big value to master2 - expect to FAIL')
with pytest.raises(ldap.SERVER_DOWN):
- topology.master2.modify_s(USER_DN, [(ldap.MOD_REPLACE,
- 'jpegphoto', big_file)])
+ topology_m2.ms["master2"].modify_s(USER_DN, [(ldap.MOD_REPLACE,
+ 'jpegphoto', big_file)])
- topology.master2.restart(20)
- topology.master1.restart(20)
+ topology_m2.ms["master2"].restart(20)
+ topology_m2.ms["master1"].restart(20)
log.info('Try to add attribute with a big value to master1 - expect to PASS')
try:
- topology.master1.modify_s(USER_DN, [(ldap.MOD_REPLACE,
- 'jpegphoto', big_file)])
+ topology_m2.ms["master1"].modify_s(USER_DN, [(ldap.MOD_REPLACE,
+ 'jpegphoto', big_file)])
except ldap.SERVER_DOWN as e:
log.fatal('Failed to add a big attribute, error: ' + e.message['desc'])
raise
@@ -230,43 +114,43 @@ def test_maxbersize_repl(topology, test_user, big_file):
log.info('Check if a big value was successfully added to master1')
try:
- entries = topology.master1.search_s(USER_DN, ldap.SCOPE_BASE,
- '(cn=*)',
- ['jpegphoto'])
+ entries = topology_m2.ms["master1"].search_s(USER_DN, ldap.SCOPE_BASE,
+ '(cn=*)',
+ ['jpegphoto'])
assert entries[0].data['jpegphoto']
except ldap.LDAPError as e:
- log.fatal('Search failed, error: ' + e.message['desc'])
- raise
+ log.fatal('Search failed, error: ' + e.message['desc'])
+ raise
log.info('Check if a big value was successfully replicated to master2')
try:
- entries = topology.master2.search_s(USER_DN, ldap.SCOPE_BASE,
- '(cn=*)',
- ['jpegphoto'])
+ entries = topology_m2.ms["master2"].search_s(USER_DN, ldap.SCOPE_BASE,
+ '(cn=*)',
+ ['jpegphoto'])
assert entries[0].data['jpegphoto']
except ldap.LDAPError as e:
- log.fatal('Search failed, error: ' + e.message['desc'])
- raise
+ log.fatal('Search failed, error: ' + e.message['desc'])
+ raise
log.info("Set nsslapd-maxbersize: 2097152 (default) to master2")
try:
- topology.master2.modify_s("cn=config", [(ldap.MOD_REPLACE,
- 'nsslapd-maxbersize', '2097152')])
+ topology_m2.ms["master2"].modify_s("cn=config", [(ldap.MOD_REPLACE,
+ 'nsslapd-maxbersize', '2097152')])
except ldap.LDAPError as e:
log.error('Failed to set nsslapd-maxbersize == 2097152 error ' +
- e.message['desc'])
+ e.message['desc'])
raise
-def test_config_listen_backport_size(topology):
+def test_config_listen_backport_size(topology_m2):
"""We need to check that we can search on nsslapd-listen-backlog-size,
and change its value: to a psoitive number and a negative number.
Verify invalid value is rejected.
"""
try:
- entry = topology.master1.search_s(DN_CONFIG, ldap.SCOPE_BASE, 'objectclass=top',
- ['nsslapd-listen-backlog-size'])
+ entry = topology_m2.ms["master1"].search_s(DN_CONFIG, ldap.SCOPE_BASE, 'objectclass=top',
+ ['nsslapd-listen-backlog-size'])
default_val = entry[0].data['nsslapd-listen-backlog-size'][0]
assert default_val, 'Failed to get nsslapd-listen-backlog-size from config'
except ldap.LDAPError as e:
@@ -274,39 +158,39 @@ def test_config_listen_backport_size(topology):
raise
try:
- topology.master1.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
- 'nsslapd-listen-backlog-size',
- '256')])
+ topology_m2.ms["master1"].modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
+ 'nsslapd-listen-backlog-size',
+ '256')])
except ldap.LDAPError as e:
log.fatal('Failed to modify config, error: ' + e.message('desc'))
raise
try:
- topology.master1.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
- 'nsslapd-listen-backlog-size',
- '-1')])
+ topology_m2.ms["master1"].modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
+ 'nsslapd-listen-backlog-size',
+ '-1')])
except ldap.LDAPError as e:
log.fatal('Failed to modify config(negative value), error: ' +
e.message('desc'))
raise
with pytest.raises(ldap.LDAPError):
- topology.master1.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
- 'nsslapd-listen-backlog-size',
- 'ZZ')])
+ topology_m2.ms["master1"].modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
+ 'nsslapd-listen-backlog-size',
+ 'ZZ')])
log.fatal('Invalid value was successfully added')
# Cleanup - undo what we've done
try:
- topology.master1.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
- 'nsslapd-listen-backlog-size',
- default_val)])
+ topology_m2.ms["master1"].modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
+ 'nsslapd-listen-backlog-size',
+ default_val)])
except ldap.LDAPError as e:
log.fatal('Failed to reset config, error: ' + e.message('desc'))
raise
-def test_config_deadlock_policy(topology):
+def test_config_deadlock_policy(topology_m2):
"""We need to check that nsslapd-db-deadlock-policy exists, that we can
change the value, and invalid values are rejected
"""
@@ -315,8 +199,8 @@ def test_config_deadlock_policy(topology):
default_val = '9'
try:
- entry = topology.master1.search_s(LDBM_DN, ldap.SCOPE_BASE, 'objectclass=top',
- ['nsslapd-db-deadlock-policy'])
+ entry = topology_m2.ms["master1"].search_s(LDBM_DN, ldap.SCOPE_BASE, 'objectclass=top',
+ ['nsslapd-db-deadlock-policy'])
val = entry[0].data['nsslapd-db-deadlock-policy'][0]
assert val, 'Failed to get nsslapd-db-deadlock-policy from config'
assert val == default_val, 'The wrong derfualt value was present'
@@ -327,9 +211,9 @@ def test_config_deadlock_policy(topology):
# Try a range of valid values
for val in ('0', '5', '9'):
try:
- topology.master1.modify_s(LDBM_DN, [(ldap.MOD_REPLACE,
- 'nsslapd-db-deadlock-policy',
- val)])
+ topology_m2.ms["master1"].modify_s(LDBM_DN, [(ldap.MOD_REPLACE,
+ 'nsslapd-db-deadlock-policy',
+ val)])
except ldap.LDAPError as e:
log.fatal('Failed to modify config: nsslapd-db-deadlock-policy to (%s), error: %s' %
(val, e.message('desc')))
@@ -338,16 +222,16 @@ def test_config_deadlock_policy(topology):
# Try a range of invalid values
for val in ('-1', '10'):
with pytest.raises(ldap.LDAPError):
- topology.master1.modify_s(LDBM_DN, [(ldap.MOD_REPLACE,
- 'nsslapd-db-deadlock-policy',
- val)])
+ topology_m2.ms["master1"].modify_s(LDBM_DN, [(ldap.MOD_REPLACE,
+ 'nsslapd-db-deadlock-policy',
+ val)])
log.fatal('Able to add invalid value to nsslapd-db-deadlock-policy(%s)' % (val))
# Cleanup - undo what we've done
try:
- topology.master1.modify_s(LDBM_DN, [(ldap.MOD_REPLACE,
- 'nsslapd-db-deadlock-policy',
- default_val)])
+ topology_m2.ms["master1"].modify_s(LDBM_DN, [(ldap.MOD_REPLACE,
+ 'nsslapd-db-deadlock-policy',
+ default_val)])
except ldap.LDAPError as e:
log.fatal('Failed to reset nsslapd-db-deadlock-policy to the default value(%s), error: %s' %
(default_val, e.message('desc')))
diff --git a/dirsrvtests/tests/suites/cos_plugin/cos_test.py b/dirsrvtests/tests/suites/cos_plugin/cos_test.py
deleted file mode 100644
index f06b9d4..0000000
--- a/dirsrvtests/tests/suites/cos_plugin/cos_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_cos_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_cos_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/deref_plugin/deref_test.py b/dirsrvtests/tests/suites/deref_plugin/deref_test.py
deleted file mode 100644
index 0153932..0000000
--- a/dirsrvtests/tests/suites/deref_plugin/deref_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_deref_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_deref_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/disk_monitoring/disk_monitor_test.py b/dirsrvtests/tests/suites/disk_monitoring/disk_monitor_test.py
deleted file mode 100644
index 8930ebd..0000000
--- a/dirsrvtests/tests/suites/disk_monitoring/disk_monitor_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_disk_monitor_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_disk_monitor_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/distrib_plugin/distrib_test.py b/dirsrvtests/tests/suites/distrib_plugin/distrib_test.py
deleted file mode 100644
index 101d65c..0000000
--- a/dirsrvtests/tests/suites/distrib_plugin/distrib_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_distrib_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_distrib_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/dna_plugin/dna_test.py b/dirsrvtests/tests/suites/dna_plugin/dna_test.py
index 8830d5e..84a000e 100644
--- a/dirsrvtests/tests/suites/dna_plugin/dna_test.py
+++ b/dirsrvtests/tests/suites/dna_plugin/dna_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -18,6 +18,7 @@ from lib389._constants import *
from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
@@ -35,60 +36,13 @@ PEOPLE_OU = 'ou=people,' + DEFAULT_SUFFIX
GROUP_DN = 'cn=group,' + DEFAULT_SUFFIX
CONFIG_AREA = 'nsslapd-pluginConfigArea'
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- # This is useful for analysing the test env.
- standalone.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], excludeSuffixes=[], encrypt=False,
- repl_data=True, outputfile='{}/{}.ldif'.format(standalone.ldifdir, SERVERID_STANDALONE))
- standalone.clearBackupFS()
- standalone.backupFS()
- standalone.delete()
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- return TopologyStandalone(standalone)
-
-
-def test_dna_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_dna_(topology):
- '''
- Write a single test here...
- '''
-
- # stop the plugin, and start it
- topology.standalone.plugins.disable(name=PLUGIN_DNA)
- topology.standalone.plugins.enable(name=PLUGIN_DNA)
+
+def test_basic(topology_st):
+ """Test basic functionality"""
+
+ # Stop the plugin, and start it
+ topology_st.standalone.plugins.disable(name=PLUGIN_DNA)
+ topology_st.standalone.plugins.enable(name=PLUGIN_DNA)
CONFIG_DN = 'cn=config,cn=' + PLUGIN_DNA + ',cn=plugins,cn=config'
@@ -99,20 +53,20 @@ def test_dna_(topology):
############################################################################
try:
- topology.standalone.add_s(Entry((CONFIG_DN, {
- 'objectclass': 'top dnaPluginConfig'.split(),
- 'cn': 'config',
- 'dnatype': 'uidNumber',
- 'dnafilter': '(objectclass=top)',
- 'dnascope': DEFAULT_SUFFIX,
- 'dnaMagicRegen': '-1',
- 'dnaMaxValue': '50000',
- 'dnaNextValue': '1'
- })))
+ topology_st.standalone.add_s(Entry((CONFIG_DN, {
+ 'objectclass': 'top dnaPluginConfig'.split(),
+ 'cn': 'config',
+ 'dnatype': 'uidNumber',
+ 'dnafilter': '(objectclass=top)',
+ 'dnascope': DEFAULT_SUFFIX,
+ 'dnaMagicRegen': '-1',
+ 'dnaMaxValue': '50000',
+ 'dnaNextValue': '1'
+ })))
except ldap.ALREADY_EXISTS:
try:
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'dnaNextValue', '1'),
- (ldap.MOD_REPLACE, 'dnaMagicRegen', '-1')])
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'dnaNextValue', '1'),
+ (ldap.MOD_REPLACE, 'dnaMagicRegen', '-1')])
except ldap.LDAPError as e:
log.fatal('test_dna: Failed to set the DNA plugin: error ' + e.message['desc'])
assert False
@@ -122,24 +76,24 @@ def test_dna_(topology):
# Do we need to restart for the plugin?
- topology.standalone.restart()
+ topology_st.standalone.restart()
############################################################################
# Test plugin
############################################################################
try:
- topology.standalone.add_s(Entry((USER1_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'user1'
- })))
+ topology_st.standalone.add_s(Entry((USER1_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user1'
+ })))
except ldap.LDAPError as e:
log.fatal('test_dna: Failed to user1: error ' + e.message['desc'])
assert False
# See if the entry now has the new uidNumber assignment - uidNumber=1
try:
- entries = topology.standalone.search_s(USER1_DN, ldap.SCOPE_BASE, '(uidNumber=1)')
+ entries = topology_st.standalone.search_s(USER1_DN, ldap.SCOPE_BASE, '(uidNumber=1)')
if not entries:
log.fatal('test_dna: user1 was not updated - (looking for uidNumber: 1)')
assert False
@@ -149,14 +103,14 @@ def test_dna_(topology):
# Test the magic regen value
try:
- topology.standalone.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'uidNumber', '-1')])
+ topology_st.standalone.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'uidNumber', '-1')])
except ldap.LDAPError as e:
log.fatal('test_dna: Failed to set the magic reg value: error ' + e.message['desc'])
assert False
# See if the entry now has the new uidNumber assignment - uidNumber=2
try:
- entries = topology.standalone.search_s(USER1_DN, ldap.SCOPE_BASE, '(uidNumber=2)')
+ entries = topology_st.standalone.search_s(USER1_DN, ldap.SCOPE_BASE, '(uidNumber=2)')
if not entries:
log.fatal('test_dna: user1 was not updated (looking for uidNumber: 2)')
assert False
@@ -169,7 +123,7 @@ def test_dna_(topology):
################################################################################
try:
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'dnaMagicRegen', '-2')])
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'dnaMagicRegen', '-2')])
except ldap.LDAPError as e:
log.fatal('test_dna: Failed to set the magic reg value to -2: error ' + e.message['desc'])
assert False
@@ -180,14 +134,14 @@ def test_dna_(topology):
# Test the magic regen value
try:
- topology.standalone.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'uidNumber', '-2')])
+ topology_st.standalone.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'uidNumber', '-2')])
except ldap.LDAPError as e:
log.fatal('test_dna: Failed to set the magic reg value: error ' + e.message['desc'])
assert False
# See if the entry now has the new uidNumber assignment - uidNumber=3
try:
- entries = topology.standalone.search_s(USER1_DN, ldap.SCOPE_BASE, '(uidNumber=3)')
+ entries = topology_st.standalone.search_s(USER1_DN, ldap.SCOPE_BASE, '(uidNumber=3)')
if not entries:
log.fatal('test_dna: user1 was not updated (looking for uidNumber: 3)')
assert False
@@ -199,27 +153,19 @@ def test_dna_(topology):
# Test plugin dependency
############################################################################
- #test_dependency(inst, PLUGIN_AUTOMEMBER)
+ # test_dependency(inst, PLUGIN_AUTOMEMBER)
############################################################################
# Cleanup
############################################################################
try:
- topology.standalone.delete_s(USER1_DN)
+ topology_st.standalone.delete_s(USER1_DN)
except ldap.LDAPError as e:
log.fatal('test_dna: Failed to delete test entry1: ' + e.message['desc'])
assert False
- topology.standalone.plugins.disable(name=PLUGIN_DNA)
-
- ############################################################################
- # Test passed
- ############################################################################
-
- log.info('test_dna: PASS\n')
-
- return
+ topology_st.standalone.plugins.disable(name=PLUGIN_DNA)
if __name__ == '__main__':
diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
deleted file mode 100644
index 1f6862d..0000000
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_ds_logs_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_ds_logs_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py b/dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py
index 30dfa88..b8bf477 100644
--- a/dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py
+++ b/dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py b/dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py
index 920d3f6..f98812c 100644
--- a/dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py
+++ b/dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py b/dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py
index 2a038a9..2411424 100644
--- a/dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py
+++ b/dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -25,57 +25,19 @@ from lib389.tools import DirSrvTools
from lib389._constants import *
from lib389.properties import *
from lib389.tasks import *
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
def repl_fail(replica):
- # remove replica instance, and assert failure
+ """Remove replica instance, and assert failure"""
+
replica.delete()
assert False
-@pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-
-def test_dynamic_plugins(topology):
+def test_dynamic_plugins(topology_st):
"""
Test Dynamic Plugins - exercise each plugin and its main features, while
changing the configuration without restarting the server.
@@ -97,7 +59,6 @@ def test_dynamic_plugins(topology):
Stress - Put the server under load that will trigger multiple plugins(MO, RI, DNA, etc)
Restart various plugins while these operations are going on. Perform this test
5 times(stress_max_run).
-
"""
REPLICA_PORT = 33334
@@ -110,14 +71,14 @@ def test_dynamic_plugins(topology):
# First enable dynamic plugins
try:
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
+ topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
except ldap.LDAPError as e:
ldap.fatal('Failed to enable dynamic plugin!' + e.message['desc'])
assert False
# Test that critical plugins can be updated even though the change might not be applied
try:
- topology.standalone.modify_s(DN_LDBM, [(ldap.MOD_REPLACE, 'description', 'test')])
+ topology_st.standalone.modify_s(DN_LDBM, [(ldap.MOD_REPLACE, 'description', 'test')])
except ldap.LDAPError as e:
ldap.fatal('Failed to apply change to critical plugin' + e.message['desc'])
assert False
@@ -135,7 +96,7 @@ def test_dynamic_plugins(topology):
log.info('Testing Dynamic Plugins Functionality' + msg + '...')
log.info('####################################################################\n')
- plugin_tests.test_all_plugins(topology.standalone)
+ plugin_tests.test_all_plugins(topology_st.standalone)
log.info('####################################################################')
log.info('Successfully Tested Dynamic Plugins Functionality' + msg + '.')
@@ -155,20 +116,20 @@ def test_dynamic_plugins(topology):
#
# Restart the plugin several times (and prev plugins) - work that linked list
#
- plugin_test(topology.standalone, "restart")
+ plugin_test(topology_st.standalone, "restart")
if prev_prev_plugin_test:
- prev_prev_plugin_test(topology.standalone, "restart")
+ prev_prev_plugin_test(topology_st.standalone, "restart")
- plugin_test(topology.standalone, "restart")
+ plugin_test(topology_st.standalone, "restart")
if prev_plugin_test:
- prev_plugin_test(topology.standalone, "restart")
+ prev_plugin_test(topology_st.standalone, "restart")
- plugin_test(topology.standalone, "restart")
+ plugin_test(topology_st.standalone, "restart")
# Now run the functional test
- plugin_test(topology.standalone)
+ plugin_test(topology_st.standalone)
# Set the previous tests
if prev_plugin_test:
@@ -188,8 +149,8 @@ def test_dynamic_plugins(topology):
log.info('Stressing Dynamic Plugins' + msg + '...')
log.info('####################################################################\n')
- stress_tests.configureMO(topology.standalone)
- stress_tests.configureRI(topology.standalone)
+ stress_tests.configureMO(topology_st.standalone)
+ stress_tests.configureRI(topology_st.standalone)
stress_count = 0
while stress_count < stress_max_runs:
@@ -199,37 +160,37 @@ def test_dynamic_plugins(topology):
try:
# Launch three new threads to add a bunch of users
- add_users = stress_tests.AddUsers(topology.standalone, 'employee', True)
+ add_users = stress_tests.AddUsers(topology_st.standalone, 'employee', True)
add_users.start()
- add_users2 = stress_tests.AddUsers(topology.standalone, 'entry', True)
+ add_users2 = stress_tests.AddUsers(topology_st.standalone, 'entry', True)
add_users2.start()
- add_users3 = stress_tests.AddUsers(topology.standalone, 'person', True)
+ add_users3 = stress_tests.AddUsers(topology_st.standalone, 'person', True)
add_users3.start()
time.sleep(1)
# While we are adding users restart the MO plugin and an idle plugin
- topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
time.sleep(1)
- topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
time.sleep(1)
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
- topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
- topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
+ topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
+ topology_st.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
time.sleep(1)
- topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
time.sleep(2)
- topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
time.sleep(1)
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
- topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
- topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
- topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
+ topology_st.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
+ topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
time.sleep(1)
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
- topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
# Wait for the 'adding' threads to complete
add_users.join()
@@ -237,43 +198,43 @@ def test_dynamic_plugins(topology):
add_users3.join()
# Now launch three threads to delete the users
- del_users = stress_tests.DelUsers(topology.standalone, 'employee')
+ del_users = stress_tests.DelUsers(topology_st.standalone, 'employee')
del_users.start()
- del_users2 = stress_tests.DelUsers(topology.standalone, 'entry')
+ del_users2 = stress_tests.DelUsers(topology_st.standalone, 'entry')
del_users2.start()
- del_users3 = stress_tests.DelUsers(topology.standalone, 'person')
+ del_users3 = stress_tests.DelUsers(topology_st.standalone, 'person')
del_users3.start()
time.sleep(1)
# Restart both the MO, RI plugins during these deletes, and an idle plugin
- topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
- topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
- topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
+ topology_st.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
+ topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
time.sleep(1)
- topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
+ topology_st.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
time.sleep(1)
- topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
time.sleep(1)
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
time.sleep(1)
- topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
- topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
- topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
- topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
- topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
- topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
+ topology_st.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
+ topology_st.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
+ topology_st.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
+ topology_st.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
+ topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
time.sleep(2)
- topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
+ topology_st.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
time.sleep(1)
- topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
time.sleep(1)
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
time.sleep(1)
- topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
- topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
- topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
+ topology_st.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
+ topology_st.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
+ topology_st.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
# Wait for the 'deleting' threads to complete
del_users.join()
@@ -281,11 +242,11 @@ def test_dynamic_plugins(topology):
del_users3.join()
# Now make sure both the MO and RI plugins still work correctly
- plugin_tests.func_tests[8](topology.standalone) # RI plugin
- plugin_tests.func_tests[5](topology.standalone) # MO plugin
+ plugin_tests.func_tests[8](topology_st.standalone) # RI plugin
+ plugin_tests.func_tests[5](topology_st.standalone) # MO plugin
# Cleanup the stress tests
- stress_tests.cleanup(topology.standalone)
+ stress_tests.cleanup(topology_st.standalone)
except:
log.info('Stress test failed!')
@@ -319,29 +280,29 @@ def test_dynamic_plugins(topology):
replica_inst.open()
try:
- topology.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX,
- role=REPLICAROLE_MASTER,
- replicaId=1)
+ topology_st.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX,
+ role=REPLICAROLE_MASTER,
+ replicaId=1)
replica_inst.replica.enableReplication(suffix=DEFAULT_SUFFIX,
- role=REPLICAROLE_CONSUMER,
- replicaId=65535)
+ role=REPLICAROLE_CONSUMER,
+ replicaId=65535)
properties = {RA_NAME: r'to_replica',
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- repl_agreement = topology.standalone.agreement.create(suffix=DEFAULT_SUFFIX,
- host=LOCALHOST,
- port=REPLICA_PORT,
- properties=properties)
+ repl_agreement = topology_st.standalone.agreement.create(suffix=DEFAULT_SUFFIX,
+ host=LOCALHOST,
+ port=REPLICA_PORT,
+ properties=properties)
if not repl_agreement:
log.fatal("Fail to create a replica agreement")
repl_fail(replica_inst)
- topology.standalone.agreement.init(DEFAULT_SUFFIX, LOCALHOST, REPLICA_PORT)
- topology.standalone.waitForReplInit(repl_agreement)
+ topology_st.standalone.agreement.init(DEFAULT_SUFFIX, LOCALHOST, REPLICA_PORT)
+ topology_st.standalone.waitForReplInit(repl_agreement)
except:
log.info('Failed to setup replication!')
repl_fail(replica_inst)
@@ -358,7 +319,7 @@ def test_dynamic_plugins(topology):
try:
# Grab master's max CSN
- entry = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, RUV_FILTER)
+ entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, RUV_FILTER)
if not entry:
log.error('Failed to find db tombstone entry from master')
repl_fail(replica_inst)
@@ -423,9 +384,9 @@ def test_dynamic_plugins(topology):
# Check the master
try:
- entries = topology.standalone.search_s(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- "(|(uid=person*)(uid=entry*)(uid=employee*))")
+ entries = topology_st.standalone.search_s(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ "(|(uid=person*)(uid=entry*)(uid=employee*))")
if len(entries) > 0:
log.error('Master database has incorrect data set!\n')
repl_fail(replica_inst)
diff --git a/dirsrvtests/tests/suites/filter/filter_test.py b/dirsrvtests/tests/suites/filter/filter_test.py
index 1601c23..81a3ee7 100644
--- a/dirsrvtests/tests/suites/filter/filter_test.py
+++ b/dirsrvtests/tests/suites/filter/filter_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -17,54 +17,13 @@ from lib389.tools import DirSrvTools
from lib389._constants import *
from lib389.properties import *
from lib389.tasks import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_filter_init(topology):
- '''
- Write your testcase here...
- '''
- return
-
-
-def test_filter_escaped(topology):
+def test_filter_escaped(topology_st):
'''
Test we can search for an '*' in a attribute value.
'''
@@ -75,7 +34,7 @@ def test_filter_escaped(topology):
USER2_DN = 'uid=test_entry2,' + DEFAULT_SUFFIX
try:
- topology.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(),
+ topology_st.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(),
'sn': '1',
'cn': 'test * me',
'uid': 'test_entry',
@@ -86,7 +45,7 @@ def test_filter_escaped(topology):
assert False
try:
- topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
+ topology_st.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
'sn': '2',
'cn': 'test me',
'uid': 'test_entry2',
@@ -96,7 +55,7 @@ def test_filter_escaped(topology):
assert False
try:
- entry = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'cn=*\**')
+ entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'cn=*\**')
if not entry or len(entry) > 1:
log.fatal('test_filter_escaped: Entry was not found using "cn=*\**"')
assert False
@@ -108,7 +67,7 @@ def test_filter_escaped(topology):
log.info('test_filter_escaped: PASSED')
-def test_filter_search_original_attrs(topology):
+def test_filter_search_original_attrs(topology_st):
'''
Search and request attributes with extra characters. The returned entry
should not have these extra characters: "objectclass EXTRA"
@@ -117,7 +76,7 @@ def test_filter_search_original_attrs(topology):
log.info('Running test_filter_search_original_attrs...')
try:
- entry = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_BASE,
+ entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_BASE,
'objectclass=top', ['objectclass-EXTRA'])
if entry[0].hasAttr('objectclass-EXTRA'):
log.fatal('test_filter_search_original_attrs: Entry does not have the original attribute')
diff --git a/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py b/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py
index 0fc207e..9812fae 100644
--- a/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py
+++ b/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py
@@ -16,9 +16,7 @@ from lib389._constants import *
from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
+from lib389.topologies import topology_st
DN_PEOPLE = 'ou=people,%s' % DEFAULT_SUFFIX
DN_ROOT = ''
@@ -28,91 +26,58 @@ TEST_USER_PWD = 'all_attrs_test'
# Suffix for search, Regular user boolean, List of expected attrs
TEST_PARAMS = [(DN_ROOT, False, [
- 'aci', 'createTimestamp', 'creatorsName',
- 'modifiersName', 'modifyTimestamp', 'namingContexts',
- 'nsBackendSuffix', 'nsUniqueId', 'subschemaSubentry',
- 'supportedControl', 'supportedExtension',
- 'supportedFeatures', 'supportedLDAPVersion',
- 'supportedSASLMechanisms', 'vendorName', 'vendorVersion'
- ]),
+ 'aci', 'createTimestamp', 'creatorsName',
+ 'modifiersName', 'modifyTimestamp', 'namingContexts',
+ 'nsBackendSuffix', 'nsUniqueId', 'subschemaSubentry',
+ 'supportedControl', 'supportedExtension',
+ 'supportedFeatures', 'supportedLDAPVersion',
+ 'supportedSASLMechanisms', 'vendorName', 'vendorVersion'
+]),
(DN_ROOT, True, [
- 'createTimestamp', 'creatorsName',
- 'modifiersName', 'modifyTimestamp', 'namingContexts',
- 'nsBackendSuffix', 'nsUniqueId', 'subschemaSubentry',
- 'supportedControl', 'supportedExtension',
- 'supportedFeatures', 'supportedLDAPVersion',
- 'supportedSASLMechanisms', 'vendorName', 'vendorVersion'
- ]),
+ 'createTimestamp', 'creatorsName',
+ 'modifiersName', 'modifyTimestamp', 'namingContexts',
+ 'nsBackendSuffix', 'nsUniqueId', 'subschemaSubentry',
+ 'supportedControl', 'supportedExtension',
+ 'supportedFeatures', 'supportedLDAPVersion',
+ 'supportedSASLMechanisms', 'vendorName', 'vendorVersion'
+ ]),
(DN_PEOPLE, False, [
'aci', 'createTimestamp', 'creatorsName', 'entrydn',
'entryid', 'modifiersName', 'modifyTimestamp',
'nsUniqueId', 'numSubordinates', 'parentid'
- ]),
+ ]),
(DN_PEOPLE, True, [
'createTimestamp', 'creatorsName', 'entrydn',
'entryid', 'modifyTimestamp', 'nsUniqueId',
'numSubordinates', 'parentid'
- ]),
+ ]),
(TEST_USER_DN, False, [
'createTimestamp', 'creatorsName', 'entrydn',
'entryid', 'modifiersName', 'modifyTimestamp',
'nsUniqueId', 'parentid'
- ]),
+ ]),
(TEST_USER_DN, True, [
'createTimestamp', 'creatorsName', 'entrydn',
'entryid', 'modifyTimestamp', 'nsUniqueId', 'parentid'
- ]),
+ ]),
(DN_CONFIG, False, ['numSubordinates', 'passwordHistory'])]
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- return TopologyStandalone(standalone)
-
-
@pytest.fixture(scope="module")
-def test_user(topology):
+def test_user(topology_st):
"""User for binding operation"""
try:
- topology.standalone.add_s(Entry((TEST_USER_DN, {
- 'objectclass': 'top person'.split(),
- 'objectclass': 'organizationalPerson',
- 'objectclass': 'inetorgperson',
- 'cn': TEST_USER_NAME,
- 'sn': TEST_USER_NAME,
- 'userpassword': TEST_USER_PWD,
- 'mail': '%s@redhat.com' % TEST_USER_NAME,
- 'uid': TEST_USER_NAME
- })))
+ topology_st.standalone.add_s(Entry((TEST_USER_DN, {
+ 'objectclass': 'top person'.split(),
+ 'objectclass': 'organizationalPerson',
+ 'objectclass': 'inetorgperson',
+ 'cn': TEST_USER_NAME,
+ 'sn': TEST_USER_NAME,
+ 'userpassword': TEST_USER_PWD,
+ 'mail': '%s(a)redhat.com' % TEST_USER_NAME,
+ 'uid': TEST_USER_NAME
+ })))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (TEST_USER_DN,
e.message['desc']))
@@ -120,7 +85,7 @@ def test_user(topology):
@pytest.fixture(scope="module")
-def user_aci(topology):
+def user_aci(topology_st):
"""Deny modifiersName attribute for the test user
under whole suffix
"""
@@ -129,12 +94,12 @@ def user_aci(topology):
ACI_ALLOW = '(version 3.0; acl "Deny modifiersName for user"; deny (read)'
ACI_SUBJECT = ' userdn = "ldap:///%s";)' % TEST_USER_DN
ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT
- topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD,
- 'aci',
- ACI_BODY)])
+ topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD,
+ 'aci',
+ ACI_BODY)])
-def test_supported_features(topology):
+def test_supported_features(topology_st):
"""Verify that OID 1.3.6.1.4.1.4203.1.5.1 is published
in the supportedFeatures [RFC3674] attribute in the rootDSE.
@@ -147,9 +112,9 @@ def test_supported_features(topology):
:Assert: Value 1.3.6.1.4.1.4203.1.5.1 is presented
"""
- entries = topology.standalone.search_s('', ldap.SCOPE_BASE,
- '(objectClass=*)',
- ['supportedFeatures'])
+ entries = topology_st.standalone.search_s('', ldap.SCOPE_BASE,
+ '(objectClass=*)',
+ ['supportedFeatures'])
supported_value = entries[0].data['supportedfeatures']
assert supported_value == ['1.3.6.1.4.1.4203.1.5.1']
@@ -158,7 +123,7 @@ def test_supported_features(topology):
@pytest.mark.parametrize('add_attr', ['', '*', 'objectClass'])
@pytest.mark.parametrize('search_suffix,regular_user,oper_attr_list',
TEST_PARAMS)
-def test_search_basic(topology, test_user, user_aci, add_attr,
+def test_search_basic(topology_st, test_user, user_aci, add_attr,
search_suffix, regular_user, oper_attr_list):
"""Verify that you can get all expected operational attributes
by a Search Request [RFC2251] with '+' (ASCII 43) filter.
@@ -177,9 +142,9 @@ def test_search_basic(topology, test_user, user_aci, add_attr,
"""
if regular_user:
- topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
else:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
search_filter = ['+']
if add_attr:
@@ -188,9 +153,9 @@ def test_search_basic(topology, test_user, user_aci, add_attr,
else:
expected_attrs = sorted(oper_attr_list)
- entries = topology.standalone.search_s(search_suffix, ldap.SCOPE_BASE,
- '(objectclass=*)',
- search_filter)
+ entries = topology_st.standalone.search_s(search_suffix, ldap.SCOPE_BASE,
+ '(objectclass=*)',
+ search_filter)
found_attrs = sorted(entries[0].data.keys())
if add_attr == '*':
diff --git a/dirsrvtests/tests/suites/get_effective_rights/ger_test.py b/dirsrvtests/tests/suites/get_effective_rights/ger_test.py
deleted file mode 100644
index 57c97c3..0000000
--- a/dirsrvtests/tests/suites/get_effective_rights/ger_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_ger_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_ger_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/gssapi_repl/gssapi_repl_test.py b/dirsrvtests/tests/suites/gssapi_repl/gssapi_repl_test.py
index 805dd89..2bae81d 100644
--- a/dirsrvtests/tests/suites/gssapi_repl/gssapi_repl_test.py
+++ b/dirsrvtests/tests/suites/gssapi_repl/gssapi_repl_test.py
@@ -1,3 +1,11 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2016 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
import os
import sys
import time
@@ -11,7 +19,7 @@ from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
from lib389.mit_krb5 import MitKrb5
-
+from lib389.topologies import topology_m2
#########################################
#
@@ -34,94 +42,33 @@ REALM = "EXAMPLE.COM"
HOST_MASTER_1 = 'ldapkdc1.example.com'
HOST_MASTER_2 = 'ldapkdc2.example.com'
-class TopologyReplication(object):
- def __init__(self, master1, master2):
- master1.open()
- self.master1 = master1
- master2.open()
- self.master2 = master2
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- return
- # Create the realm first
- krb = MitKrb5(realm=REALM)
- if krb.check_realm():
- krb.destroy_realm()
- krb.create_realm()
- DEBUG = False
-
- # Creating master 1...
- master1 = DirSrv(verbose=DEBUG)
- args_instance[SER_HOST] = HOST_MASTER_1
- args_instance[SER_PORT] = PORT_MASTER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_instance[SER_REALM] = REALM
- args_instance[SER_STRICT_HOSTNAME_CHECKING] = False
- args_master = args_instance.copy()
- master1.allocate(args_master)
- instance_master1 = master1.exists()
- if instance_master1:
- master1.delete()
- master1.create() # There is some magic in .create that finds the realm, and adds the keytab for us.
- master1.open()
- master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-
- # Creating master 2...
- master2 = DirSrv(verbose=DEBUG)
- args_instance[SER_HOST] = HOST_MASTER_2
- args_instance[SER_PORT] = PORT_MASTER_2
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_instance[SER_REALM] = REALM
- args_instance[SER_STRICT_HOSTNAME_CHECKING] = False
- args_master = args_instance.copy()
- master2.allocate(args_master)
- instance_master2 = master2.exists()
- if instance_master2:
- master2.delete()
- master2.create()
- master2.open()
- master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
- # Delete each instance in the end
- def fin():
- master1.delete()
- master2.delete()
- if krb.check_realm():
- krb.destroy_realm()
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- master1.clearTmpDir(__file__)
-
- return TopologyReplication(master1, master2)
def _create_machine_ou(inst):
- inst.add_s( Entry(( "ou=Machines,%s" % DEFAULT_SUFFIX, {
- 'objectClass' : 'top organizationalUnit'.split(),
- 'ou' : 'Machines'
- }
- ))
- )
+ inst.add_s(Entry(("ou=Machines,%s" % DEFAULT_SUFFIX, {
+ 'objectClass': 'top organizationalUnit'.split(),
+ 'ou': 'Machines'
+ }
+ ))
+ )
+
def _create_machine_account(inst, name):
# Create the simple security objects for the servers to replicate to
- inst.add_s( Entry(( "uid=%s,ou=Machines,%s" % (name, DEFAULT_SUFFIX),
- {
- 'objectClass' : 'top account'.split(),
- 'uid' : name
- }
- )))
+ inst.add_s(Entry(("uid=%s,ou=Machines,%s" % (name, DEFAULT_SUFFIX),
+ {
+ 'objectClass': 'top account'.split(),
+ 'uid': name
+ }
+ )))
+
def _check_machine_account(inst, name):
- r = inst.search_s( 'ou=Machines,%s' % DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=%s)' % name)
+ r = inst.search_s('ou=Machines,%s' % DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=%s)' % name)
if len(r) > 0:
return True
return False
+
def _allow_machine_account(inst, name):
# First we need to get the mapping tree dn
mt = inst.mappingtree.list(suffix=DEFAULT_SUFFIX)[0]
@@ -129,15 +76,15 @@ def _allow_machine_account(inst, name):
(ldap.MOD_REPLACE, 'nsDS5ReplicaBindDN', "uid=%s,ou=Machines,%s" % (name, DEFAULT_SUFFIX))
])
-def test_gssapi_repl(topology):
- """
- Create a kdc, then using that, provision two masters which have a gssapi
+
+def test_gssapi_repl(topology_m2):
+ """Create a kdc, then using that, provision two masters which have a gssapi
authenticated replication agreement.
"""
- return
- master1 = topology.master1
- master2 = topology.master2
+ return
+ master1 = topology_m2.ms["master1"]
+ master2 = topology_m2.ms["master2"]
# Create the locations on each master for the other to bind to.
_create_machine_ou(master1)
@@ -158,8 +105,8 @@ def test_gssapi_repl(topology):
# Creating agreement from master 1 to master 2
# Set the replica bind method to sasl gssapi
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_METHOD: 'SASL/GSSAPI',
+ properties = {RA_NAME: r'meTo_$host:$port',
+ RA_METHOD: 'SASL/GSSAPI',
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
if not m1_m2_agmt:
@@ -170,8 +117,8 @@ def test_gssapi_repl(topology):
# Creating agreement from master 2 to master 1
# Set the replica bind method to sasl gssapi
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_METHOD: 'SASL/GSSAPI',
+ properties = {RA_NAME: r'meTo_$host:$port',
+ RA_METHOD: 'SASL/GSSAPI',
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
if not m2_m1_agmt:
@@ -199,15 +146,12 @@ def test_gssapi_repl(topology):
_create_machine_account(master1, 'http/one.example.com')
# Check it's on 2
time.sleep(5)
- assert(_check_machine_account(master2, 'http/one.example.com'))
+ assert (_check_machine_account(master2, 'http/one.example.com'))
# Add a user to master 2
_create_machine_account(master2, 'http/two.example.com')
# Check it's on 1
time.sleep(5)
- assert(_check_machine_account(master2, 'http/two.example.com'))
-
-
- log.info('Test complete')
+ assert (_check_machine_account(master2, 'http/two.example.com'))
if __name__ == '__main__':
diff --git a/dirsrvtests/tests/suites/ldapi/ldapi_test.py b/dirsrvtests/tests/suites/ldapi/ldapi_test.py
deleted file mode 100644
index 2c30fc0..0000000
--- a/dirsrvtests/tests/suites/ldapi/ldapi_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_ldapi_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_ldapi_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/linkedattrs_plugin/linked_attrs_test.py b/dirsrvtests/tests/suites/linkedattrs_plugin/linked_attrs_test.py
deleted file mode 100644
index 461da3f..0000000
--- a/dirsrvtests/tests/suites/linkedattrs_plugin/linked_attrs_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_linked_attrs_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_linked_attrs_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/mapping_tree/mapping_tree_test.py b/dirsrvtests/tests/suites/mapping_tree/mapping_tree_test.py
deleted file mode 100644
index d7a436b..0000000
--- a/dirsrvtests/tests/suites/mapping_tree/mapping_tree_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_mapping_tree_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_mapping_tree_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/memberof_plugin/memberof_test.py b/dirsrvtests/tests/suites/memberof_plugin/memberof_test.py
index 2123a7c..7ded0f4 100644
--- a/dirsrvtests/tests/suites/memberof_plugin/memberof_test.py
+++ b/dirsrvtests/tests/suites/memberof_plugin/memberof_test.py
@@ -1,12 +1,11 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-
import os
import sys
import time
@@ -19,10 +18,10 @@ from lib389._constants import *
from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config')
USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX
@@ -30,69 +29,31 @@ USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX
GROUP_DN = 'cn=group,' + DEFAULT_SUFFIX
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- return TopologyStandalone(standalone)
-
-
-def test_memberof_auto_add_oc(topology):
- """
- Test the auto add objectclass feature. The plugin should add a predefined
+def test_memberof_auto_add_oc(topology_st):
+ """Test the auto add objectclass feature. The plugin should add a predefined
objectclass that will allow memberOf to be added to an entry.
"""
# enable dynamic plugins
try:
- topology.standalone.modify_s(DN_CONFIG,
- [(ldap.MOD_REPLACE,
- 'nsslapd-dynamic-plugins',
- 'on')])
+ topology_st.standalone.modify_s(DN_CONFIG,
+ [(ldap.MOD_REPLACE,
+ 'nsslapd-dynamic-plugins',
+ 'on')])
except ldap.LDAPError as e:
ldap.error('Failed to enable dynamic plugins! ' + e.message['desc'])
assert False
# Enable the plugin
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
# First test invalid value (config validation)
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
try:
- topology.standalone.modify_s(MEMBEROF_PLUGIN_DN,
- [(ldap.MOD_REPLACE,
- 'memberofAutoAddOC',
- 'invalid123')])
+ topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN,
+ [(ldap.MOD_REPLACE,
+ 'memberofAutoAddOC',
+ 'invalid123')])
log.fatal('Incorrectly added invalid objectclass!')
assert False
except ldap.UNWILLING_TO_PERFORM:
@@ -102,65 +63,65 @@ def test_memberof_auto_add_oc(topology):
assert False
# Add valid objectclass
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
try:
- topology.standalone.modify_s(MEMBEROF_PLUGIN_DN,
- [(ldap.MOD_REPLACE,
- 'memberofAutoAddOC',
- 'inetuser')])
+ topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN,
+ [(ldap.MOD_REPLACE,
+ 'memberofAutoAddOC',
+ 'inetuser')])
except ldap.LDAPError as e:
log.fatal('Failed to configure memberOf plugin: error ' + e.message['desc'])
assert False
# Add two users
try:
- topology.standalone.add_s(Entry((USER1_DN,
- {'objectclass': 'top',
- 'objectclass': 'person',
- 'objectclass': 'organizationalPerson',
- 'objectclass': 'inetorgperson',
- 'sn': 'last',
- 'cn': 'full',
- 'givenname': 'user1',
- 'uid': 'user1'
- })))
+ topology_st.standalone.add_s(Entry((USER1_DN,
+ {'objectclass': 'top',
+ 'objectclass': 'person',
+ 'objectclass': 'organizationalPerson',
+ 'objectclass': 'inetorgperson',
+ 'sn': 'last',
+ 'cn': 'full',
+ 'givenname': 'user1',
+ 'uid': 'user1'
+ })))
except ldap.LDAPError as e:
log.fatal('Failed to add user1 entry, error: ' + e.message['desc'])
assert False
try:
- topology.standalone.add_s(Entry((USER2_DN,
- {'objectclass': 'top',
- 'objectclass': 'person',
- 'objectclass': 'organizationalPerson',
- 'objectclass': 'inetorgperson',
- 'sn': 'last',
- 'cn': 'full',
- 'givenname': 'user2',
- 'uid': 'user2'
- })))
+ topology_st.standalone.add_s(Entry((USER2_DN,
+ {'objectclass': 'top',
+ 'objectclass': 'person',
+ 'objectclass': 'organizationalPerson',
+ 'objectclass': 'inetorgperson',
+ 'sn': 'last',
+ 'cn': 'full',
+ 'givenname': 'user2',
+ 'uid': 'user2'
+ })))
except ldap.LDAPError as e:
log.fatal('Failed to add user2 entry, error: ' + e.message['desc'])
assert False
# Add a group(that already includes one user
try:
- topology.standalone.add_s(Entry((GROUP_DN,
- {'objectclass': 'top',
- 'objectclass': 'groupOfNames',
- 'cn': 'group',
- 'member': USER1_DN
- })))
+ topology_st.standalone.add_s(Entry((GROUP_DN,
+ {'objectclass': 'top',
+ 'objectclass': 'groupOfNames',
+ 'cn': 'group',
+ 'member': USER1_DN
+ })))
except ldap.LDAPError as e:
log.fatal('Failed to add group entry, error: ' + e.message['desc'])
assert False
# Add a user to the group
try:
- topology.standalone.modify_s(GROUP_DN,
- [(ldap.MOD_ADD,
- 'member',
- USER2_DN)])
+ topology_st.standalone.modify_s(GROUP_DN,
+ [(ldap.MOD_ADD,
+ 'member',
+ USER2_DN)])
except ldap.LDAPError as e:
log.fatal('Failed to add user2 to group: error ' + e.message['desc'])
assert False
@@ -172,4 +133,4 @@ if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
\ No newline at end of file
+ pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/memory_leaks/range_search_test.py b/dirsrvtests/tests/suites/memory_leaks/range_search_test.py
index d8d591f..0c8e100 100644
--- a/dirsrvtests/tests/suites/memory_leaks/range_search_test.py
+++ b/dirsrvtests/tests/suites/memory_leaks/range_search_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -18,82 +18,51 @@ from lib389._constants import *
from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- def fin():
- standalone.delete()
- if not standalone.has_asan():
- sbin_dir = standalone.get_sbin_dir()
- valgrind_disable(sbin_dir)
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_range_search_init(topology):
- '''
- Enable retro cl, and valgrind. Since valgrind tests move the ns-slapd binary
+def setup(topology_st, request):
+ """Enable retro cl, and valgrind. Since valgrind tests move the ns-slapd binary
around it's important to always "valgrind_disable" before "assert False"ing,
otherwise we leave the wrong ns-slapd in place if there is a failure
- '''
+ """
log.info('Initializing test_range_search...')
- topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
+ topology_st.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
# First stop the instance
- topology.standalone.stop(timeout=30)
+ topology_st.standalone.stop(timeout=30)
# Get the sbin directory so we know where to replace 'ns-slapd'
- sbin_dir = get_sbin_dir(prefix=topology.standalone.prefix)
+ sbin_dir = get_sbin_dir(prefix=topology_st.standalone.prefix)
# Enable valgrind
- if not topology.standalone.has_asan():
+ if not topology_st.standalone.has_asan():
valgrind_enable(sbin_dir)
+ def fin():
+ if not topology_st.standalone.has_asan():
+ topology_st.standalone.stop(timeout=30)
+ sbin_dir = topology_st.standalone.get_sbin_dir()
+ valgrind_disable(sbin_dir)
+ topology_st.standalone.start()
+
+ request.addfinalizer(fin)
+
# Now start the server with a longer timeout
- topology.standalone.start()
+ topology_st.standalone.start()
-def test_range_search(topology):
- '''
- Add a 100 entries, and run a range search. When we encounter an error we
- still need to disable valgrind before exiting
- '''
+def test_range_search(topology_st, setup):
+ """Add a 100 entries, and run a range search.
+ When we encounter an error we still need to
+ disable valgrind before exiting
+ """
log.info('Running test_range_search...')
@@ -104,37 +73,34 @@ def test_range_search(topology):
idx = str(idx)
USER_DN = 'uid=user' + idx + ',' + DEFAULT_SUFFIX
try:
- topology.standalone.add_s(Entry((USER_DN, {'objectclass': "top extensibleObject".split(),
- 'uid': 'user' + idx})))
+ topology_st.standalone.add_s(Entry((USER_DN, {'objectclass': "top extensibleObject".split(),
+ 'uid': 'user' + idx})))
except ldap.LDAPError as e:
log.fatal('test_range_search: Failed to add test user ' + USER_DN + ': error ' + e.message['desc'])
success = False
time.sleep(1)
+ # Issue range search
if success:
- # Issue range search
try:
- topology.standalone.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE,
- '(&(changenumber>=74)(changenumber<=84))')
+ topology_st.standalone.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE,
+ '(&(changenumber>=74)(changenumber<=84))')
except ldap.LDAPError as e:
log.fatal('test_range_search: Failed to search retro changelog(%s), error: %s' %
(RETROCL_SUFFIX, e.message('desc')))
success = False
- if success and not topology.standalone.has_asan():
+ if success and not topology_st.standalone.has_asan():
# Get the results file, stop the server, and check for the leak
- results_file = valgrind_get_results_file(topology.standalone)
- topology.standalone.stop(timeout=30)
+ results_file = valgrind_get_results_file(topology_st.standalone)
+ topology_st.standalone.stop(timeout=30)
if valgrind_check_file(results_file, VALGRIND_LEAK_STR, 'range_candidates'):
log.fatal('test_range_search: Memory leak is still present!')
assert False
- log.info('test_range_search: PASSED')
-
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s %s" % CURRENT_FILE)
-
diff --git a/dirsrvtests/tests/suites/monitor/monitor_test.py b/dirsrvtests/tests/suites/monitor/monitor_test.py
deleted file mode 100644
index c1fc303..0000000
--- a/dirsrvtests/tests/suites/monitor/monitor_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_monitor_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_monitor_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/paged_results/paged_results_test.py b/dirsrvtests/tests/suites/paged_results/paged_results_test.py
index 273d2eb..774b255 100644
--- a/dirsrvtests/tests/suites/paged_results/paged_results_test.py
+++ b/dirsrvtests/tests/suites/paged_results/paged_results_test.py
@@ -17,6 +17,7 @@ from lib389._constants import *
from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
from sss_control import SSSRequestControl
DEBUGGING = False
@@ -39,55 +40,22 @@ NEW_BACKEND_1 = 'parent_base'
NEW_BACKEND_2 = 'child_base'
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- return TopologyStandalone(standalone)
-
-
@pytest.fixture(scope="module")
-def test_user(topology, request):
+def test_user(topology_st, request):
"""User for binding operation"""
log.info('Adding user {}'.format(TEST_USER_DN))
try:
- topology.standalone.add_s(Entry((TEST_USER_DN, {
- 'objectclass': 'top person'.split(),
- 'objectclass': 'organizationalPerson',
- 'objectclass': 'inetorgperson',
- 'cn': TEST_USER_NAME,
- 'sn': TEST_USER_NAME,
- 'userpassword': TEST_USER_PWD,
- 'mail': '%s@redhat.com' % TEST_USER_NAME,
- 'uid': TEST_USER_NAME
- })))
+ topology_st.standalone.add_s(Entry((TEST_USER_DN, {
+ 'objectclass': 'top person'.split(),
+ 'objectclass': 'organizationalPerson',
+ 'objectclass': 'inetorgperson',
+ 'cn': TEST_USER_NAME,
+ 'sn': TEST_USER_NAME,
+ 'userpassword': TEST_USER_PWD,
+ 'mail': '%s@redhat.com' % TEST_USER_NAME,
+ 'uid': TEST_USER_NAME
+ })))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (TEST_USER_DN,
e.message['desc']))
@@ -95,45 +63,46 @@ def test_user(topology, request):
def fin():
log.info('Deleting user {}'.format(TEST_USER_DN))
- topology.standalone.delete_s(TEST_USER_DN)
+ topology_st.standalone.delete_s(TEST_USER_DN)
+
request.addfinalizer(fin)
@pytest.fixture(scope="module")
-def new_suffixes(topology):
+def new_suffixes(topology_st):
"""Add two suffixes with backends, one is a parent
of the another
"""
log.info('Adding suffix:{} and backend: {}'.format(NEW_SUFFIX_1, NEW_BACKEND_1))
- topology.standalone.backend.create(NEW_SUFFIX_1,
- {BACKEND_NAME: NEW_BACKEND_1})
- topology.standalone.mappingtree.create(NEW_SUFFIX_1,
- bename=NEW_BACKEND_1)
+ topology_st.standalone.backend.create(NEW_SUFFIX_1,
+ {BACKEND_NAME: NEW_BACKEND_1})
+ topology_st.standalone.mappingtree.create(NEW_SUFFIX_1,
+ bename=NEW_BACKEND_1)
try:
- topology.standalone.add_s(Entry((NEW_SUFFIX_1, {
- 'objectclass': 'top',
- 'objectclass': 'organization',
- 'o': NEW_SUFFIX_1_NAME
- })))
+ topology_st.standalone.add_s(Entry((NEW_SUFFIX_1, {
+ 'objectclass': 'top',
+ 'objectclass': 'organization',
+ 'o': NEW_SUFFIX_1_NAME
+ })))
except ldap.LDAPError as e:
log.error('Failed to add suffix ({}): error ({})'.format(NEW_SUFFIX_1,
e.message['desc']))
raise
log.info('Adding suffix:{} and backend: {}'.format(NEW_SUFFIX_2, NEW_BACKEND_2))
- topology.standalone.backend.create(NEW_SUFFIX_2,
- {BACKEND_NAME: NEW_BACKEND_2})
- topology.standalone.mappingtree.create(NEW_SUFFIX_2,
- bename=NEW_BACKEND_2,
- parent=NEW_SUFFIX_1)
+ topology_st.standalone.backend.create(NEW_SUFFIX_2,
+ {BACKEND_NAME: NEW_BACKEND_2})
+ topology_st.standalone.mappingtree.create(NEW_SUFFIX_2,
+ bename=NEW_BACKEND_2,
+ parent=NEW_SUFFIX_1)
try:
- topology.standalone.add_s(Entry((NEW_SUFFIX_2, {
- 'objectclass': 'top',
- 'objectclass': 'organizationalunit',
- 'ou': NEW_SUFFIX_2_NAME
- })))
+ topology_st.standalone.add_s(Entry((NEW_SUFFIX_2, {
+ 'objectclass': 'top',
+ 'objectclass': 'organizationalunit',
+ 'ou': NEW_SUFFIX_2_NAME
+ })))
except ldap.LDAPError as e:
log.error('Failed to add suffix ({}): error ({})'.format(NEW_SUFFIX_2,
e.message['desc']))
@@ -146,10 +115,10 @@ def new_suffixes(topology):
ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT
mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)]
- topology.standalone.modify_s(NEW_SUFFIX_1, mod)
+ topology_st.standalone.modify_s(NEW_SUFFIX_1, mod)
-def add_users(topology, users_num, suffix):
+def add_users(topology_st, users_num, suffix):
"""Add users to the default suffix
Return the list of added user DNs.
@@ -163,15 +132,15 @@ def add_users(topology, users_num, suffix):
USER_DN = 'uid=%s,%s' % (USER_NAME, suffix)
users_list.append(USER_DN)
try:
- topology.standalone.add_s(Entry((USER_DN, {
- 'objectclass': 'top person'.split(),
- 'objectclass': 'organizationalPerson',
- 'objectclass': 'inetorgperson',
- 'cn': USER_NAME,
- 'sn': USER_NAME,
- 'userpassword': 'pass%s' % num_ran,
- 'mail': '%s@redhat.com' % USER_NAME,
- 'uid': USER_NAME})))
+ topology_st.standalone.add_s(Entry((USER_DN, {
+ 'objectclass': 'top person'.split(),
+ 'objectclass': 'organizationalPerson',
+ 'objectclass': 'inetorgperson',
+ 'cn': USER_NAME,
+ 'sn': USER_NAME,
+ 'userpassword': 'pass%s' % num_ran,
+ 'mail': '%s@redhat.com' % USER_NAME,
+ 'uid': USER_NAME})))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
@@ -179,40 +148,40 @@ def add_users(topology, users_num, suffix):
return users_list
-def del_users(topology, users_list):
+def del_users(topology_st, users_list):
"""Delete users with DNs from given list"""
log.info('Deleting %d users' % len(users_list))
for user_dn in users_list:
try:
- topology.standalone.delete_s(user_dn)
+ topology_st.standalone.delete_s(user_dn)
except ldap.LDAPError as e:
log.error('Failed to delete user (%s): error (%s)' % (user_dn,
e.message['desc']))
raise e
-def change_conf_attr(topology, suffix, attr_name, attr_value):
+def change_conf_attr(topology_st, suffix, attr_name, attr_value):
"""Change configurational attribute in the given suffix.
Returns previous attribute value.
"""
try:
- entries = topology.standalone.search_s(suffix, ldap.SCOPE_BASE,
- 'objectclass=top',
- [attr_name])
+ entries = topology_st.standalone.search_s(suffix, ldap.SCOPE_BASE,
+ 'objectclass=top',
+ [attr_name])
attr_value_bck = entries[0].data.get(attr_name)
log.info('Set %s to %s. Previous value - %s. Modified suffix - %s.' % (
- attr_name, attr_value, attr_value_bck, suffix))
+ attr_name, attr_value, attr_value_bck, suffix))
if attr_value is None:
- topology.standalone.modify_s(suffix, [(ldap.MOD_DELETE,
- attr_name,
- attr_value)])
+ topology_st.standalone.modify_s(suffix, [(ldap.MOD_DELETE,
+ attr_name,
+ attr_value)])
else:
- topology.standalone.modify_s(suffix, [(ldap.MOD_REPLACE,
- attr_name,
- attr_value)])
+ topology_st.standalone.modify_s(suffix, [(ldap.MOD_REPLACE,
+ attr_name,
+ attr_value)])
except ldap.LDAPError as e:
log.error('Failed to change attr value (%s): error (%s)' % (attr_name,
e.message['desc']))
@@ -221,7 +190,7 @@ def change_conf_attr(topology, suffix, attr_name, attr_value):
return attr_value_bck
-def paged_search(topology, suffix, controls, search_flt, searchreq_attrlist):
+def paged_search(topology_st, suffix, controls, search_flt, searchreq_attrlist):
"""Search at the DEFAULT_SUFFIX with ldap.SCOPE_SUBTREE
using Simple Paged Control(should the first item in the
list controls.
@@ -240,14 +209,14 @@ def paged_search(topology, suffix, controls, search_flt, searchreq_attrlist):
searchreq_attrlist,
req_pr_ctrl.size,
str(controls)))
- msgid = topology.standalone.search_ext(suffix,
- ldap.SCOPE_SUBTREE,
- search_flt,
- searchreq_attrlist,
- serverctrls=controls)
+ msgid = topology_st.standalone.search_ext(suffix,
+ ldap.SCOPE_SUBTREE,
+ search_flt,
+ searchreq_attrlist,
+ serverctrls=controls)
while True:
log.info('Getting page %d' % (pages,))
- rtype, rdata, rmsgid, rctrls = topology.standalone.result3(msgid)
+ rtype, rdata, rmsgid, rctrls = topology_st.standalone.result3(msgid)
log.debug('Data: {}'.format(rdata))
all_results.extend(rdata)
pages += 1
@@ -255,18 +224,18 @@ def paged_search(topology, suffix, controls, search_flt, searchreq_attrlist):
c
for c in rctrls
if c.controlType == SimplePagedResultsControl.controlType
- ]
+ ]
if pctrls:
if pctrls[0].cookie:
# Copy cookie from response control to request control
log.debug('Cookie: {}'.format(pctrls[0].cookie))
req_pr_ctrl.cookie = pctrls[0].cookie
- msgid = topology.standalone.search_ext(suffix,
- ldap.SCOPE_SUBTREE,
- search_flt,
- searchreq_attrlist,
- serverctrls=controls)
+ msgid = topology_st.standalone.search_ext(suffix,
+ ldap.SCOPE_SUBTREE,
+ search_flt,
+ searchreq_attrlist,
+ serverctrls=controls)
else:
break # No more pages available
else:
@@ -278,7 +247,7 @@ def paged_search(topology, suffix, controls, search_flt, searchreq_attrlist):
@pytest.mark.parametrize("page_size,users_num",
[(6, 5), (5, 5), (5, 25)])
-def test_search_success(topology, test_user, page_size, users_num):
+def test_search_success(topology_st, test_user, page_size, users_num):
"""Verify that search with a simple paged results control
returns all entries it should without errors.
@@ -293,40 +262,39 @@ def test_search_success(topology, test_user, page_size, users_num):
:Assert: All users should be found
"""
- users_list = add_users(topology, users_num, DEFAULT_SUFFIX)
+ users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)
search_flt = r'(uid=test*)'
searchreq_attrlist = ['dn', 'sn']
try:
log.info('Set user bind')
- topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
- all_results = paged_search(topology, DEFAULT_SUFFIX, [req_ctrl],
+ all_results = paged_search(topology_st, DEFAULT_SUFFIX, [req_ctrl],
search_flt, searchreq_attrlist)
log.info('%d results' % len(all_results))
assert len(all_results) == len(users_list)
finally:
log.info('Set Directory Manager bind back (test_search_success)')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- del_users(topology, users_list)
-
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ del_users(topology_st, users_list)
@pytest.mark.parametrize("page_size,users_num,suffix,attr_name,attr_value,expected_err", [
- (50, 200, 'cn=config,%s' % DN_LDBM, 'nsslapd-idlistscanlimit', '100',
- ldap.UNWILLING_TO_PERFORM),
- (5, 15, DN_CONFIG, 'nsslapd-timelimit', '20',
- ldap.UNAVAILABLE_CRITICAL_EXTENSION),
- (21, 50, DN_CONFIG, 'nsslapd-sizelimit', '20',
- ldap.SIZELIMIT_EXCEEDED),
- (21, 50, DN_CONFIG, 'nsslapd-pagedsizelimit', '5',
- ldap.SIZELIMIT_EXCEEDED),
- (5, 50, 'cn=config,%s' % DN_LDBM, 'nsslapd-lookthroughlimit', '20',
- ldap.ADMINLIMIT_EXCEEDED)])
-def test_search_limits_fail(topology, test_user, page_size, users_num,
+ (50, 200, 'cn=config,%s' % DN_LDBM, 'nsslapd-idlistscanlimit', '100',
+ ldap.UNWILLING_TO_PERFORM),
+ (5, 15, DN_CONFIG, 'nsslapd-timelimit', '20',
+ ldap.UNAVAILABLE_CRITICAL_EXTENSION),
+ (21, 50, DN_CONFIG, 'nsslapd-sizelimit', '20',
+ ldap.SIZELIMIT_EXCEEDED),
+ (21, 50, DN_CONFIG, 'nsslapd-pagedsizelimit', '5',
+ ldap.SIZELIMIT_EXCEEDED),
+ (5, 50, 'cn=config,%s' % DN_LDBM, 'nsslapd-lookthroughlimit', '20',
+ ldap.ADMINLIMIT_EXCEEDED)])
+def test_search_limits_fail(topology_st, test_user, page_size, users_num,
suffix, attr_name, attr_value, expected_err):
"""Verify that search with a simple paged results control
throws expected exceptoins when corresponding limits are
@@ -345,8 +313,8 @@ def test_search_limits_fail(topology, test_user, page_size, users_num,
:Assert: Should fail with appropriate exception
"""
- users_list = add_users(topology, users_num, DEFAULT_SUFFIX)
- attr_value_bck = change_conf_attr(topology, suffix, attr_name, attr_value)
+ users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)
+ attr_value_bck = change_conf_attr(topology_st, suffix, attr_name, attr_value)
conf_param_dict = {attr_name: attr_value}
search_flt = r'(uid=test*)'
searchreq_attrlist = ['dn', 'sn']
@@ -354,7 +322,7 @@ def test_search_limits_fail(topology, test_user, page_size, users_num,
try:
log.info('Set user bind')
- topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
log.info('Create simple paged results control instance')
req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
@@ -363,11 +331,11 @@ def test_search_limits_fail(topology, test_user, page_size, users_num,
sort_ctrl = SSSRequestControl(True, ['sn'])
controls.append(sort_ctrl)
log.info('Initiate ldapsearch with created control instance')
- msgid = topology.standalone.search_ext(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- search_flt,
- searchreq_attrlist,
- serverctrls=controls)
+ msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ search_flt,
+ searchreq_attrlist,
+ serverctrls=controls)
time_val = conf_param_dict.get('nsslapd-timelimit')
if time_val:
@@ -380,42 +348,42 @@ def test_search_limits_fail(topology, test_user, page_size, users_num,
log.info('Getting page %d' % (pages,))
if pages == 0 and (time_val or attr_name in ('nsslapd-lookthroughlimit',
'nsslapd-pagesizelimit')):
- rtype, rdata, rmsgid, rctrls = topology.standalone.result3(msgid)
+ rtype, rdata, rmsgid, rctrls = topology_st.standalone.result3(msgid)
else:
with pytest.raises(expected_err):
- rtype, rdata, rmsgid, rctrls = topology.standalone.result3(msgid)
+ rtype, rdata, rmsgid, rctrls = topology_st.standalone.result3(msgid)
all_results.extend(rdata)
pages += 1
pctrls = [
c
for c in rctrls
if c.controlType == SimplePagedResultsControl.controlType
- ]
+ ]
if pctrls:
if pctrls[0].cookie:
# Copy cookie from response control to request control
req_ctrl.cookie = pctrls[0].cookie
- msgid = topology.standalone.search_ext(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- search_flt,
- searchreq_attrlist,
- serverctrls=controls)
+ msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ search_flt,
+ searchreq_attrlist,
+ serverctrls=controls)
else:
break # No more pages available
else:
break
finally:
if expected_err == ldap.UNAVAILABLE_CRITICAL_EXTENSION:
- topology.standalone.open()
+ topology_st.standalone.open()
log.info('Set Directory Manager bind back (test_search_limits_fail)')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- del_users(topology, users_list)
- change_conf_attr(topology, suffix, attr_name, attr_value_bck)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ del_users(topology_st, users_list)
+ change_conf_attr(topology_st, suffix, attr_name, attr_value_bck)
-def test_search_sort_success(topology, test_user):
+def test_search_sort_success(topology_st, test_user):
"""Verify that search with a simple paged results control
and a server side sort control returns all entries
it should without errors.
@@ -434,13 +402,13 @@ def test_search_sort_success(topology, test_user):
users_num = 50
page_size = 5
- users_list = add_users(topology, users_num, DEFAULT_SUFFIX)
+ users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)
search_flt = r'(uid=test*)'
searchreq_attrlist = ['dn', 'sn']
try:
log.info('Set user bind')
- topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
sort_ctrl = SSSRequestControl(True, ['sn'])
@@ -448,21 +416,21 @@ def test_search_sort_success(topology, test_user):
log.info('Initiate ldapsearch with created control instance')
log.info('Collect data with sorting')
controls = [req_ctrl, sort_ctrl]
- results_sorted = paged_search(topology, DEFAULT_SUFFIX, controls,
+ results_sorted = paged_search(topology_st, DEFAULT_SUFFIX, controls,
search_flt, searchreq_attrlist)
log.info('Substring numbers from user DNs')
r_nums = map(lambda x: int(x[0][8:13]), results_sorted)
log.info('Assert that list is sorted')
- assert all(r_nums[i] <= r_nums[i+1] for i in range(len(r_nums)-1))
+ assert all(r_nums[i] <= r_nums[i + 1] for i in range(len(r_nums) - 1))
finally:
log.info('Set Directory Manager bind back (test_search_sort_success)')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- del_users(topology, users_list)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ del_users(topology_st, users_list)
-def test_search_abandon(topology, test_user):
+def test_search_abandon(topology_st, test_user):
"""Verify that search with simple paged results control
can be abandon
@@ -481,37 +449,37 @@ def test_search_abandon(topology, test_user):
users_num = 10
page_size = 2
- users_list = add_users(topology, users_num, DEFAULT_SUFFIX)
+ users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)
search_flt = r'(uid=test*)'
searchreq_attrlist = ['dn', 'sn']
try:
log.info('Set user bind')
- topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
log.info('Create simple paged results control instance')
req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
controls = [req_ctrl]
log.info('Initiate a search with a paged results control')
- msgid = topology.standalone.search_ext(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- search_flt,
- searchreq_attrlist,
- serverctrls=controls)
+ msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ search_flt,
+ searchreq_attrlist,
+ serverctrls=controls)
log.info('Abandon the search')
- topology.standalone.abandon(msgid)
+ topology_st.standalone.abandon(msgid)
log.info('Expect an ldap.TIMEOUT exception, while trying to get the search results')
with pytest.raises(ldap.TIMEOUT):
- topology.standalone.result3(msgid, timeout=5)
+ topology_st.standalone.result3(msgid, timeout=5)
finally:
log.info('Set Directory Manager bind back (test_search_abandon)')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- del_users(topology, users_list)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ del_users(topology_st, users_list)
-def test_search_with_timelimit(topology, test_user):
+def test_search_with_timelimit(topology_st, test_user):
"""Verify that after performing multiple simple paged searches
to completion, each with a timelimit, it wouldn't fail, if we sleep
for a time more than the timelimit.
@@ -533,13 +501,13 @@ def test_search_with_timelimit(topology, test_user):
users_num = 100
page_size = 50
timelimit = 5
- users_list = add_users(topology, users_num, DEFAULT_SUFFIX)
+ users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)
search_flt = r'(uid=test*)'
searchreq_attrlist = ['dn', 'sn']
try:
log.info('Set user bind')
- topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
log.info('Create simple paged results control instance')
req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
@@ -547,52 +515,52 @@ def test_search_with_timelimit(topology, test_user):
for ii in range(3):
log.info('Iteration %d' % ii)
- msgid = topology.standalone.search_ext(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- search_flt,
- searchreq_attrlist,
- serverctrls=controls,
- timeout=timelimit)
+ msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ search_flt,
+ searchreq_attrlist,
+ serverctrls=controls,
+ timeout=timelimit)
pages = 0
pctrls = []
while True:
log.info('Getting page %d' % (pages,))
- rtype, rdata, rmsgid, rctrls = topology.standalone.result3(msgid)
+ rtype, rdata, rmsgid, rctrls = topology_st.standalone.result3(msgid)
pages += 1
pctrls = [
c
for c in rctrls
if c.controlType == SimplePagedResultsControl.controlType
- ]
+ ]
if pctrls:
if pctrls[0].cookie:
# Copy cookie from response control to request control
req_ctrl.cookie = pctrls[0].cookie
- msgid = topology.standalone.search_ext(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- search_flt,
- searchreq_attrlist,
- serverctrls=controls,
- timeout=timelimit)
+ msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ search_flt,
+ searchreq_attrlist,
+ serverctrls=controls,
+ timeout=timelimit)
else:
log.info('Done with this search - sleeping %d seconds' % (
- timelimit * 2))
+ timelimit * 2))
time.sleep(timelimit * 2)
break # No more pages available
else:
break
finally:
log.info('Set Directory Manager bind back (test_search_with_timelimit)')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- del_users(topology, users_list)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ del_users(topology_st, users_list)
@pytest.mark.parametrize('aci_subject',
('dns = "localhost.localdomain"',
'ip = "::1" or ip = "127.0.0.1"'))
-def test_search_dns_ip_aci(topology, test_user, aci_subject):
+def test_search_dns_ip_aci(topology_st, test_user, aci_subject):
"""Verify that after performing multiple simple paged searches
to completion on the suffix with DNS or IP based ACI
@@ -615,13 +583,13 @@ def test_search_dns_ip_aci(topology, test_user, aci_subject):
users_num = 100
page_size = 5
- users_list = add_users(topology, users_num, DEFAULT_SUFFIX)
+ users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)
search_flt = r'(uid=test*)'
searchreq_attrlist = ['dn', 'sn']
try:
log.info('Back up current suffix ACI')
- acis_bck = topology.standalone.aci.list(DEFAULT_SUFFIX, ldap.SCOPE_BASE)
+ acis_bck = topology_st.standalone.aci.list(DEFAULT_SUFFIX, ldap.SCOPE_BASE)
log.info('Add test ACI')
ACI_TARGET = '(targetattr != "userPassword")'
@@ -629,15 +597,15 @@ def test_search_dns_ip_aci(topology, test_user, aci_subject):
ACI_SUBJECT = '(userdn = "ldap:///anyone") and (%s);)' % aci_subject
ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT
try:
- topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_REPLACE,
- 'aci',
- ACI_BODY)])
+ topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_REPLACE,
+ 'aci',
+ ACI_BODY)])
except ldap.LDAPError as e:
log.fatal('Failed to add ACI: error (%s)' % (e.message['desc']))
raise e
log.info('Set user bind')
- topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
log.info('Create simple paged results control instance')
req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
@@ -646,7 +614,7 @@ def test_search_dns_ip_aci(topology, test_user, aci_subject):
log.info('Initiate three searches with a paged results control')
for ii in range(3):
log.info('%d search' % (ii + 1))
- all_results = paged_search(topology, DEFAULT_SUFFIX, controls,
+ all_results = paged_search(topology_st, DEFAULT_SUFFIX, controls,
search_flt, searchreq_attrlist)
log.info('%d results' % len(all_results))
assert len(all_results) == len(users_list)
@@ -654,19 +622,19 @@ def test_search_dns_ip_aci(topology, test_user, aci_subject):
finally:
log.info('Set Directory Manager bind back (test_search_dns_ip_aci)')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
log.info('Restore ACI')
- topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE,
- 'aci',
- None)])
+ topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE,
+ 'aci',
+ None)])
for aci in acis_bck:
- topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD,
- 'aci',
- aci.getRawAci())])
- del_users(topology, users_list)
+ topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD,
+ 'aci',
+ aci.getRawAci())])
+ del_users(topology_st, users_list)
-def test_search_multiple_paging(topology, test_user):
+def test_search_multiple_paging(topology_st, test_user):
"""Verify that after performing multiple simple paged searches
on a single connection without a complition, it wouldn't fail.
@@ -685,13 +653,13 @@ def test_search_multiple_paging(topology, test_user):
users_num = 100
page_size = 30
- users_list = add_users(topology, users_num, DEFAULT_SUFFIX)
+ users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)
search_flt = r'(uid=test*)'
searchreq_attrlist = ['dn', 'sn']
try:
log.info('Set user bind')
- topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
log.info('Create simple paged results control instance')
req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
@@ -699,33 +667,33 @@ def test_search_multiple_paging(topology, test_user):
for ii in range(3):
log.info('Iteration %d' % ii)
- msgid = topology.standalone.search_ext(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- search_flt,
- searchreq_attrlist,
- serverctrls=controls)
- rtype, rdata, rmsgid, rctrls = topology.standalone.result3(msgid)
+ msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ search_flt,
+ searchreq_attrlist,
+ serverctrls=controls)
+ rtype, rdata, rmsgid, rctrls = topology_st.standalone.result3(msgid)
pctrls = [
c
for c in rctrls
if c.controlType == SimplePagedResultsControl.controlType
- ]
+ ]
# Copy cookie from response control to request control
req_ctrl.cookie = pctrls[0].cookie
- msgid = topology.standalone.search_ext(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- search_flt,
- searchreq_attrlist,
- serverctrls=controls)
+ msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ search_flt,
+ searchreq_attrlist,
+ serverctrls=controls)
finally:
log.info('Set Directory Manager bind back (test_search_multiple_paging)')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- del_users(topology, users_list)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ del_users(topology_st, users_list)
@pytest.mark.parametrize("invalid_cookie", [1000, -1])
-def test_search_invalid_cookie(topology, test_user, invalid_cookie):
+def test_search_invalid_cookie(topology_st, test_user, invalid_cookie):
"""Verify that using invalid cookie while performing
search with the simple paged results control throws
a TypeError exception
@@ -745,41 +713,41 @@ def test_search_invalid_cookie(topology, test_user, invalid_cookie):
users_num = 100
page_size = 50
- users_list = add_users(topology, users_num, DEFAULT_SUFFIX)
+ users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)
search_flt = r'(uid=test*)'
searchreq_attrlist = ['dn', 'sn']
try:
log.info('Set user bind')
- topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
log.info('Create simple paged results control instance')
req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
controls = [req_ctrl]
- msgid = topology.standalone.search_ext(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- search_flt,
- searchreq_attrlist,
- serverctrls=controls)
- rtype, rdata, rmsgid, rctrls = topology.standalone.result3(msgid)
+ msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ search_flt,
+ searchreq_attrlist,
+ serverctrls=controls)
+ rtype, rdata, rmsgid, rctrls = topology_st.standalone.result3(msgid)
log.info('Put an invalid cookie (%d) to the control. TypeError is expected' %
invalid_cookie)
req_ctrl.cookie = invalid_cookie
with pytest.raises(TypeError):
- msgid = topology.standalone.search_ext(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- search_flt,
- searchreq_attrlist,
- serverctrls=controls)
+ msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ search_flt,
+ searchreq_attrlist,
+ serverctrls=controls)
finally:
log.info('Set Directory Manager bind back (test_search_invalid_cookie)')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- del_users(topology, users_list)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ del_users(topology_st, users_list)
-def test_search_abandon_with_zero_size(topology, test_user):
+def test_search_abandon_with_zero_size(topology_st, test_user):
"""Verify that search with simple paged results control
can be abandon using page_size = 0
@@ -797,37 +765,37 @@ def test_search_abandon_with_zero_size(topology, test_user):
users_num = 10
page_size = 0
- users_list = add_users(topology, users_num, DEFAULT_SUFFIX)
+ users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)
search_flt = r'(uid=test*)'
searchreq_attrlist = ['dn', 'sn']
try:
log.info('Set user bind')
- topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
log.info('Create simple paged results control instance')
req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
controls = [req_ctrl]
- msgid = topology.standalone.search_ext(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- search_flt,
- searchreq_attrlist,
- serverctrls=controls)
- rtype, rdata, rmsgid, rctrls = topology.standalone.result3(msgid)
+ msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ search_flt,
+ searchreq_attrlist,
+ serverctrls=controls)
+ rtype, rdata, rmsgid, rctrls = topology_st.standalone.result3(msgid)
pctrls = [
c
for c in rctrls
if c.controlType == SimplePagedResultsControl.controlType
- ]
+ ]
assert not pctrls[0].cookie
finally:
log.info('Set Directory Manager bind back (test_search_abandon_with_zero_size)')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- del_users(topology, users_list)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ del_users(topology_st, users_list)
-def test_search_pagedsizelimit_success(topology, test_user):
+def test_search_pagedsizelimit_success(topology_st, test_user):
"""Verify that search with a simple paged results control
returns all entries it should without errors while
valid value set to nsslapd-pagedsizelimit.
@@ -849,20 +817,20 @@ def test_search_pagedsizelimit_success(topology, test_user):
page_size = 10
attr_name = 'nsslapd-pagedsizelimit'
attr_value = '20'
- attr_value_bck = change_conf_attr(topology, DN_CONFIG,
+ attr_value_bck = change_conf_attr(topology_st, DN_CONFIG,
attr_name, attr_value)
- users_list = add_users(topology, users_num, DEFAULT_SUFFIX)
+ users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)
search_flt = r'(uid=test*)'
searchreq_attrlist = ['dn', 'sn']
try:
log.info('Set user bind')
- topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
controls = [req_ctrl]
- all_results = paged_search(topology, DEFAULT_SUFFIX, controls,
+ all_results = paged_search(topology_st, DEFAULT_SUFFIX, controls,
search_flt, searchreq_attrlist)
log.info('%d results' % len(all_results))
@@ -870,15 +838,15 @@ def test_search_pagedsizelimit_success(topology, test_user):
finally:
log.info('Set Directory Manager bind back (test_search_pagedsizelimit_success)')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- del_users(topology, users_list)
- change_conf_attr(topology, DN_CONFIG,
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ del_users(topology_st, users_list)
+ change_conf_attr(topology_st, DN_CONFIG,
'nsslapd-pagedsizelimit', attr_value_bck)
@pytest.mark.parametrize('conf_attr,user_attr,expected_rs',
(('5', '15', 'PASS'), ('15', '5', ldap.SIZELIMIT_EXCEEDED)))
-def test_search_nspagedsizelimit(topology, test_user,
+def test_search_nspagedsizelimit(topology_st, test_user,
conf_attr, user_attr, expected_rs):
"""Verify that nsPagedSizeLimit attribute overrides
nsslapd-pagedsizelimit while performing search with
@@ -909,17 +877,17 @@ def test_search_nspagedsizelimit(topology, test_user,
users_num = 10
page_size = 10
- users_list = add_users(topology, users_num, DEFAULT_SUFFIX)
+ users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)
search_flt = r'(uid=test*)'
searchreq_attrlist = ['dn', 'sn']
- conf_attr_bck = change_conf_attr(topology, DN_CONFIG,
+ conf_attr_bck = change_conf_attr(topology_st, DN_CONFIG,
'nsslapd-pagedsizelimit', conf_attr)
- user_attr_bck = change_conf_attr(topology, TEST_USER_DN,
+ user_attr_bck = change_conf_attr(topology_st, TEST_USER_DN,
'nsPagedSizeLimit', user_attr)
try:
log.info('Set user bind')
- topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
controls = [req_ctrl]
@@ -927,29 +895,29 @@ def test_search_nspagedsizelimit(topology, test_user,
if expected_rs == ldap.SIZELIMIT_EXCEEDED:
log.info('Expect to fail with SIZELIMIT_EXCEEDED')
with pytest.raises(expected_rs):
- all_results = paged_search(topology, DEFAULT_SUFFIX, controls,
+ all_results = paged_search(topology_st, DEFAULT_SUFFIX, controls,
search_flt, searchreq_attrlist)
elif expected_rs == 'PASS':
log.info('Expect to pass')
- all_results = paged_search(topology, DEFAULT_SUFFIX, controls,
+ all_results = paged_search(topology_st, DEFAULT_SUFFIX, controls,
search_flt, searchreq_attrlist)
log.info('%d results' % len(all_results))
assert len(all_results) == len(users_list)
finally:
log.info('Set Directory Manager bind back (test_search_nspagedsizelimit)')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- del_users(topology, users_list)
- change_conf_attr(topology, DN_CONFIG,
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ del_users(topology_st, users_list)
+ change_conf_attr(topology_st, DN_CONFIG,
'nsslapd-pagedsizelimit', conf_attr_bck)
- change_conf_attr(topology, TEST_USER_DN,
+ change_conf_attr(topology_st, TEST_USER_DN,
'nsPagedSizeLimit', user_attr_bck)
@pytest.mark.parametrize('conf_attr_values,expected_rs',
((('5000', '100', '100'), ldap.ADMINLIMIT_EXCEEDED),
(('5000', '120', '122'), 'PASS')))
-def test_search_paged_limits(topology, test_user, conf_attr_values, expected_rs):
+def test_search_paged_limits(topology_st, test_user, conf_attr_values, expected_rs):
"""Verify that nsslapd-idlistscanlimit and
nsslapd-lookthroughlimit can limit the administrator
search abilities.
@@ -979,21 +947,21 @@ def test_search_paged_limits(topology, test_user, conf_attr_values, expected_rs)
users_num = 101
page_size = 10
- users_list = add_users(topology, users_num, DEFAULT_SUFFIX)
+ users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)
search_flt = r'(uid=test*)'
searchreq_attrlist = ['dn', 'sn']
- size_attr_bck = change_conf_attr(topology, DN_CONFIG,
+ size_attr_bck = change_conf_attr(topology_st, DN_CONFIG,
'nsslapd-sizelimit', conf_attr_values[0])
- pagedsize_attr_bck = change_conf_attr(topology, DN_CONFIG,
+ pagedsize_attr_bck = change_conf_attr(topology_st, DN_CONFIG,
'nsslapd-pagedsizelimit', conf_attr_values[0])
- idlistscan_attr_bck = change_conf_attr(topology, 'cn=config,%s' % DN_LDBM,
+ idlistscan_attr_bck = change_conf_attr(topology_st, 'cn=config,%s' % DN_LDBM,
'nsslapd-idlistscanlimit', conf_attr_values[1])
- lookthrough_attr_bck = change_conf_attr(topology, 'cn=config,%s' % DN_LDBM,
+ lookthrough_attr_bck = change_conf_attr(topology_st, 'cn=config,%s' % DN_LDBM,
'nsslapd-lookthroughlimit', conf_attr_values[2])
try:
log.info('Set user bind')
- topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
controls = [req_ctrl]
@@ -1001,32 +969,32 @@ def test_search_paged_limits(topology, test_user, conf_attr_values, expected_rs)
if expected_rs == ldap.ADMINLIMIT_EXCEEDED:
log.info('Expect to fail with ADMINLIMIT_EXCEEDED')
with pytest.raises(expected_rs):
- all_results = paged_search(topology, DEFAULT_SUFFIX, controls,
+ all_results = paged_search(topology_st, DEFAULT_SUFFIX, controls,
search_flt, searchreq_attrlist)
elif expected_rs == 'PASS':
log.info('Expect to pass')
- all_results = paged_search(topology, DEFAULT_SUFFIX, controls,
+ all_results = paged_search(topology_st, DEFAULT_SUFFIX, controls,
search_flt, searchreq_attrlist)
log.info('%d results' % len(all_results))
assert len(all_results) == len(users_list)
finally:
log.info('Set Directory Manager bind back (test_search_paged_limits)')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- del_users(topology, users_list)
- change_conf_attr(topology, DN_CONFIG,
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ del_users(topology_st, users_list)
+ change_conf_attr(topology_st, DN_CONFIG,
'nsslapd-sizelimit', size_attr_bck)
- change_conf_attr(topology, DN_CONFIG,
+ change_conf_attr(topology_st, DN_CONFIG,
'nsslapd-pagedsizelimit', pagedsize_attr_bck)
- change_conf_attr(topology, 'cn=config,%s' % DN_LDBM,
+ change_conf_attr(topology_st, 'cn=config,%s' % DN_LDBM,
'nsslapd-lookthroughlimit', lookthrough_attr_bck)
- change_conf_attr(topology, 'cn=config,%s' % DN_LDBM,
+ change_conf_attr(topology_st, 'cn=config,%s' % DN_LDBM,
'nsslapd-idlistscanlimit', idlistscan_attr_bck)
@pytest.mark.parametrize('conf_attr_values,expected_rs',
((('1000', '100', '100'), ldap.ADMINLIMIT_EXCEEDED),
(('1000', '120', '122'), 'PASS')))
-def test_search_paged_user_limits(topology, test_user, conf_attr_values, expected_rs):
+def test_search_paged_user_limits(topology_st, test_user, conf_attr_values, expected_rs):
"""Verify that nsPagedIDListScanLimit and nsPagedLookthroughLimit
override nsslapd-idlistscanlimit and nsslapd-lookthroughlimit
while performing search with the simple paged results control.
@@ -1057,21 +1025,21 @@ def test_search_paged_user_limits(topology, test_user, conf_attr_values, expecte
users_num = 101
page_size = 10
- users_list = add_users(topology, users_num, DEFAULT_SUFFIX)
+ users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)
search_flt = r'(uid=test*)'
searchreq_attrlist = ['dn', 'sn']
- lookthrough_attr_bck = change_conf_attr(topology, 'cn=config,%s' % DN_LDBM,
+ lookthrough_attr_bck = change_conf_attr(topology_st, 'cn=config,%s' % DN_LDBM,
'nsslapd-lookthroughlimit', conf_attr_values[0])
- idlistscan_attr_bck = change_conf_attr(topology, 'cn=config,%s' % DN_LDBM,
+ idlistscan_attr_bck = change_conf_attr(topology_st, 'cn=config,%s' % DN_LDBM,
'nsslapd-idlistscanlimit', conf_attr_values[0])
- user_idlistscan_attr_bck = change_conf_attr(topology, TEST_USER_DN,
+ user_idlistscan_attr_bck = change_conf_attr(topology_st, TEST_USER_DN,
'nsPagedIDListScanLimit', conf_attr_values[1])
- user_lookthrough_attr_bck = change_conf_attr(topology, TEST_USER_DN,
+ user_lookthrough_attr_bck = change_conf_attr(topology_st, TEST_USER_DN,
'nsPagedLookthroughLimit', conf_attr_values[2])
try:
log.info('Set user bind')
- topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
controls = [req_ctrl]
@@ -1079,29 +1047,29 @@ def test_search_paged_user_limits(topology, test_user, conf_attr_values, expecte
if expected_rs == ldap.ADMINLIMIT_EXCEEDED:
log.info('Expect to fail with ADMINLIMIT_EXCEEDED')
with pytest.raises(expected_rs):
- all_results = paged_search(topology, DEFAULT_SUFFIX, controls,
+ all_results = paged_search(topology_st, DEFAULT_SUFFIX, controls,
search_flt, searchreq_attrlist)
elif expected_rs == 'PASS':
log.info('Expect to pass')
- all_results = paged_search(topology, DEFAULT_SUFFIX, controls,
+ all_results = paged_search(topology_st, DEFAULT_SUFFIX, controls,
search_flt, searchreq_attrlist)
log.info('%d results' % len(all_results))
assert len(all_results) == len(users_list)
finally:
log.info('Set Directory Manager bind back (test_search_paged_user_limits)')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- del_users(topology, users_list)
- change_conf_attr(topology, 'cn=config,%s' % DN_LDBM,
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ del_users(topology_st, users_list)
+ change_conf_attr(topology_st, 'cn=config,%s' % DN_LDBM,
'nsslapd-lookthroughlimit', lookthrough_attr_bck)
- change_conf_attr(topology, 'cn=config,%s' % DN_LDBM,
+ change_conf_attr(topology_st, 'cn=config,%s' % DN_LDBM,
'nsslapd-idlistscanlimit', idlistscan_attr_bck)
- change_conf_attr(topology, TEST_USER_DN,
+ change_conf_attr(topology_st, TEST_USER_DN,
'nsPagedIDListScanLimit', user_idlistscan_attr_bck)
- change_conf_attr(topology, TEST_USER_DN,
+ change_conf_attr(topology_st, TEST_USER_DN,
'nsPagedLookthroughLimit', user_lookthrough_attr_bck)
-def test_ger_basic(topology, test_user):
+def test_ger_basic(topology_st, test_user):
"""Verify that search with a simple paged results control
and get effective rights control returns all entries
it should without errors.
@@ -1118,19 +1086,19 @@ def test_ger_basic(topology, test_user):
an 'attributeLevelRights' returned
"""
- users_list = add_users(topology, 20, DEFAULT_SUFFIX)
+ users_list = add_users(topology_st, 20, DEFAULT_SUFFIX)
search_flt = r'(uid=test*)'
searchreq_attrlist = ['dn', 'sn']
page_size = 4
try:
log.info('Set bind to directory manager')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
spr_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
ger_ctrl = GetEffectiveRightsControl(True, "dn: " + DN_DM)
- all_results = paged_search(topology, DEFAULT_SUFFIX, [spr_ctrl, ger_ctrl],
+ all_results = paged_search(topology_st, DEFAULT_SUFFIX, [spr_ctrl, ger_ctrl],
search_flt, searchreq_attrlist)
log.info('{} results'.format(len(all_results)))
@@ -1139,10 +1107,10 @@ def test_ger_basic(topology, test_user):
assert all(attrs['attributeLevelRights'][0] for dn, attrs in all_results)
finally:
log.info('Remove added users')
- del_users(topology, users_list)
+ del_users(topology_st, users_list)
-def test_multi_suffix_search(topology, test_user, new_suffixes):
+def test_multi_suffix_search(topology_st, test_user, new_suffixes):
"""Verify that page result search returns empty cookie
if there is no returned entry.
@@ -1169,27 +1137,27 @@ def test_multi_suffix_search(topology, test_user, new_suffixes):
users_num = 20
log.info('Clear the access log')
- topology.standalone.deleteAccessLogs()
+ topology_st.standalone.deleteAccessLogs()
- users_list_1 = add_users(topology, users_num / 2, NEW_SUFFIX_1)
- users_list_2 = add_users(topology, users_num / 2, NEW_SUFFIX_2)
+ users_list_1 = add_users(topology_st, users_num / 2, NEW_SUFFIX_1)
+ users_list_2 = add_users(topology_st, users_num / 2, NEW_SUFFIX_2)
try:
log.info('Set DM bind')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
- all_results = paged_search(topology, NEW_SUFFIX_1, [req_ctrl],
- search_flt, searchreq_attrlist)
+ all_results = paged_search(topology_st, NEW_SUFFIX_1, [req_ctrl],
+ search_flt, searchreq_attrlist)
log.info('{} results'.format(len(all_results)))
assert len(all_results) == users_num
log.info('Restart the server to flush the logs')
- topology.standalone.restart(timeout=10)
+ topology_st.standalone.restart(timeout=10)
- access_log_lines = topology.standalone.ds_access_log.match('.*pr_cookie=.*')
+ access_log_lines = topology_st.standalone.ds_access_log.match('.*pr_cookie=.*')
pr_cookie_list = ([line.rsplit('=', 1)[-1] for line in access_log_lines])
pr_cookie_list = [int(pr_cookie) for pr_cookie in pr_cookie_list]
log.info('Assert that last pr_cookie == -1 and others pr_cookie == 0')
@@ -1198,12 +1166,12 @@ def test_multi_suffix_search(topology, test_user, new_suffixes):
assert pr_cookie_list[-1] == -1
finally:
log.info('Remove added users')
- del_users(topology, users_list_1)
- del_users(topology, users_list_2)
+ del_users(topology_st, users_list_1)
+ del_users(topology_st, users_list_2)
@pytest.mark.parametrize('conf_attr_value', (None, '-1', '1000'))
-def test_maxsimplepaged_per_conn_success(topology, test_user, conf_attr_value):
+def test_maxsimplepaged_per_conn_success(topology_st, test_user, conf_attr_value):
"""Verify that nsslapd-maxsimplepaged-per-conn acts according design
:Feature: Simple paged results
@@ -1222,37 +1190,37 @@ def test_maxsimplepaged_per_conn_success(topology, test_user, conf_attr_value):
results requests per connection.
"""
- users_list = add_users(topology, 20, DEFAULT_SUFFIX)
+ users_list = add_users(topology_st, 20, DEFAULT_SUFFIX)
search_flt = r'(uid=test*)'
searchreq_attrlist = ['dn', 'sn']
page_size = 4
if conf_attr_value:
- max_per_con_bck = change_conf_attr(topology, DN_CONFIG,
+ max_per_con_bck = change_conf_attr(topology_st, DN_CONFIG,
'nsslapd-maxsimplepaged-per-conn',
conf_attr_value)
try:
log.info('Set user bind')
- topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
- all_results = paged_search(topology, DEFAULT_SUFFIX, [req_ctrl],
+ all_results = paged_search(topology_st, DEFAULT_SUFFIX, [req_ctrl],
search_flt, searchreq_attrlist)
log.info('{} results'.format(len(all_results)))
assert len(all_results) == len(users_list)
finally:
log.info('Remove added users')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- del_users(topology, users_list)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ del_users(topology_st, users_list)
if conf_attr_value:
- change_conf_attr(topology, DN_CONFIG,
+ change_conf_attr(topology_st, DN_CONFIG,
'nsslapd-maxsimplepaged-per-conn', max_per_con_bck)
@pytest.mark.parametrize('conf_attr_value', ('0', '1'))
-def test_maxsimplepaged_per_conn_failure(topology, test_user, conf_attr_value):
+def test_maxsimplepaged_per_conn_failure(topology_st, test_user, conf_attr_value):
"""Verify that nsslapd-maxsimplepaged-per-conn acts according design
:Feature: Simple paged results
@@ -1270,43 +1238,43 @@ def test_maxsimplepaged_per_conn_failure(topology, test_user, conf_attr_value):
:Assert: During the searches UNWILLING_TO_PERFORM should be throwned
"""
- users_list = add_users(topology, 20, DEFAULT_SUFFIX)
+ users_list = add_users(topology_st, 20, DEFAULT_SUFFIX)
search_flt = r'(uid=test*)'
searchreq_attrlist = ['dn', 'sn']
page_size = 4
- max_per_con_bck = change_conf_attr(topology, DN_CONFIG,
+ max_per_con_bck = change_conf_attr(topology_st, DN_CONFIG,
'nsslapd-maxsimplepaged-per-conn',
conf_attr_value)
try:
log.info('Set user bind')
- topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
log.info('Create simple paged results control instance')
req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
with pytest.raises(ldap.UNWILLING_TO_PERFORM):
- msgid = topology.standalone.search_ext(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- search_flt,
- searchreq_attrlist,
- serverctrls=[req_ctrl])
- rtype, rdata, rmsgid, rctrls = topology.standalone.result3(msgid)
+ msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ search_flt,
+ searchreq_attrlist,
+ serverctrls=[req_ctrl])
+ rtype, rdata, rmsgid, rctrls = topology_st.standalone.result3(msgid)
# If nsslapd-maxsimplepaged-per-conn = 1,
# it should pass this point, but failed on the next search
assert conf_attr_value == '1'
- msgid = topology.standalone.search_ext(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- search_flt,
- searchreq_attrlist,
- serverctrls=[req_ctrl])
- rtype, rdata, rmsgid, rctrls = topology.standalone.result3(msgid)
+ msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ search_flt,
+ searchreq_attrlist,
+ serverctrls=[req_ctrl])
+ rtype, rdata, rmsgid, rctrls = topology_st.standalone.result3(msgid)
finally:
log.info('Remove added users')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- del_users(topology, users_list)
- change_conf_attr(topology, DN_CONFIG,
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ del_users(topology_st, users_list)
+ change_conf_attr(topology_st, DN_CONFIG,
'nsslapd-maxsimplepaged-per-conn', max_per_con_bck)
diff --git a/dirsrvtests/tests/suites/pam_passthru_plugin/pam_test.py b/dirsrvtests/tests/suites/pam_passthru_plugin/pam_test.py
deleted file mode 100644
index 9ff4261..0000000
--- a/dirsrvtests/tests/suites/pam_passthru_plugin/pam_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_pam_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_pam_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/passthru_plugin/passthru_test.py b/dirsrvtests/tests/suites/passthru_plugin/passthru_test.py
deleted file mode 100644
index 956d414..0000000
--- a/dirsrvtests/tests/suites/passthru_plugin/passthru_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_passthru_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_passthru_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/password/password_test.py b/dirsrvtests/tests/suites/password/password_test.py
index 3781492..f450c9a 100644
--- a/dirsrvtests/tests/suites/password/password_test.py
+++ b/dirsrvtests/tests/suites/password/password_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -17,58 +17,16 @@ from lib389.tools import DirSrvTools
from lib389._constants import *
from lib389.properties import *
from lib389.tasks import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_password_init(topology):
- '''
- Do init, if necessary
- '''
-
- return
-
-
-def test_password_delete_specific_password(topology):
- '''
- Delete a specific userpassword, and make sure it is actually deleted from the entry
- '''
+def test_password_delete_specific_password(topology_st):
+ """ Delete a specific userpassword, and make sure
+ it is actually deleted from the entry
+ """
log.info('Running test_password_delete_specific_password...')
@@ -78,11 +36,11 @@ def test_password_delete_specific_password(topology):
# Add a test user with a password
#
try:
- topology.standalone.add_s(Entry((USER_DN, {'objectclass': "top extensibleObject".split(),
- 'sn': '1',
- 'cn': 'user 1',
- 'uid': 'user1',
- 'userpassword': PASSWORD})))
+ topology_st.standalone.add_s(Entry((USER_DN, {'objectclass': "top extensibleObject".split(),
+ 'sn': '1',
+ 'cn': 'user 1',
+ 'uid': 'user1',
+ 'userpassword': PASSWORD})))
except ldap.LDAPError as e:
log.fatal('test_password_delete_specific_password: Failed to add test user ' +
USER_DN + ': error ' + e.message['desc'])
@@ -92,7 +50,7 @@ def test_password_delete_specific_password(topology):
# Delete the exact password
#
try:
- topology.standalone.modify_s(USER_DN, [(ldap.MOD_DELETE, 'userpassword', PASSWORD)])
+ topology_st.standalone.modify_s(USER_DN, [(ldap.MOD_DELETE, 'userpassword', PASSWORD)])
except ldap.LDAPError as e:
log.fatal('test_password_delete_specific_password: Failed to delete userpassword: error ' +
e.message['desc'])
@@ -102,7 +60,7 @@ def test_password_delete_specific_password(topology):
# Check the password is actually deleted
#
try:
- entry = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, 'objectclass=top')
+ entry = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_BASE, 'objectclass=top')
if entry[0].hasAttr('userpassword'):
log.fatal('test_password_delete_specific_password: Entry incorrectly still have the userpassword attribute')
assert False
@@ -115,7 +73,7 @@ def test_password_delete_specific_password(topology):
# Cleanup
#
try:
- topology.standalone.delete_s(USER_DN)
+ topology_st.standalone.delete_s(USER_DN)
except ldap.LDAPError as e:
log.fatal('test_password_delete_specific_password: Failed to delete user(%s), error: %s' %
(USER_DN, e.message('desc')))
diff --git a/dirsrvtests/tests/suites/password/pwdAdmin_test.py b/dirsrvtests/tests/suites/password/pwdAdmin_test.py
index dc8fdab..46991d8 100644
--- a/dirsrvtests/tests/suites/password/pwdAdmin_test.py
+++ b/dirsrvtests/tests/suites/password/pwdAdmin_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -18,11 +18,11 @@ from lib389._constants import *
from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
CONFIG_DN = 'cn=config'
ADMIN_NAME = 'passwd_admin'
ADMIN_DN = 'cn=%s,%s' % (ADMIN_NAME, SUFFIX)
@@ -35,40 +35,7 @@ ENTRY_DN = 'cn=%s,%s' % (ENTRY_NAME, SUFFIX)
INVALID_PWDS = ('2_Short', 'No_Number', 'N0Special', '{SSHA}bBy8UdtPZwu8uZna9QOYG3Pr41RpIRVDl8wddw==')
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_pwdAdmin_init(topology):
+def test_pwdAdmin_init(topology_st):
'''
Create our future Password Admin entry, set the password policy, and test
that its working
@@ -78,28 +45,28 @@ def test_pwdAdmin_init(topology):
# Add Password Admin 1
try:
- topology.standalone.add_s(Entry((ADMIN_DN, {'objectclass': "top extensibleObject".split(),
- 'cn': ADMIN_NAME,
- 'userpassword': ADMIN_PWD})))
+ topology_st.standalone.add_s(Entry((ADMIN_DN, {'objectclass': "top extensibleObject".split(),
+ 'cn': ADMIN_NAME,
+ 'userpassword': ADMIN_PWD})))
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin_init: Failed to add test user' + ADMIN_DN + ': error ' + e.message['desc'])
assert False
# Add Password Admin 2
try:
- topology.standalone.add_s(Entry((ADMIN2_DN, {'objectclass': "top extensibleObject".split(),
- 'cn': ADMIN2_NAME,
- 'userpassword': ADMIN_PWD})))
+ topology_st.standalone.add_s(Entry((ADMIN2_DN, {'objectclass': "top extensibleObject".split(),
+ 'cn': ADMIN2_NAME,
+ 'userpassword': ADMIN_PWD})))
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin_init: Failed to add test user ' + ADMIN2_DN + ': error ' + e.message['desc'])
assert False
# Add Password Admin Group
try:
- topology.standalone.add_s(Entry((ADMIN_GROUP_DN, {'objectclass': "top groupOfUNiqueNames".split(),
- 'cn': 'password admin group',
- 'uniquemember': ADMIN_DN,
- 'uniquemember': ADMIN2_DN})))
+ topology_st.standalone.add_s(Entry((ADMIN_GROUP_DN, {'objectclass': "top groupOfUNiqueNames".split(),
+ 'cn': 'password admin group',
+ 'uniquemember': ADMIN_DN,
+ 'uniquemember': ADMIN2_DN})))
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin_init: Failed to add group' + ADMIN_GROUP_DN + ': error ' + e.message['desc'])
assert False
@@ -107,13 +74,13 @@ def test_pwdAdmin_init(topology):
# Configure password policy
log.info('test_pwdAdmin_init: Configuring password policy...')
try:
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on'),
- (ldap.MOD_REPLACE, 'passwordCheckSyntax', 'on'),
- (ldap.MOD_REPLACE, 'passwordMinCategories', '1'),
- (ldap.MOD_REPLACE, 'passwordMinTokenLength', '1'),
- (ldap.MOD_REPLACE, 'passwordExp', 'on'),
- (ldap.MOD_REPLACE, 'passwordMinDigits', '1'),
- (ldap.MOD_REPLACE, 'passwordMinSpecials', '1')])
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on'),
+ (ldap.MOD_REPLACE, 'passwordCheckSyntax', 'on'),
+ (ldap.MOD_REPLACE, 'passwordMinCategories', '1'),
+ (ldap.MOD_REPLACE, 'passwordMinTokenLength', '1'),
+ (ldap.MOD_REPLACE, 'passwordExp', 'on'),
+ (ldap.MOD_REPLACE, 'passwordMinDigits', '1'),
+ (ldap.MOD_REPLACE, 'passwordMinSpecials', '1')])
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin_init: Failed configure password policy: ' +
e.message['desc'])
@@ -124,14 +91,14 @@ def test_pwdAdmin_init(topology):
#
log.info('Add aci to allow password admin to add/update entries...')
- ACI_TARGET = "(target = \"ldap:///%s\")" % SUFFIX
- ACI_TARGETATTR = "(targetattr = *)"
- ACI_ALLOW = "(version 3.0; acl \"Password Admin Access\"; allow (all) "
- ACI_SUBJECT = "(userdn = \"ldap:///anyone\");)"
- ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT
+ ACI_TARGET = "(target = \"ldap:///%s\")" % SUFFIX
+ ACI_TARGETATTR = "(targetattr = *)"
+ ACI_ALLOW = "(version 3.0; acl \"Password Admin Access\"; allow (all) "
+ ACI_SUBJECT = "(userdn = \"ldap:///anyone\");)"
+ ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT
mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)]
try:
- topology.standalone.modify_s(SUFFIX, mod)
+ topology_st.standalone.modify_s(SUFFIX, mod)
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin_init: Failed to add aci for password admin: ' +
e.message['desc'])
@@ -142,10 +109,10 @@ def test_pwdAdmin_init(topology):
#
log.info('test_pwdAdmin_init: Bind as the Password Administator (before activating)...')
try:
- topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
+ topology_st.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin_init: Failed to bind as the Password Admin: ' +
- e.message['desc'])
+ e.message['desc'])
assert False
#
@@ -166,12 +133,12 @@ def test_pwdAdmin_init(topology):
log.info('test_pwdAdmin_init: Create a regular user entry %s with password (%s)...' %
(ENTRY_DN, passwd))
try:
- topology.standalone.add_s(entry)
+ topology_st.standalone.add_s(entry)
except ldap.LDAPError as e:
# We failed as expected
failed_as_expected = True
log.info('test_pwdAdmin_init: Add failed as expected: password (%s) result (%s)'
- % (passwd, e.message['desc']))
+ % (passwd, e.message['desc']))
if not failed_as_expected:
log.fatal('test_pwdAdmin_init: We were incorrectly able to add an entry ' +
@@ -179,7 +146,7 @@ def test_pwdAdmin_init(topology):
assert False
-def test_pwdAdmin(topology):
+def test_pwdAdmin(topology_st):
'''
Test that password administrators/root DN can
bypass password syntax/policy.
@@ -209,7 +176,7 @@ def test_pwdAdmin(topology):
# Bind as Root DN
try:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin: Root DN failed to authenticate: ' +
e.message['desc'])
@@ -217,7 +184,7 @@ def test_pwdAdmin(topology):
# Set the password admin
try:
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ADMIN_DN)])
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ADMIN_DN)])
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin: Failed to add password admin to config: ' +
e.message['desc'])
@@ -225,7 +192,7 @@ def test_pwdAdmin(topology):
# Bind as Password Admin
try:
- topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
+ topology_st.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin: Failed to bind as the Password Admin: ' +
e.message['desc'])
@@ -239,7 +206,7 @@ def test_pwdAdmin(topology):
log.info('test_pwdAdmin: Create a regular user entry %s with password (%s)...' %
(ENTRY_DN, passwd))
try:
- topology.standalone.add_s(entry)
+ topology_st.standalone.add_s(entry)
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin: Failed to add entry with password (%s) result (%s)'
% (passwd, e.message['desc']))
@@ -249,7 +216,7 @@ def test_pwdAdmin(topology):
# Delete entry for the next pass
try:
- topology.standalone.delete_s(ENTRY_DN)
+ topology_st.standalone.delete_s(ENTRY_DN)
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin: Failed to delete entry: %s' %
(e.message['desc']))
@@ -260,7 +227,7 @@ def test_pwdAdmin(topology):
#
entry.setValues('userpassword', ADMIN_PWD)
try:
- topology.standalone.add_s(entry)
+ topology_st.standalone.add_s(entry)
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin: Failed to add entry with valid password (%s) result (%s)' %
(passwd, e.message['desc']))
@@ -274,7 +241,7 @@ def test_pwdAdmin(topology):
# Bind as root DN
try:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin: Root DN failed to authenticate: ' +
e.message['desc'])
@@ -282,7 +249,7 @@ def test_pwdAdmin(topology):
# Remove password admin
try:
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_DELETE, 'passwordAdminDN', None)])
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_DELETE, 'passwordAdminDN', None)])
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin: Failed to remove password admin from config: ' +
e.message['desc'])
@@ -290,7 +257,7 @@ def test_pwdAdmin(topology):
# Bind as Password Admin (who is no longer an admin)
try:
- topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
+ topology_st.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin: Failed to bind as the Password Admin: ' +
e.message['desc'])
@@ -303,7 +270,7 @@ def test_pwdAdmin(topology):
failed_as_expected = False
entry.setValues('userpassword', passwd)
try:
- topology.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', passwd)])
+ topology_st.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', passwd)])
except ldap.LDAPError as e:
# We failed as expected
failed_as_expected = True
@@ -322,14 +289,14 @@ def test_pwdAdmin(topology):
# Bind as root DN to make the update
try:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin: Root DN failed to authenticate: ' + e.message['desc'])
assert False
# Update config - set the password admin
try:
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ADMIN_DN)])
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ADMIN_DN)])
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin: Failed to add password admin to config: ' +
e.message['desc'])
@@ -337,7 +304,7 @@ def test_pwdAdmin(topology):
# Bind as Password Admin
try:
- topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
+ topology_st.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin: Failed to bind as the Password Admin: ' +
e.message['desc'])
@@ -348,10 +315,10 @@ def test_pwdAdmin(topology):
#
for passwd in INVALID_PWDS:
try:
- topology.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', passwd)])
+ topology_st.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', passwd)])
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin: Password update failed unexpectedly: password (%s) result (%s)'
- % (passwd, e.message['desc']))
+ % (passwd, e.message['desc']))
assert False
log.info('test_pwdAdmin: Password update succeeded (%s)' % passwd)
@@ -362,14 +329,14 @@ def test_pwdAdmin(topology):
# Bind as root DN to make the update
try:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin: Root DN failed to authenticate: ' + e.message['desc'])
assert False
# Update config - set the password admin group
try:
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ADMIN_GROUP_DN)])
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ADMIN_GROUP_DN)])
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin: Failed to add password admin to config: ' +
e.message['desc'])
@@ -377,7 +344,7 @@ def test_pwdAdmin(topology):
# Bind as admin2
try:
- topology.standalone.simple_bind_s(ADMIN2_DN, ADMIN_PWD)
+ topology_st.standalone.simple_bind_s(ADMIN2_DN, ADMIN_PWD)
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin: Failed to bind as the Password Admin2: ' +
e.message['desc'])
@@ -386,22 +353,22 @@ def test_pwdAdmin(topology):
# Make some invalid password updates, but they should succeed
for passwd in INVALID_PWDS:
try:
- topology.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', passwd)])
+ topology_st.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', passwd)])
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin: Password update failed unexpectedly: password (%s) result (%s)'
- % (passwd, e.message['desc']))
+ % (passwd, e.message['desc']))
assert False
log.info('test_pwdAdmin: Password update succeeded (%s)' % passwd)
# Cleanup - bind as Root DN for the other tests
try:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin: Root DN failed to authenticate: ' + e.message['desc'])
assert False
-def test_pwdAdmin_config_validation(topology):
+def test_pwdAdmin_config_validation(topology_st):
'''
Test config validation:
@@ -410,7 +377,7 @@ def test_pwdAdmin_config_validation(topology):
'''
# Add mulitple attributes - one already eists so just try and add as second one
try:
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_ADD, 'passwordAdminDN', ENTRY_DN)])
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_ADD, 'passwordAdminDN', ENTRY_DN)])
log.fatal('test_pwdAdmin_config_validation: Incorrectly was able to add two config attributes')
assert False
except ldap.LDAPError as e:
@@ -419,7 +386,7 @@ def test_pwdAdmin_config_validation(topology):
# Attempt to set invalid DN
try:
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_ADD, 'passwordAdminDN', 'ZZZZZ')])
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_ADD, 'passwordAdminDN', 'ZZZZZ')])
log.fatal('test_pwdAdmin_config_validation: Incorrectly was able to add invalid DN')
assert False
except ldap.LDAPError as e:
diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py
index b474f61..a359b65 100644
--- a/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py
+++ b/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py
@@ -1,3 +1,11 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2016 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
import os
import time
import subprocess
@@ -9,86 +17,39 @@ from lib389._constants import *
from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
-DEBUGGING = False
OU_PEOPLE = 'ou=people,{}'.format(DEFAULT_SUFFIX)
TEST_USER_NAME = 'simplepaged_test'
TEST_USER_DN = 'uid={},{}'.format(TEST_USER_NAME, OU_PEOPLE)
TEST_USER_PWD = 'simplepaged_test'
-PW_POLICY_CONT_USER = 'cn="cn=nsPwPolicyEntry,uid=simplepaged_test,'\
- 'ou=people,dc=example,dc=com",'\
+PW_POLICY_CONT_USER = 'cn="cn=nsPwPolicyEntry,uid=simplepaged_test,' \
+ 'ou=people,dc=example,dc=com",' \
'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com'
-PW_POLICY_CONT_PEOPLE = 'cn="cn=nsPwPolicyEntry,'\
- 'ou=people,dc=example,dc=com",'\
+PW_POLICY_CONT_PEOPLE = 'cn="cn=nsPwPolicyEntry,' \
+ 'ou=people,dc=example,dc=com",' \
'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com'
-if DEBUGGING:
- logging.getLogger(__name__).setLevel(logging.DEBUG)
-else:
- logging.getLogger(__name__).setLevel(logging.INFO)
-
+logging.getLogger(__name__).setLevel(logging.INFO)
log = logging.getLogger(__name__)
-class TopologyStandalone(object):
- """The DS Topology Class"""
- def __init__(self, standalone):
- """Init"""
- standalone.open()
- self.standalone = standalone
-
-
@pytest.fixture(scope="module")
-def topology(request):
- """Create DS Deployment"""
-
- # Creating standalone instance ...
- if DEBUGGING:
- standalone = DirSrv(verbose=True)
- else:
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- """If we are debugging just stop the instances, otherwise remove
- them
- """
- if DEBUGGING:
- standalone.stop()
- else:
- standalone.delete()
-
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-@pytest.fixture(scope="module")
-def test_user(topology, request):
+def test_user(topology_st, request):
"""User for binding operation"""
log.info('Adding user {}'.format(TEST_USER_DN))
try:
- topology.standalone.add_s(Entry((TEST_USER_DN, {
- 'objectclass': 'top person'.split(),
- 'objectclass': 'organizationalPerson',
- 'objectclass': 'inetorgperson',
- 'cn': TEST_USER_NAME,
- 'sn': TEST_USER_NAME,
- 'userpassword': TEST_USER_PWD,
- 'mail': '%s@redhat.com' % TEST_USER_NAME,
- 'uid': TEST_USER_NAME
- })))
+ topology_st.standalone.add_s(Entry((TEST_USER_DN, {
+ 'objectclass': 'top person'.split(),
+ 'objectclass': 'organizationalPerson',
+ 'objectclass': 'inetorgperson',
+ 'cn': TEST_USER_NAME,
+ 'sn': TEST_USER_NAME,
+ 'userpassword': TEST_USER_PWD,
+ 'mail': '%s@redhat.com' % TEST_USER_NAME,
+ 'uid': TEST_USER_NAME
+ })))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (TEST_USER_DN,
e.message['desc']))
@@ -96,19 +57,20 @@ def test_user(topology, request):
def fin():
log.info('Deleting user {}'.format(TEST_USER_DN))
- topology.standalone.delete_s(TEST_USER_DN)
+ topology_st.standalone.delete_s(TEST_USER_DN)
+
request.addfinalizer(fin)
@pytest.fixture(scope="module")
-def password_policy(topology, test_user):
+def password_policy(topology_st, test_user):
"""Set up password policy for subtree and user"""
log.info('Enable fine-grained policy')
try:
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
- 'nsslapd-pwpolicy-local',
- 'on')])
+ topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
+ 'nsslapd-pwpolicy-local',
+ 'on')])
except ldap.LDAPError as e:
log.error('Failed to set fine-grained policy: error {}'.format(
e.message['desc']))
@@ -116,7 +78,7 @@ def password_policy(topology, test_user):
log.info('Create password policy for subtree {}'.format(OU_PEOPLE))
try:
- subprocess.call(['%s/ns-newpwpolicy.pl' % topology.standalone.get_sbin_dir(),
+ subprocess.call(['%s/ns-newpwpolicy.pl' % topology_st.standalone.get_sbin_dir(),
'-D', DN_DM, '-w', PASSWORD,
'-p', str(PORT_STANDALONE), '-h', HOST_STANDALONE,
'-S', OU_PEOPLE, '-Z', SERVERID_STANDALONE])
@@ -127,18 +89,18 @@ def password_policy(topology, test_user):
log.info('Add pwdpolicysubentry attribute to {}'.format(OU_PEOPLE))
try:
- topology.standalone.modify_s(OU_PEOPLE, [(ldap.MOD_REPLACE,
- 'pwdpolicysubentry',
- PW_POLICY_CONT_PEOPLE)])
+ topology_st.standalone.modify_s(OU_PEOPLE, [(ldap.MOD_REPLACE,
+ 'pwdpolicysubentry',
+ PW_POLICY_CONT_PEOPLE)])
except ldap.LDAPError as e:
- log.error('Failed to pwdpolicysubentry pw policy '\
+ log.error('Failed to pwdpolicysubentry pw policy ' \
'policy for {}: error {}'.format(OU_PEOPLE,
e.message['desc']))
raise e
log.info('Create password policy for subtree {}'.format(TEST_USER_DN))
try:
- subprocess.call(['%s/ns-newpwpolicy.pl' % topology.standalone.get_sbin_dir(),
+ subprocess.call(['%s/ns-newpwpolicy.pl' % topology_st.standalone.get_sbin_dir(),
'-D', DN_DM, '-w', PASSWORD,
'-p', str(PORT_STANDALONE), '-h', HOST_STANDALONE,
'-U', TEST_USER_DN, '-Z', SERVERID_STANDALONE])
@@ -149,11 +111,11 @@ def password_policy(topology, test_user):
log.info('Add pwdpolicysubentry attribute to {}'.format(TEST_USER_DN))
try:
- topology.standalone.modify_s(TEST_USER_DN, [(ldap.MOD_REPLACE,
- 'pwdpolicysubentry',
- PW_POLICY_CONT_USER)])
+ topology_st.standalone.modify_s(TEST_USER_DN, [(ldap.MOD_REPLACE,
+ 'pwdpolicysubentry',
+ PW_POLICY_CONT_USER)])
except ldap.LDAPError as e:
- log.error('Failed to pwdpolicysubentry pw policy '\
+ log.error('Failed to pwdpolicysubentry pw policy ' \
'policy for {}: error {}'.format(TEST_USER_DN,
e.message['desc']))
raise e
@@ -163,7 +125,7 @@ def password_policy(topology, test_user):
[('on', 'off', ldap.UNWILLING_TO_PERFORM),
('off', 'off', ldap.UNWILLING_TO_PERFORM),
('off', 'on', None), ('on', 'on', None)])
-def test_change_pwd(topology, test_user, password_policy,
+def test_change_pwd(topology_st, test_user, password_policy,
subtree_pwchange, user_pwchange, exception):
"""Verify that 'passwordChange' attr works as expected
User should have a priority over a subtree.
@@ -186,24 +148,23 @@ def test_change_pwd(topology, test_user, password_policy,
log.info('Set passwordChange to "{}" - {}'.format(subtree_pwchange,
PW_POLICY_CONT_PEOPLE))
try:
- topology.standalone.modify_s(PW_POLICY_CONT_PEOPLE, [(ldap.MOD_REPLACE,
- 'passwordChange',
- subtree_pwchange)])
+ topology_st.standalone.modify_s(PW_POLICY_CONT_PEOPLE, [(ldap.MOD_REPLACE,
+ 'passwordChange',
+ subtree_pwchange)])
except ldap.LDAPError as e:
- log.error('Failed to set passwordChange '\
+ log.error('Failed to set passwordChange ' \
'policy for {}: error {}'.format(PW_POLICY_CONT_PEOPLE,
e.message['desc']))
raise e
-
log.info('Set passwordChange to "{}" - {}'.format(user_pwchange,
PW_POLICY_CONT_USER))
try:
- topology.standalone.modify_s(PW_POLICY_CONT_USER, [(ldap.MOD_REPLACE,
- 'passwordChange',
- user_pwchange)])
+ topology_st.standalone.modify_s(PW_POLICY_CONT_USER, [(ldap.MOD_REPLACE,
+ 'passwordChange',
+ user_pwchange)])
except ldap.LDAPError as e:
- log.error('Failed to set passwordChange '\
+ log.error('Failed to set passwordChange ' \
'policy for {}: error {}'.format(PW_POLICY_CONT_USER,
e.message['desc']))
raise e
@@ -211,29 +172,29 @@ def test_change_pwd(topology, test_user, password_policy,
try:
log.info('Bind as user and modify userPassword')
- topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
if exception:
with pytest.raises(exception):
- topology.standalone.modify_s(TEST_USER_DN, [(ldap.MOD_REPLACE,
+ topology_st.standalone.modify_s(TEST_USER_DN, [(ldap.MOD_REPLACE,
+ 'userPassword',
+ 'new_pass')])
+ else:
+ topology_st.standalone.modify_s(TEST_USER_DN, [(ldap.MOD_REPLACE,
'userPassword',
'new_pass')])
- else:
- topology.standalone.modify_s(TEST_USER_DN, [(ldap.MOD_REPLACE,
- 'userPassword',
- 'new_pass')])
except ldap.LDAPError as e:
log.error('Failed to change userpassword for {}: error {}'.format(
TEST_USER_DN, e.message['info']))
raise e
finally:
log.info('Bind as DM')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(TEST_USER_DN, [(ldap.MOD_REPLACE,
- 'userPassword',
- TEST_USER_PWD)])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(TEST_USER_DN, [(ldap.MOD_REPLACE,
+ 'userPassword',
+ TEST_USER_PWD)])
-def test_pwd_min_age(topology, test_user, password_policy):
+def test_pwd_min_age(topology_st, test_user, password_policy):
"""If we set passwordMinAge to some value, for example to 10, then it
should not allow the user to change the password within 10 seconds after
his previous change.
@@ -261,33 +222,33 @@ def test_pwd_min_age(topology, test_user, password_policy):
log.info('Set passwordminage to "{}" - {}'.format(num_seconds, PW_POLICY_CONT_PEOPLE))
try:
- topology.standalone.modify_s(PW_POLICY_CONT_PEOPLE, [(ldap.MOD_REPLACE,
- 'passwordminage',
- num_seconds)])
+ topology_st.standalone.modify_s(PW_POLICY_CONT_PEOPLE, [(ldap.MOD_REPLACE,
+ 'passwordminage',
+ num_seconds)])
except ldap.LDAPError as e:
- log.error('Failed to set passwordminage '\
+ log.error('Failed to set passwordminage ' \
'policy for {}: error {}'.format(PW_POLICY_CONT_PEOPLE,
e.message['desc']))
raise e
log.info('Set passwordminage to "{}" - {}'.format(num_seconds, PW_POLICY_CONT_USER))
try:
- topology.standalone.modify_s(PW_POLICY_CONT_USER, [(ldap.MOD_REPLACE,
- 'passwordminage',
- num_seconds)])
+ topology_st.standalone.modify_s(PW_POLICY_CONT_USER, [(ldap.MOD_REPLACE,
+ 'passwordminage',
+ num_seconds)])
except ldap.LDAPError as e:
- log.error('Failed to set passwordminage '\
+ log.error('Failed to set passwordminage ' \
'policy for {}: error {}'.format(PW_POLICY_CONT_USER,
e.message['desc']))
raise e
log.info('Set passwordminage to "{}" - {}'.format(num_seconds, DN_CONFIG))
try:
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
- 'passwordminage',
- num_seconds)])
+ topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
+ 'passwordminage',
+ num_seconds)])
except ldap.LDAPError as e:
- log.error('Failed to set passwordminage '\
+ log.error('Failed to set passwordminage ' \
'policy for {}: error {}'.format(DN_CONFIG,
e.message['desc']))
raise e
@@ -295,10 +256,10 @@ def test_pwd_min_age(topology, test_user, password_policy):
try:
log.info('Bind as user and modify userPassword')
- topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
- topology.standalone.modify_s(TEST_USER_DN, [(ldap.MOD_REPLACE,
- 'userPassword',
- 'new_pass')])
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
+ topology_st.standalone.modify_s(TEST_USER_DN, [(ldap.MOD_REPLACE,
+ 'userPassword',
+ 'new_pass')])
except ldap.LDAPError as e:
log.error('Failed to change userpassword for {}: error {}'.format(
TEST_USER_DN, e.message['info']))
@@ -306,31 +267,31 @@ def test_pwd_min_age(topology, test_user, password_policy):
time.sleep(1)
log.info('Bind as user and modify userPassword straight away after previous change')
- topology.standalone.simple_bind_s(TEST_USER_DN, 'new_pass')
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, 'new_pass')
with pytest.raises(ldap.CONSTRAINT_VIOLATION):
- topology.standalone.modify_s(TEST_USER_DN, [(ldap.MOD_REPLACE,
- 'userPassword',
- 'new_new_pass')])
+ topology_st.standalone.modify_s(TEST_USER_DN, [(ldap.MOD_REPLACE,
+ 'userPassword',
+ 'new_new_pass')])
log.info('Wait {} second'.format(int(num_seconds) + 2))
time.sleep(int(num_seconds) + 2)
try:
log.info('Bind as user and modify userPassword')
- topology.standalone.simple_bind_s(TEST_USER_DN, 'new_pass')
- topology.standalone.modify_s(TEST_USER_DN, [(ldap.MOD_REPLACE,
- 'userPassword',
- TEST_USER_PWD)])
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, 'new_pass')
+ topology_st.standalone.modify_s(TEST_USER_DN, [(ldap.MOD_REPLACE,
+ 'userPassword',
+ TEST_USER_PWD)])
except ldap.LDAPError as e:
log.error('Failed to change userpassword for {}: error {}'.format(
TEST_USER_DN, e.message['info']))
raise e
finally:
log.info('Bind as DM')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(TEST_USER_DN, [(ldap.MOD_REPLACE,
- 'userPassword',
- TEST_USER_PWD)])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(TEST_USER_DN, [(ldap.MOD_REPLACE,
+ 'userPassword',
+ TEST_USER_PWD)])
if __name__ == '__main__':
diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_inherit_global_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_inherit_global_test.py
index 67127e5..5a51124 100644
--- a/dirsrvtests/tests/suites/password/pwdPolicy_inherit_global_test.py
+++ b/dirsrvtests/tests/suites/password/pwdPolicy_inherit_global_test.py
@@ -19,7 +19,9 @@ from lib389 import DirSrvTools
from lib389.tools import DirSrvTools
from lib389._constants import *
from lib389.properties import *
+from lib389.topologies import topology_st
+logging.getLogger(__name__).setLevel(logging.INFO)
log = logging.getLogger(__name__)
CONFIG_DN = 'cn=config'
@@ -37,63 +39,23 @@ TEMP_USER = 'cn=test{}'
TEMP_USER_DN = '%s,%s' % (TEMP_USER, OU_PEOPLE)
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- """This fixture is used to standalone topology for the 'module'."""
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-
@pytest.fixture(scope="module")
-def test_user(topology, request):
+def test_user(topology_st, request):
"""User for binding operation"""
log.info('Adding user {}'.format(BN))
try:
- topology.standalone.add_s(Entry((BN,
- {'objectclass': ['top',
- 'person',
- 'organizationalPerson',
- 'inetOrgPerson'],
- 'cn': 'bind user',
- 'sn': 'bind user',
- 'userPassword': PASSWORD})))
+ topology_st.standalone.add_s(Entry((BN,
+ {'objectclass': ['top',
+ 'person',
+ 'organizationalPerson',
+ 'inetOrgPerson'],
+ 'cn': 'bind user',
+ 'sn': 'bind user',
+ 'userPassword': PASSWORD})))
log.info('Adding an aci for the bind user')
BN_ACI = '(targetattr="*")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///%s";)' % BN
- topology.standalone.modify_s(OU_PEOPLE, [(ldap.MOD_ADD, 'aci', BN_ACI)])
+ topology_st.standalone.modify_s(OU_PEOPLE, [(ldap.MOD_ADD, 'aci', BN_ACI)])
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (BN,
@@ -102,13 +64,14 @@ def test_user(topology, request):
def fin():
log.info('Deleting user {}'.format(BN))
- topology.standalone.delete_s(BN)
- topology.standalone.modify_s(OU_PEOPLE, [(ldap.MOD_DELETE, 'aci', BN_ACI)])
+ topology_st.standalone.delete_s(BN)
+ topology_st.standalone.modify_s(OU_PEOPLE, [(ldap.MOD_DELETE, 'aci', BN_ACI)])
+
request.addfinalizer(fin)
@pytest.fixture(scope="module")
-def password_policy(topology, test_user):
+def password_policy(topology_st, test_user):
"""Set global password policy.
Then, set fine-grained subtree level password policy
to ou=People with no password syntax.
@@ -118,9 +81,9 @@ def password_policy(topology, test_user):
log.info('Enable fine-grained policy')
try:
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
- 'nsslapd-pwpolicy-local',
- 'on')])
+ topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
+ 'nsslapd-pwpolicy-local',
+ 'on')])
except ldap.LDAPError as e:
log.error('Failed to set fine-grained policy: error {}'.format(
e.message['desc']))
@@ -128,7 +91,7 @@ def password_policy(topology, test_user):
log.info('Create password policy for subtree {}'.format(OU_PEOPLE))
try:
- subprocess.call(['%s/ns-newpwpolicy.pl' % topology.standalone.get_sbin_dir(),
+ subprocess.call(['%s/ns-newpwpolicy.pl' % topology_st.standalone.get_sbin_dir(),
'-D', DN_DM, '-w', PASSWORD,
'-p', str(PORT_STANDALONE), '-h', HOST_STANDALONE,
'-S', OU_PEOPLE, '-Z', SERVERID_STANDALONE])
@@ -139,32 +102,32 @@ def password_policy(topology, test_user):
log.info('Add pwdpolicysubentry attribute to {}'.format(OU_PEOPLE))
try:
- topology.standalone.modify_s(OU_PEOPLE, [(ldap.MOD_REPLACE,
- 'pwdpolicysubentry',
- PWP_CONTAINER_PEOPLE)])
+ topology_st.standalone.modify_s(OU_PEOPLE, [(ldap.MOD_REPLACE,
+ 'pwdpolicysubentry',
+ PWP_CONTAINER_PEOPLE)])
except ldap.LDAPError as e:
- log.error('Failed to pwdpolicysubentry pw policy '\
+ log.error('Failed to pwdpolicysubentry pw policy ' \
'policy for {}: error {}'.format(OU_PEOPLE,
e.message['desc']))
raise e
log.info("Set the default settings for the policy container.")
- topology.standalone.modify_s(PWP_CONTAINER_PEOPLE,
- [(ldap.MOD_REPLACE, 'passwordMustChange', 'off'),
- (ldap.MOD_REPLACE, 'passwordExp', 'off'),
- (ldap.MOD_REPLACE, 'passwordMinAge', '0'),
- (ldap.MOD_REPLACE, 'passwordChange', 'off'),
- (ldap.MOD_REPLACE, 'passwordStorageScheme', 'ssha')])
+ topology_st.standalone.modify_s(PWP_CONTAINER_PEOPLE,
+ [(ldap.MOD_REPLACE, 'passwordMustChange', 'off'),
+ (ldap.MOD_REPLACE, 'passwordExp', 'off'),
+ (ldap.MOD_REPLACE, 'passwordMinAge', '0'),
+ (ldap.MOD_REPLACE, 'passwordChange', 'off'),
+ (ldap.MOD_REPLACE, 'passwordStorageScheme', 'ssha')])
- check_attr_val(topology, CONFIG_DN, ATTR_INHERIT_GLOBAL, 'off')
- check_attr_val(topology, CONFIG_DN, ATTR_CHECK_SYNTAX, 'off')
+ check_attr_val(topology_st, CONFIG_DN, ATTR_INHERIT_GLOBAL, 'off')
+ check_attr_val(topology_st, CONFIG_DN, ATTR_CHECK_SYNTAX, 'off')
-def check_attr_val(topology, dn, attr, expected):
+def check_attr_val(topology_st, dn, attr, expected):
"""Check that entry has the value"""
try:
- centry = topology.standalone.search_s(dn, ldap.SCOPE_BASE, 'cn=*')
+ centry = topology_st.standalone.search_s(dn, ldap.SCOPE_BASE, 'cn=*')
assert centry[0], 'Failed to get %s' % dn
val = centry[0].getValue(attr)
@@ -179,7 +142,7 @@ def check_attr_val(topology, dn, attr, expected):
@pytest.mark.parametrize('inherit_value,checksyntax_value',
[('off', 'off'), ('on', 'off'), ('off', 'on')])
-def test_entry_has_no_restrictions(topology, password_policy, test_user,
+def test_entry_has_no_restrictions(topology_st, password_policy, test_user,
inherit_value, checksyntax_value):
"""Make sure an entry added to ou=people
has no password syntax restrictions when:
@@ -206,47 +169,47 @@ def test_entry_has_no_restrictions(topology, password_policy, test_user,
log.info('Set {} to {}'.format(ATTR_INHERIT_GLOBAL, inherit_value))
log.info('Set {} to {}'.format(ATTR_CHECK_SYNTAX, checksyntax_value))
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE,
- ATTR_INHERIT_GLOBAL, inherit_value)])
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE,
- ATTR_CHECK_SYNTAX, checksyntax_value)])
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE,
+ ATTR_INHERIT_GLOBAL, inherit_value)])
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE,
+ ATTR_CHECK_SYNTAX, checksyntax_value)])
# Wait a second for cn=config to apply
time.sleep(1)
- check_attr_val(topology, CONFIG_DN, ATTR_INHERIT_GLOBAL, inherit_value)
- check_attr_val(topology, CONFIG_DN, ATTR_CHECK_SYNTAX, checksyntax_value)
+ check_attr_val(topology_st, CONFIG_DN, ATTR_INHERIT_GLOBAL, inherit_value)
+ check_attr_val(topology_st, CONFIG_DN, ATTR_CHECK_SYNTAX, checksyntax_value)
log.info('Bind as test user')
- topology.standalone.simple_bind_s(BN, PASSWORD)
+ topology_st.standalone.simple_bind_s(BN, PASSWORD)
log.info('Make sure an entry added to ou=people has '
'no password syntax restrictions.')
try:
- topology.standalone.add_s(Entry((TEMP_USER_DN.format('0'),
- {'objectclass': ['top',
- 'person',
- 'organizationalPerson',
- 'inetOrgPerson'],
- 'cn': TEMP_USER.format('0'),
- 'sn': TEMP_USER.format('0'),
- 'userPassword': 'short'})))
+ topology_st.standalone.add_s(Entry((TEMP_USER_DN.format('0'),
+ {'objectclass': ['top',
+ 'person',
+ 'organizationalPerson',
+ 'inetOrgPerson'],
+ 'cn': TEMP_USER.format('0'),
+ 'sn': TEMP_USER.format('0'),
+ 'userPassword': 'short'})))
except ldap.LDAPError as e:
log.fatal('Failed to add cn=test0 with userPassword: short: ' +
e.message['desc'])
raise e
finally:
log.info('Bind as DM user')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
log.info('Remove {}'.format(TEMP_USER_DN.format('0')))
try:
- topology.standalone.delete_s(TEMP_USER_DN.format('0'))
+ topology_st.standalone.delete_s(TEMP_USER_DN.format('0'))
except ldap.NO_SUCH_OBJECT as e:
log.fatal('There is no {}, it is a problem'.format(TEMP_USER_DN.format('0')))
raise e
@pytest.mark.parametrize('container', [DN_CONFIG, PWP_CONTAINER_PEOPLE])
-def test_entry_has_restrictions(topology, password_policy, test_user, container):
+def test_entry_has_restrictions(topology_st, password_policy, test_user, container):
"""Set 'nsslapd-pwpolicy-inherit-global: on'
and 'passwordCheckSyntax: on'. Make sure that
syntax rules work, if set them at both: cn=config and
@@ -271,56 +234,56 @@ def test_entry_has_restrictions(topology, password_policy, test_user, container)
log.info('Set {} to {}'.format(ATTR_INHERIT_GLOBAL, 'on'))
log.info('Set {} to {}'.format(ATTR_CHECK_SYNTAX, 'on'))
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE,
- ATTR_INHERIT_GLOBAL, 'on')])
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE,
- ATTR_CHECK_SYNTAX, 'on')])
- topology.standalone.modify_s(container, [(ldap.MOD_REPLACE,
- 'passwordMinLength' , '9')])
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE,
+ ATTR_INHERIT_GLOBAL, 'on')])
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE,
+ ATTR_CHECK_SYNTAX, 'on')])
+ topology_st.standalone.modify_s(container, [(ldap.MOD_REPLACE,
+ 'passwordMinLength', '9')])
# Wait a second for cn=config to apply
time.sleep(1)
- check_attr_val(topology, CONFIG_DN, ATTR_INHERIT_GLOBAL, 'on')
- check_attr_val(topology, CONFIG_DN, ATTR_CHECK_SYNTAX, 'on')
+ check_attr_val(topology_st, CONFIG_DN, ATTR_INHERIT_GLOBAL, 'on')
+ check_attr_val(topology_st, CONFIG_DN, ATTR_CHECK_SYNTAX, 'on')
log.info('Bind as test user')
- topology.standalone.simple_bind_s(BN, PASSWORD)
+ topology_st.standalone.simple_bind_s(BN, PASSWORD)
log.info('Try to add user with a short password (<9)')
with pytest.raises(ldap.CONSTRAINT_VIOLATION):
- topology.standalone.add_s(Entry((TEMP_USER_DN.format('0'),
- {'objectclass': ['top',
- 'person',
- 'organizationalPerson',
- 'inetOrgPerson'],
- 'cn': TEMP_USER.format('0'),
- 'sn': TEMP_USER.format('0'),
- 'userPassword': 'short'})))
+ topology_st.standalone.add_s(Entry((TEMP_USER_DN.format('0'),
+ {'objectclass': ['top',
+ 'person',
+ 'organizationalPerson',
+ 'inetOrgPerson'],
+ 'cn': TEMP_USER.format('0'),
+ 'sn': TEMP_USER.format('0'),
+ 'userPassword': 'short'})))
log.info('Try to add user with a long password (>9)')
try:
- topology.standalone.add_s(Entry((TEMP_USER_DN.format('1'),
- {'objectclass': ['top',
- 'person',
- 'organizationalPerson',
- 'inetOrgPerson'],
- 'cn': TEMP_USER.format('1'),
- 'sn': TEMP_USER.format('1'),
- 'userPassword': 'Reallylong1'})))
+ topology_st.standalone.add_s(Entry((TEMP_USER_DN.format('1'),
+ {'objectclass': ['top',
+ 'person',
+ 'organizationalPerson',
+ 'inetOrgPerson'],
+ 'cn': TEMP_USER.format('1'),
+ 'sn': TEMP_USER.format('1'),
+ 'userPassword': 'Reallylong1'})))
except ldap.LDAPError as e:
log.fatal('Failed to add cn=test1 with userPassword: short: '
+ e.message['desc'])
raise e
finally:
log.info('Bind as DM user')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
log.info('Remove {}'.format(TEMP_USER_DN.format('0')))
try:
- topology.standalone.delete_s(TEMP_USER_DN.format('0'))
+ topology_st.standalone.delete_s(TEMP_USER_DN.format('0'))
except ldap.NO_SUCH_OBJECT as e:
log.info('There is no {}, it is okay'.format(TEMP_USER_DN.format('0')))
try:
- topology.standalone.delete_s(TEMP_USER_DN.format('1'))
+ topology_st.standalone.delete_s(TEMP_USER_DN.format('1'))
except ldap.NO_SUCH_OBJECT as e:
log.fatal('There is no {}, it is a problem'.format(TEMP_USER_DN.format('1')))
raise e
diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_syntax_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_syntax_test.py
index aad4e40..7336c4d 100644
--- a/dirsrvtests/tests/suites/password/pwdPolicy_syntax_test.py
+++ b/dirsrvtests/tests/suites/password/pwdPolicy_syntax_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -15,74 +15,25 @@ from lib389 import DirSrv, Entry
from lib389._constants import *
from lib389.properties import *
from lib389.tasks import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-DEBUGGING = False
-
USER_DN = 'uid=user,ou=People,%s' % DEFAULT_SUFFIX
-if DEBUGGING:
- logging.getLogger(__name__).setLevel(logging.DEBUG)
-else:
- logging.getLogger(__name__).setLevel(logging.INFO)
-
-
+logging.getLogger(__name__).setLevel(logging.INFO)
log = logging.getLogger(__name__)
-class TopologyStandalone(object):
- """The DS Topology Class"""
- def __init__(self, standalone):
- """Init"""
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- """Create DS Deployment"""
-
- # Creating standalone instance ...
- if DEBUGGING:
- standalone = DirSrv(verbose=True)
- else:
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- """If we are debugging just stop the instances, otherwise remove
- them
- """
- if DEBUGGING:
- standalone.stop()
- else:
- standalone.delete()
-
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
def _create_user(inst):
"""Create the test user."""
inst.add_s(Entry((
- USER_DN, {
- 'objectClass': 'top account simplesecurityobject'.split(),
- 'uid': 'user',
- 'userpassword': PASSWORD
- })))
+ USER_DN, {
+ 'objectClass': 'top account simplesecurityobject'.split(),
+ 'uid': 'user',
+ 'userpassword': PASSWORD
+ })))
def setPolicy(inst, attr, value):
@@ -129,7 +80,7 @@ def resetPasswd(inst):
# Now set the password
try:
inst.modify_s(USER_DN,
- [(ldap.MOD_REPLACE, 'userpassword', PASSWORD)])
+ [(ldap.MOD_REPLACE, 'userpassword', PASSWORD)])
except ldap.LDAPError as e:
log.fatal("Failed to reset user password: " + str(e))
assert False
@@ -145,7 +96,7 @@ def tryPassword(inst, policy_attr, value, reset_value, pw_bad, pw_good, msg):
setPolicy(inst, policy_attr, value)
try:
inst.modify_s(USER_DN,
- [(ldap.MOD_REPLACE, 'userpassword', pw_bad)])
+ [(ldap.MOD_REPLACE, 'userpassword', pw_bad)])
log.fatal('Invalid password was unexpectedly accepted (%s)' %
(policy_attr))
assert False
@@ -160,7 +111,7 @@ def tryPassword(inst, policy_attr, value, reset_value, pw_bad, pw_good, msg):
# Change password that is allowed
try:
inst.modify_s(USER_DN,
- [(ldap.MOD_REPLACE, 'userpassword', pw_good)])
+ [(ldap.MOD_REPLACE, 'userpassword', pw_good)])
except ldap.LDAPError as e:
log.fatal("Failed to change password: " + str(e))
assert False
@@ -170,46 +121,46 @@ def tryPassword(inst, policy_attr, value, reset_value, pw_bad, pw_good, msg):
setPolicy(inst, policy_attr, reset_value)
-def test_pwdPolicy_syntax(topology):
+def test_pwdPolicy_syntax(topology_st):
'''
Password policy test: Ensure that on a password change, the policy syntax
is enforced correctly.
'''
# Create a user
- _create_user(topology.standalone)
+ _create_user(topology_st.standalone)
# Set the password policy globally
- topology.standalone.config.set('passwordCheckSyntax', 'on')
- topology.standalone.config.set('nsslapd-pwpolicy-local', 'off')
- topology.standalone.config.set('passwordMinCategories', '1')
+ topology_st.standalone.config.set('passwordCheckSyntax', 'on')
+ topology_st.standalone.config.set('nsslapd-pwpolicy-local', 'off')
+ topology_st.standalone.config.set('passwordMinCategories', '1')
#
# Test each syntax catagory
#
# Min Length
- tryPassword(topology.standalone, 'passwordMinLength', 10, 2, 'passwd',
+ tryPassword(topology_st.standalone, 'passwordMinLength', 10, 2, 'passwd',
'password123', 'length too short')
# Min Digit
- tryPassword(topology.standalone, 'passwordMinDigits', 2, 0, 'passwd',
+ tryPassword(topology_st.standalone, 'passwordMinDigits', 2, 0, 'passwd',
'password123', 'does not contain minimum number of digits')
# Min Alphas
- tryPassword(topology.standalone, 'passwordMinAlphas', 2, 0, 'p123456789',
+ tryPassword(topology_st.standalone, 'passwordMinAlphas', 2, 0, 'p123456789',
'password123', 'does not contain minimum number of alphas')
# Max Repeats
- tryPassword(topology.standalone, 'passwordMaxRepeats', 2, 0, 'passsword',
+ tryPassword(topology_st.standalone, 'passwordMaxRepeats', 2, 0, 'passsword',
'pasword123', 'too many repeating characters')
# Min Specials
- tryPassword(topology.standalone, 'passwordMinSpecials', 2, 0, 'passwd',
+ tryPassword(topology_st.standalone, 'passwordMinSpecials', 2, 0, 'passwd',
'password_#$',
'does not contain minimum number of special characters')
# Min Lowers
- tryPassword(topology.standalone, 'passwordMinLowers', 2, 0, 'PASSWORD123',
+ tryPassword(topology_st.standalone, 'passwordMinLowers', 2, 0, 'PASSWORD123',
'password123',
'does not contain minimum number of lowercase characters')
# Min Uppers
- tryPassword(topology.standalone, 'passwordMinUppers', 2, 0, 'password',
+ tryPassword(topology_st.standalone, 'passwordMinUppers', 2, 0, 'password',
'PASSWORD',
'does not contain minimum number of lowercase characters')
# Min 8-bits - "ldap" package only accepts ascii strings at the moment
diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py
index f3e57f4..fd0236c 100644
--- a/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py
+++ b/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py
@@ -1,3 +1,11 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2016 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
import os
import sys
import time
@@ -12,69 +20,18 @@ from lib389._constants import *
from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
-DEBUGGING = False
CONFIG_ATTR = 'passwordSendExpiringTime'
USER_DN = 'uid=tuser,{:s}'.format(DEFAULT_SUFFIX)
USER_PASSWD = 'secret123'
-if DEBUGGING:
- logging.getLogger(__name__).setLevel(logging.DEBUG)
-else:
- logging.getLogger(__name__).setLevel(logging.INFO)
-
+logging.getLogger(__name__).setLevel(logging.INFO)
log = logging.getLogger(__name__)
-class TopologyStandalone(object):
- """The DS Topology Class"""
- def __init__(self, standalone):
- """Init"""
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- """Create DS Deployment"""
-
- # Creating standalone instance ...
- if DEBUGGING:
- standalone = DirSrv(verbose=True)
- else:
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- """If we are debugging just stop the instances, otherwise remove
- them
- """
-
- if DEBUGGING:
- standalone.stop()
- else:
- standalone.delete()
-
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- return TopologyStandalone(standalone)
-
-
@pytest.fixture
-def global_policy(topology, request):
+def global_policy(topology_st, request):
"""Sets the required global
password policy attributes under
cn=config entry
@@ -86,21 +43,21 @@ def global_policy(topology, request):
CONFIG_ATTR: ''}
try:
log.info('Get the default values')
- entry = topology.standalone.getEntry(DN_CONFIG, ldap.SCOPE_BASE,
- '(objectClass=*)', attrs.keys())
+ entry = topology_st.standalone.getEntry(DN_CONFIG, ldap.SCOPE_BASE,
+ '(objectClass=*)', attrs.keys())
for key in attrs.keys():
attrs[key] = entry.getValue(key)
log.info('Set the new values')
- topology.standalone.modify_s(DN_CONFIG, [
- (ldap.MOD_REPLACE, 'passwordExp', 'on'),
- (ldap.MOD_REPLACE, 'passwordMaxAge', '172800'),
- (ldap.MOD_REPLACE, 'passwordWarning', '86400'),
- (ldap.MOD_REPLACE, CONFIG_ATTR, 'on')])
+ topology_st.standalone.modify_s(DN_CONFIG, [
+ (ldap.MOD_REPLACE, 'passwordExp', 'on'),
+ (ldap.MOD_REPLACE, 'passwordMaxAge', '172800'),
+ (ldap.MOD_REPLACE, 'passwordWarning', '86400'),
+ (ldap.MOD_REPLACE, CONFIG_ATTR, 'on')])
except ldap.LDAPError as ex:
- log.error("Failed to set global password policy, error:{:s}"\
+ log.error("Failed to set global password policy, error:{:s}" \
.format(ex.message['desc']))
raise ex
@@ -110,11 +67,12 @@ def global_policy(topology, request):
try:
log.info('Reset the defaults')
for key in attrs.keys():
- topology.standalone.modify_s(DN_CONFIG, [
+ topology_st.standalone.modify_s(DN_CONFIG, [
(ldap.MOD_REPLACE, key, attrs[key])])
except ldap.LDAPError as ex:
log.error("Failed to set defaults, error:{:s}".format(ex.message['desc']))
raise ex
+
request.addfinalizer(fin)
# A short sleep is required after the modifying password policy or cn=config
@@ -122,7 +80,7 @@ def global_policy(topology, request):
@pytest.fixture
-def global_policy_default(topology, request):
+def global_policy_default(topology_st, request):
"""Sets the required global password policy
attributes for testing the default behavior
of password expiry warning time
@@ -131,22 +89,22 @@ def global_policy_default(topology, request):
attrs = {'passwordExp': '',
'passwordMaxAge': '',
'passwordWarning': '',
- CONFIG_ATTR : ''}
+ CONFIG_ATTR: ''}
try:
log.info('Get the default values')
- entry = topology.standalone.getEntry(DN_CONFIG, ldap.SCOPE_BASE,
- '(objectClass=*)', attrs.keys())
+ entry = topology_st.standalone.getEntry(DN_CONFIG, ldap.SCOPE_BASE,
+ '(objectClass=*)', attrs.keys())
for key in attrs.keys():
attrs[key] = entry.getValue(key)
log.info('Set the new values')
- topology.standalone.modify_s(DN_CONFIG, [
- (ldap.MOD_REPLACE, 'passwordExp', 'on'),
- (ldap.MOD_REPLACE, 'passwordMaxAge', '86400'),
- (ldap.MOD_REPLACE, 'passwordWarning', '86400'),
- (ldap.MOD_REPLACE, CONFIG_ATTR, 'off')])
+ topology_st.standalone.modify_s(DN_CONFIG, [
+ (ldap.MOD_REPLACE, 'passwordExp', 'on'),
+ (ldap.MOD_REPLACE, 'passwordMaxAge', '86400'),
+ (ldap.MOD_REPLACE, 'passwordWarning', '86400'),
+ (ldap.MOD_REPLACE, CONFIG_ATTR, 'off')])
except ldap.LDAPError as ex:
- log.error("Failed to set global password policy, error:{:s}"\
+ log.error("Failed to set global password policy, error:{:s}" \
.format(ex.message['desc']))
raise ex
@@ -156,13 +114,14 @@ def global_policy_default(topology, request):
log.info('Reset the defaults')
try:
for key in attrs.keys():
- topology.standalone.modify_s(DN_CONFIG, [
- (ldap.MOD_REPLACE, key, attrs[key])
- ])
+ topology_st.standalone.modify_s(DN_CONFIG, [
+ (ldap.MOD_REPLACE, key, attrs[key])
+ ])
except ldap.LDAPError as ex:
- log.error("Failed to reset defaults, error:{:s}"\
+ log.error("Failed to reset defaults, error:{:s}" \
.format(ex.message['desc']))
raise ex
+
request.addfinalizer(fin)
# A short sleep is required after modifying password policy or cn=config
@@ -170,7 +129,7 @@ def global_policy_default(topology, request):
@pytest.fixture
-def add_user(topology, request):
+def add_user(topology_st, request):
"""Adds a user for binding"""
user_data = {'objectClass': 'top person inetOrgPerson'.split(),
@@ -181,7 +140,7 @@ def add_user(topology, request):
log.info('Add the user')
try:
- topology.standalone.add_s(Entry((USER_DN, user_data)))
+ topology_st.standalone.add_s(Entry((USER_DN, user_data)))
except ldap.LDAPError as ex:
log.error("Failed to add user, error:{:s}".format(ex.message['desc']))
raise ex
@@ -191,27 +150,28 @@ def add_user(topology, request):
log.info('Remove the user entry')
try:
- topology.standalone.delete_s(USER_DN)
+ topology_st.standalone.delete_s(USER_DN)
except ldap.LDAPError as ex:
- log.error("Failed to remove user, error:{:s}"\
+ log.error("Failed to remove user, error:{:s}" \
.format(ex.message['desc']))
raise ex
+
request.addfinalizer(fin)
@pytest.fixture
-def local_policy(topology, add_user):
+def local_policy(topology_st, add_user):
"""Sets fine grained policy for user entry"""
log.info("Setting fine grained policy for user ({:s})".format(USER_DN))
try:
- subprocess.call(['%s/ns-newpwpolicy.pl' % topology.standalone.get_sbin_dir(),
+ subprocess.call(['%s/ns-newpwpolicy.pl' % topology_st.standalone.get_sbin_dir(),
'-D', DN_DM,
'-w', PASSWORD, '-h', HOST_STANDALONE,
'-p', str(PORT_STANDALONE), '-U', USER_DN,
'-Z', SERVERID_STANDALONE])
except subprocess.CalledProcessError as ex:
- log.error("Failed to set fine grained policy, error:{:s}"\
+ log.error("Failed to set fine grained policy, error:{:s}" \
.format(str(ex)))
raise ex
@@ -219,7 +179,7 @@ def local_policy(topology, add_user):
time.sleep(0.5)
-def get_password_warning(topology):
+def get_password_warning(topology_st):
"""Gets the password expiry warning time for the user"""
res_type = res_data = res_msgid = res_ctrls = None
@@ -227,31 +187,31 @@ def get_password_warning(topology):
log.info('Bind with the user and request the password expiry warning time')
try:
- result_id = topology.standalone.simple_bind(USER_DN, USER_PASSWD,
- serverctrls = [PasswordPolicyControl()])
- res_type, res_data, res_msgid, res_ctrls =\
- topology.standalone.result3(result_id)
+ result_id = topology_st.standalone.simple_bind(USER_DN, USER_PASSWD,
+ serverctrls=[PasswordPolicyControl()])
+ res_type, res_data, res_msgid, res_ctrls = \
+ topology_st.standalone.result3(result_id)
# This exception will be thrown when the user's password has expired
except ldap.INVALID_CREDENTIALS as ex:
raise ex
except ldap.LDAPError as ex:
- log.error("Failed to get password expiry warning time, error:{:s}"\
- .format(ex.message['desc']))
+ log.error("Failed to get password expiry warning time, error:{:s}" \
+ .format(ex.message['desc']))
raise ex
# Return the control
return res_ctrls
-def set_conf_attr(topology, attr, val):
+def set_conf_attr(topology_st, attr, val):
"""Sets the value of a given attribute under cn=config"""
log.info("Setting {:s} to {:s}".format(attr, val))
try:
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, attr, val)])
+ topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, attr, val)])
except ldap.LDAPError as ex:
- log.error("Failed to set {:s} to {:s} error:{:s}"\
+ log.error("Failed to set {:s} to {:s} error:{:s}" \
.format(attr, val, ex.message['desc']))
raise ex
@@ -259,17 +219,17 @@ def set_conf_attr(topology, attr, val):
time.sleep(0.5)
-def get_conf_attr(topology, attr):
+def get_conf_attr(topology_st, attr):
"""Gets the value of a given
attribute under cn=config entry
"""
try:
- entry = topology.standalone.getEntry(DN_CONFIG, ldap.SCOPE_BASE,
- '(objectClass=*)', [attr])
+ entry = topology_st.standalone.getEntry(DN_CONFIG, ldap.SCOPE_BASE,
+ '(objectClass=*)', [attr])
val = entry.getValue(attr)
except ldap.LDAPError as ex:
- log.error("Failed to get the value of {:s}, error:{:s}"\
+ log.error("Failed to get the value of {:s}, error:{:s}" \
.format(attr, ex.message['desc']))
raise ex
@@ -277,8 +237,8 @@ def get_conf_attr(topology, attr):
return val
-@pytest.mark.parametrize("value", (' ' , 'junk123', 'on', 'off'))
-def test_different_values(topology, value):
+@pytest.mark.parametrize("value", (' ', 'junk123', 'on', 'off'))
+def test_different_values(topology_st, value):
"""Try to set passwordSendExpiringTime attribute
to various values both valid and invalid
@@ -298,31 +258,31 @@ def test_different_values(topology, value):
"""
log.info('Get the default value')
- defval = get_conf_attr(topology, CONFIG_ATTR)
+ defval = get_conf_attr(topology_st, CONFIG_ATTR)
if value not in ('on', 'off'):
log.info('An invalid value is being tested')
with pytest.raises(ldap.OPERATIONS_ERROR):
- set_conf_attr(topology, CONFIG_ATTR, value)
+ set_conf_attr(topology_st, CONFIG_ATTR, value)
log.info('Now check the value is unchanged')
- assert get_conf_attr(topology, CONFIG_ATTR) == defval
+ assert get_conf_attr(topology_st, CONFIG_ATTR) == defval
log.info("Invalid value {:s} was rejected correctly".format(value))
else:
log.info('A valid value is being tested')
- set_conf_attr(topology, CONFIG_ATTR, value)
+ set_conf_attr(topology_st, CONFIG_ATTR, value)
log.info('Now check that the value has been changed')
- assert get_conf_attr(topology, CONFIG_ATTR) == value
+ assert get_conf_attr(topology_st, CONFIG_ATTR) == value
log.info("{:s} is now set to {:s}".format(CONFIG_ATTR, value))
log.info('Set passwordSendExpiringTime back to the default value')
- set_conf_attr(topology, CONFIG_ATTR, defval)
+ set_conf_attr(topology_st, CONFIG_ATTR, defval)
-def test_expiry_time(topology, global_policy, add_user):
+def test_expiry_time(topology_st, global_policy, add_user):
"""Test whether the password expiry warning
time for a user is returned appropriately
@@ -346,23 +306,23 @@ def test_expiry_time(topology, global_policy, add_user):
res_ctrls = None
try:
log.info('Get the password expiry warning time')
- log.info("Binding with ({:s}) and requesting the password expiry warning time"\
+ log.info("Binding with ({:s}) and requesting the password expiry warning time" \
.format(USER_DN))
- res_ctrls = get_password_warning(topology)
+ res_ctrls = get_password_warning(topology_st)
log.info('Check whether the time is returned')
assert res_ctrls
- log.info("user's password will expire in {:d} seconds"\
+ log.info("user's password will expire in {:d} seconds" \
.format(res_ctrls[0].timeBeforeExpiration))
finally:
log.info("Rebinding as DM")
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
-@pytest.mark.parametrize("attr,val",[(CONFIG_ATTR, 'off'),
- ('passwordWarning', '3600')])
-def test_password_warning(topology, global_policy, add_user, attr, val):
+@pytest.mark.parametrize("attr,val", [(CONFIG_ATTR, 'off'),
+ ('passwordWarning', '3600')])
+def test_password_warning(topology_st, global_policy, add_user, attr, val):
"""Test password expiry warning time by
setting passwordSendExpiringTime to off
and setting passwordWarning to a short value
@@ -393,25 +353,25 @@ def test_password_warning(topology, global_policy, add_user, attr, val):
try:
log.info('Set configuration parameter')
- set_conf_attr(topology, attr, val)
+ set_conf_attr(topology_st, attr, val)
- log.info("Binding with ({:s}) and requesting password expiry warning time"\
+ log.info("Binding with ({:s}) and requesting password expiry warning time" \
.format(USER_DN))
- res_ctrls = get_password_warning(topology)
+ res_ctrls = get_password_warning(topology_st)
log.info('Check the state of the control')
if not res_ctrls:
- log.info("Password Expiry warning time is not returned as {:s} is set to {:s}"\
+ log.info("Password Expiry warning time is not returned as {:s} is set to {:s}" \
.format(attr, val))
else:
- log.info("({:s}) password will expire in {:d} seconds"\
+ log.info("({:s}) password will expire in {:d} seconds" \
.format(USER_DN, res_ctrls[0].timeBeforeExpiration))
finally:
log.info("Rebinding as DM")
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
-def test_with_different_password_states(topology, global_policy, add_user):
+def test_with_different_password_states(topology_st, global_policy, add_user):
"""Test the control with different password states
:Feature: Password Expiry Warning Time
@@ -440,17 +400,17 @@ def test_with_different_password_states(topology, global_policy, add_user):
res_ctrls = None
try:
- log.info("Expiring user's password by moving the"\
+ log.info("Expiring user's password by moving the" \
" system date past the valid period")
subprocess.check_call(['/usr/bin/date', '-s', '+30 day'])
log.info('Wait for the server to pick up new date')
time.sleep(5)
- log.info("Attempting to bind with user {:s} and retrive the password"\
+ log.info("Attempting to bind with user {:s} and retrive the password" \
" expiry warning time".format(USER_DN))
with pytest.raises(ldap.INVALID_CREDENTIALS) as ex:
- res_ctrls = get_password_warning(topology)
+ res_ctrls = get_password_warning(topology_st)
log.info("Bind Failed, error: {:s}".format(str(ex)))
@@ -460,21 +420,21 @@ def test_with_different_password_states(topology, global_policy, add_user):
log.info('Wait for the server to pick up new date')
time.sleep(5)
- log.info("Rebinding with {:s} and retrieving the password"\
+ log.info("Rebinding with {:s} and retrieving the password" \
" expiry warning time".format(USER_DN))
- res_ctrls = get_password_warning(topology)
+ res_ctrls = get_password_warning(topology_st)
log.info('Check that the control is returned')
assert res_ctrls
- log.info("user's password will expire in {:d} seconds"\
+ log.info("user's password will expire in {:d} seconds" \
.format(res_ctrls[0].timeBeforeExpiration))
finally:
log.info("Rebinding as DM")
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
-def test_default_behavior(topology, global_policy_default, add_user):
+def test_default_behavior(topology_st, global_policy_default, add_user):
"""Test the default behavior of password
expiry warning time
@@ -498,22 +458,22 @@ def test_default_behavior(topology, global_policy_default, add_user):
res_ctrls = None
try:
- log.info("Binding with {:s} and requesting the password expiry warning time"\
+ log.info("Binding with {:s} and requesting the password expiry warning time" \
.format(USER_DN))
- res_ctrls = get_password_warning(topology)
+ res_ctrls = get_password_warning(topology_st)
log.info('Check that control is returned even'
'if passwordSendExpiringTime is set to off')
assert res_ctrls
- log.info("user's password will expire in {:d} seconds"\
+ log.info("user's password will expire in {:d} seconds" \
.format(res_ctrls[0].timeBeforeExpiration))
finally:
log.info("Rebinding as DM")
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
-def test_with_local_policy(topology, global_policy, local_policy):
+def test_with_local_policy(topology_st, global_policy, local_policy):
"""Test the attribute with fine grained policy
set for the user
@@ -540,9 +500,9 @@ def test_with_local_policy(topology, global_policy, local_policy):
res_ctrls = None
try:
- log.info("Attempting to get password expiry warning time for"\
+ log.info("Attempting to get password expiry warning time for" \
" user {:s}".format(USER_DN))
- res_ctrls = get_password_warning(topology)
+ res_ctrls = get_password_warning(topology_st)
log.info('Check that the control is not returned')
assert not res_ctrls
@@ -550,7 +510,7 @@ def test_with_local_policy(topology, global_policy, local_policy):
log.info("Password expiry warning time is not returned")
finally:
log.info("Rebinding as DM")
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
if __name__ == '__main__':
diff --git a/dirsrvtests/tests/suites/password/pwd_algo_test.py b/dirsrvtests/tests/suites/password/pwd_algo_test.py
index aa8cbf5..b31f1ec 100644
--- a/dirsrvtests/tests/suites/password/pwd_algo_test.py
+++ b/dirsrvtests/tests/suites/password/pwd_algo_test.py
@@ -1,3 +1,11 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2016 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
import os
import sys
import time
@@ -10,64 +18,14 @@ from lib389._constants import *
from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
-DEBUGGING = True
USER_DN = 'uid=user,ou=People,%s' % DEFAULT_SUFFIX
-if DEBUGGING:
- logging.getLogger(__name__).setLevel(logging.DEBUG)
-else:
- logging.getLogger(__name__).setLevel(logging.INFO)
-
-
+logging.getLogger(__name__).setLevel(logging.INFO)
log = logging.getLogger(__name__)
-class TopologyStandalone(object):
- """The DS Topology Class"""
- def __init__(self, standalone):
- """Init"""
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- """Create DS Deployment"""
-
- # Creating standalone instance ...
- if DEBUGGING:
- standalone = DirSrv(verbose=True)
- else:
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- """If we are debugging just stop the instances, otherwise remove
- them
- """
- if DEBUGGING:
- standalone.stop()
- else:
- standalone.delete()
-
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- return TopologyStandalone(standalone)
-
def _test_bind(inst, password):
result = True
userconn = ldap.initialize("ldap://%s:%s" % (HOST_STANDALONE, PORT_STANDALONE))
@@ -78,59 +36,52 @@ def _test_bind(inst, password):
result = False
return result
+
def _test_algo(inst, algo_name):
inst.config.set('passwordStorageScheme', algo_name)
- if DEBUGGING:
- print('Testing %s', algo_name)
-
# Create the user with a password
inst.add_s(Entry((
- USER_DN, {
- 'objectClass': 'top account simplesecurityobject'.split(),
- 'uid': 'user',
- 'userpassword': 'Secret123'
- })))
+ USER_DN, {
+ 'objectClass': 'top account simplesecurityobject'.split(),
+ 'uid': 'user',
+ 'userpassword': 'Secret123'
+ })))
# Make sure when we read the userPassword field, it is the correct ALGO
- pw_field = inst.search_s(USER_DN, ldap.SCOPE_BASE, '(objectClass=*)', ['userPassword'] )[0]
-
- if DEBUGGING:
- print(pw_field.getValue('userPassword'))
+ pw_field = inst.search_s(USER_DN, ldap.SCOPE_BASE, '(objectClass=*)', ['userPassword'])[0]
if algo_name != 'CLEAR':
- assert(algo_name.lower() in pw_field.getValue('userPassword').lower())
+ assert (algo_name.lower() in pw_field.getValue('userPassword').lower())
# Now make sure a bind works
- assert(_test_bind(inst, 'Secret123'))
+ assert (_test_bind(inst, 'Secret123'))
# Bind with a wrong shorter password, should fail
- assert(not _test_bind(inst, 'Wrong'))
+ assert (not _test_bind(inst, 'Wrong'))
# Bind with a wrong longer password, should fail
- assert(not _test_bind(inst, 'This is even more wrong'))
+ assert (not _test_bind(inst, 'This is even more wrong'))
# Bind with a wrong exact length password.
- assert(not _test_bind(inst, 'Alsowrong'))
+ assert (not _test_bind(inst, 'Alsowrong'))
# Bind with a subset password, should fail
- assert(not _test_bind(inst, 'Secret'))
+ assert (not _test_bind(inst, 'Secret'))
if algo_name != 'CRYPT':
# Bind with a subset password that is 1 char shorter, to detect off by 1 in clear
- assert(not _test_bind(inst, 'Secret12'))
+ assert (not _test_bind(inst, 'Secret12'))
# Bind with a superset password, should fail
- assert(not _test_bind(inst, 'Secret123456'))
+ assert (not _test_bind(inst, 'Secret123456'))
# Delete the user
inst.delete_s(USER_DN)
# done!
-def test_pwd_algo_test(topology):
- """
- Assert that all of our password algorithms correctly PASS and FAIL varying
- password conditions.
+def test_pwd_algo_test(topology_st):
+ """Assert that all of our password algorithms correctly PASS and FAIL varying
+ password conditions.
"""
- if DEBUGGING:
- # Add debugging steps(if any)...
- pass
- for algo in ('CLEAR', 'CRYPT', 'MD5', 'SHA', 'SHA256', 'SHA384', 'SHA512', 'SMD5', 'SSHA', 'SSHA256', 'SSHA384', 'SSHA512'):
- _test_algo(topology.standalone, algo)
+ for algo in (
+ 'CLEAR', 'CRYPT', 'MD5', 'SHA', 'SHA256', 'SHA384', 'SHA512', 'SMD5', 'SSHA', 'SSHA256', 'SSHA384',
+ 'SSHA512'):
+ _test_algo(topology_st.standalone, algo)
log.info('Test PASSED')
@@ -140,4 +91,3 @@ if __name__ == '__main__':
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s %s" % CURRENT_FILE)
-
diff --git a/dirsrvtests/tests/suites/password/pwp_history_test.py b/dirsrvtests/tests/suites/password/pwp_history_test.py
index d4d3e60..31d48f9 100644
--- a/dirsrvtests/tests/suites/password/pwp_history_test.py
+++ b/dirsrvtests/tests/suites/password/pwp_history_test.py
@@ -1,3 +1,11 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2016 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
import os
import ldap
import logging
@@ -8,50 +16,13 @@ from lib389._constants import *
from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-class TopologyStandalone(object):
- """ Topology class """
- def __init__(self, standalone):
- """ init """
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- """
- Creating standalone instance ...
- """
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- """ Clean up instance """
- standalone.delete()
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- return TopologyStandalone(standalone)
-
-
-def test_pwp_history_test(topology):
+def test_pwp_history_test(topology_st):
"""
Test password policy history feature:
- Test password history is enforced
@@ -65,15 +36,15 @@ def test_pwp_history_test(topology):
# Configure password history policy and add a test user
#
try:
- topology.standalone.modify_s("cn=config",
- [(ldap.MOD_REPLACE,
- 'passwordHistory', 'on'),
- (ldap.MOD_REPLACE,
- 'passwordInHistory', '3'),
- (ldap.MOD_REPLACE,
- 'passwordChange', 'on'),
- (ldap.MOD_REPLACE,
- 'passwordStorageScheme', 'CLEAR')])
+ topology_st.standalone.modify_s("cn=config",
+ [(ldap.MOD_REPLACE,
+ 'passwordHistory', 'on'),
+ (ldap.MOD_REPLACE,
+ 'passwordInHistory', '3'),
+ (ldap.MOD_REPLACE,
+ 'passwordChange', 'on'),
+ (ldap.MOD_REPLACE,
+ 'passwordStorageScheme', 'CLEAR')])
log.info('Configured password policy.')
except ldap.LDAPError as e:
log.fatal('Failed to configure password policy: ' + str(e))
@@ -81,12 +52,12 @@ def test_pwp_history_test(topology):
time.sleep(1)
try:
- topology.standalone.add_s(Entry((USER_DN, {
- 'objectclass': ['top', 'extensibleObject'],
- 'sn': 'user',
- 'cn': 'test user',
- 'uid': 'testuser',
- 'userpassword': 'password'})))
+ topology_st.standalone.add_s(Entry((USER_DN, {
+ 'objectclass': ['top', 'extensibleObject'],
+ 'sn': 'user',
+ 'cn': 'test user',
+ 'uid': 'testuser',
+ 'userpassword': 'password'})))
except ldap.LDAPError as e:
log.fatal('Failed to add test user' + USER_DN + ': error ' + str(e))
assert False
@@ -95,15 +66,15 @@ def test_pwp_history_test(topology):
# Test that password history is enforced.
#
try:
- topology.standalone.simple_bind_s(USER_DN, 'password')
+ topology_st.standalone.simple_bind_s(USER_DN, 'password')
except ldap.LDAPError as e:
log.fatal('Failed to bind as user: ' + str(e))
assert False
# Attempt to change password to the same password
try:
- topology.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE,
- 'userpassword', 'password')])
+ topology_st.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE,
+ 'userpassword', 'password')])
log.info('Incorrectly able to to set password to existing password.')
assert False
except ldap.CONSTRAINT_VIOLATION:
@@ -118,13 +89,13 @@ def test_pwp_history_test(topology):
# password1
try:
- topology.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE,
- 'userpassword', 'password1')])
+ topology_st.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE,
+ 'userpassword', 'password1')])
except ldap.LDAPError as e:
log.fatal('Failed to change password: ' + str(e))
assert False
try:
- topology.standalone.simple_bind_s(USER_DN, 'password1')
+ topology_st.standalone.simple_bind_s(USER_DN, 'password1')
except ldap.LDAPError as e:
log.fatal('Failed to bind as user using "password1": ' + str(e))
assert False
@@ -132,13 +103,13 @@ def test_pwp_history_test(topology):
# password2
try:
- topology.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE,
- 'userpassword', 'password2')])
+ topology_st.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE,
+ 'userpassword', 'password2')])
except ldap.LDAPError as e:
log.fatal('Failed to change password: ' + str(e))
assert False
try:
- topology.standalone.simple_bind_s(USER_DN, 'password2')
+ topology_st.standalone.simple_bind_s(USER_DN, 'password2')
except ldap.LDAPError as e:
log.fatal('Failed to bind as user using "password2": ' + str(e))
assert False
@@ -146,13 +117,13 @@ def test_pwp_history_test(topology):
# password3
try:
- topology.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE,
- 'userpassword', 'password3')])
+ topology_st.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE,
+ 'userpassword', 'password3')])
except ldap.LDAPError as e:
log.fatal('Failed to change password: ' + str(e))
assert False
try:
- topology.standalone.simple_bind_s(USER_DN, 'password3')
+ topology_st.standalone.simple_bind_s(USER_DN, 'password3')
except ldap.LDAPError as e:
log.fatal('Failed to bind as user using "password3": ' + str(e))
assert False
@@ -160,13 +131,13 @@ def test_pwp_history_test(topology):
# password4
try:
- topology.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE,
- 'userpassword', 'password4')])
+ topology_st.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE,
+ 'userpassword', 'password4')])
except ldap.LDAPError as e:
log.fatal('Failed to change password: ' + str(e))
assert False
try:
- topology.standalone.simple_bind_s(USER_DN, 'password4')
+ topology_st.standalone.simple_bind_s(USER_DN, 'password4')
except ldap.LDAPError as e:
log.fatal('Failed to bind as user using "password4": ' + str(e))
assert False
@@ -176,9 +147,9 @@ def test_pwp_history_test(topology):
# Check that we only have 3 passwords stored in history\
#
try:
- entry = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE,
- 'objectclass=*',
- ['passwordHistory'])
+ entry = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_BASE,
+ 'objectclass=*',
+ ['passwordHistory'])
pwds = entry[0].getValues('passwordHistory')
if len(pwds) != 3:
log.fatal('Incorrect number of passwords stored in histry: %d' %
@@ -194,8 +165,8 @@ def test_pwp_history_test(topology):
# Attempt to change the password to previous passwords
#
try:
- topology.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE,
- 'userpassword', 'password1')])
+ topology_st.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE,
+ 'userpassword', 'password1')])
log.info('Incorrectly able to to set password to previous password1.')
assert False
except ldap.CONSTRAINT_VIOLATION:
@@ -205,8 +176,8 @@ def test_pwp_history_test(topology):
assert False
try:
- topology.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE,
- 'userpassword', 'password2')])
+ topology_st.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE,
+ 'userpassword', 'password2')])
log.info('Incorrectly able to to set password to previous password2.')
assert False
except ldap.CONSTRAINT_VIOLATION:
@@ -215,8 +186,8 @@ def test_pwp_history_test(topology):
log.fatal('Failed to attempt to change password: ' + str(e))
assert False
try:
- topology.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE,
- 'userpassword', 'password3')])
+ topology_st.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE,
+ 'userpassword', 'password3')])
log.info('Incorrectly able to to set password to previous password3.')
assert False
except ldap.CONSTRAINT_VIOLATION:
@@ -229,29 +200,29 @@ def test_pwp_history_test(topology):
# Reset password by Directory Manager(admin reset)
#
try:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
except ldap.LDAPError as e:
log.fatal('Failed to bind as rootDN: ' + str(e))
assert False
try:
- topology.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE,
- 'userpassword',
- 'password-reset')])
+ topology_st.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE,
+ 'userpassword',
+ 'password-reset')])
except ldap.LDAPError as e:
log.fatal('Failed to attempt to reset password: ' + str(e))
assert False
# Try and change the password to the previous password before the reset
try:
- topology.standalone.simple_bind_s(USER_DN, 'password-reset')
+ topology_st.standalone.simple_bind_s(USER_DN, 'password-reset')
except ldap.LDAPError as e:
log.fatal('Failed to bind as user: ' + str(e))
assert False
try:
- topology.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE,
- 'userpassword', 'password4')])
+ topology_st.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE,
+ 'userpassword', 'password4')])
log.info('Incorrectly able to to set password to previous password4.')
assert False
except ldap.CONSTRAINT_VIOLATION:
diff --git a/dirsrvtests/tests/suites/posix_winsync_plugin/posix_winsync_test.py b/dirsrvtests/tests/suites/posix_winsync_plugin/posix_winsync_test.py
deleted file mode 100644
index 1c1a993..0000000
--- a/dirsrvtests/tests/suites/posix_winsync_plugin/posix_winsync_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_posix_winsync_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_posix_winsync_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/psearch/psearch_test.py b/dirsrvtests/tests/suites/psearch/psearch_test.py
deleted file mode 100644
index f78e19e..0000000
--- a/dirsrvtests/tests/suites/psearch/psearch_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_psearch_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_psearch_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/referint_plugin/referint_test.py b/dirsrvtests/tests/suites/referint_plugin/referint_test.py
deleted file mode 100644
index ded1622..0000000
--- a/dirsrvtests/tests/suites/referint_plugin/referint_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_referint_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_referint_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_test.py
index afed323..4f9ac46 100644
--- a/dirsrvtests/tests/suites/replication/cleanallruv_test.py
+++ b/dirsrvtests/tests/suites/replication/cleanallruv_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -20,11 +20,11 @@ from lib389._constants import *
from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_m4
+
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
class AddUsers(threading.Thread):
def __init__(self, inst, num_users):
@@ -34,7 +34,8 @@ class AddUsers(threading.Thread):
self.num_users = num_users
def openConnection(self, inst):
- # Open a new connection to our LDAP server
+ """Open a new connection to our LDAP server"""
+
server = DirSrv(verbose=False)
args_instance[SER_HOST] = inst.host
args_instance[SER_PORT] = inst.port
@@ -45,7 +46,8 @@ class AddUsers(threading.Thread):
return server
def run(self):
- # Start adding users
+ """Start adding users"""
+
conn = self.openConnection(self.inst)
idx = 0
@@ -53,9 +55,10 @@ class AddUsers(threading.Thread):
USER_DN = 'uid=' + self.inst.serverid + '_' + str(idx) + ',' + DEFAULT_SUFFIX
try:
conn.add_s(Entry((USER_DN, {'objectclass': 'top extensibleObject'.split(),
- 'uid': 'user' + str(idx)})))
+ 'uid': 'user' + str(idx)})))
+
+ # One of the masters was probably put into read only mode - just break out
except ldap.UNWILLING_TO_PERFORM:
- # One of the masters was probably put into read only mode - just break out
break
except ldap.ALREADY_EXISTS:
pass
@@ -67,38 +70,23 @@ class AddUsers(threading.Thread):
conn.close()
-def remove_master4_agmts(msg, topology):
- """Remove all the repl agmts to master4.
- """
+def remove_master4_agmts(msg, topology_m4):
+ """Remove all the repl agmts to master4. """
+
log.info('%s: remove all the agreements to master 4...' % msg)
- try:
- topology.master1.agreement.delete(DEFAULT_SUFFIX,
- topology.master4.host,
- topology.master4.port)
- except ldap.LDAPError as e:
- log.fatal('%s: Failed to delete agmt(m1 -> m4), error: %s' %
- (msg, str(e)))
- assert False
- try:
- topology.master2.agreement.delete(DEFAULT_SUFFIX,
- topology.master4.host,
- topology.master4.port)
- except ldap.LDAPError as e:
- log.fatal('%s: Failed to delete agmt(m2 -> m4), error: %s' %
- (msg, str(e)))
- assert False
- try:
- topology.master3.agreement.delete(DEFAULT_SUFFIX,
- topology.master4.host,
- topology.master4.port)
- except ldap.LDAPError as e:
- log.fatal('%s: Failed to delete agmt(m3 -> m4), error: ' %
- (msg, str(e)))
- assert False
+ for num in range(1, 4):
+ try:
+ topology_m4.ms["master{}".format(num)].agreement.delete(DEFAULT_SUFFIX,
+ topology_m4.ms["master4"].host,
+ topology_m4.ms["master4"].port)
+ except ldap.LDAPError as e:
+ log.fatal('{}: Failed to delete agmt(m{} -> m4), error: {}'.format(msg, num, str(e)))
+ assert False
-def check_ruvs(msg, topology):
+def check_ruvs(msg, topology_m4):
"""Check masters 1- 3 for master 4's rid."""
+
clean = False
count = 0
while not clean and count < 10:
@@ -106,13 +94,12 @@ def check_ruvs(msg, topology):
# Check master 1
try:
- entry = topology.master1.search_s(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- REPLICA_RUV_FILTER)
+ entry = topology_m4.ms["master1"].search_s(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ REPLICA_RUV_FILTER)
if not entry:
log.error('%s: Failed to find db tombstone entry from master' %
msg)
- repl_fail(replica_inst)
elements = entry[0].getValues('nsds50ruv')
for ruv in elements:
if 'replica 4' in ruv:
@@ -127,13 +114,12 @@ def check_ruvs(msg, topology):
# Check master 2
try:
- entry = topology.master2.search_s(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- REPLICA_RUV_FILTER)
+ entry = topology_m4.ms["master2"].search_s(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ REPLICA_RUV_FILTER)
if not entry:
log.error('%s: Failed to find tombstone entry from master' %
msg)
- repl_fail(replica_inst)
elements = entry[0].getValues('nsds50ruv')
for ruv in elements:
if 'replica 4' in ruv:
@@ -148,13 +134,12 @@ def check_ruvs(msg, topology):
# Check master 3
try:
- entry = topology.master3.search_s(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- REPLICA_RUV_FILTER)
+ entry = topology_m4.ms["master3"].search_s(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ REPLICA_RUV_FILTER)
if not entry:
log.error('%s: Failed to find db tombstone entry from master' %
msg)
- repl_fail(replica_inst)
elements = entry[0].getValues('nsds50ruv')
for ruv in elements:
if 'replica 4' in ruv:
@@ -173,7 +158,7 @@ def check_ruvs(msg, topology):
return clean
-def task_done(topology, task_dn, timeout=60):
+def task_done(topology_m4, task_dn, timeout=60):
"""Check if the task is complete"""
attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode',
'nsTaskCurrentItem', 'nsTaskTotalItems']
@@ -182,7 +167,7 @@ def task_done(topology, task_dn, timeout=60):
while not done and count < timeout:
try:
- entry = topology.master1.getEntry(task_dn, attrlist=attrlist)
+ entry = topology_m4.ms["master1"].getEntry(task_dn, attrlist=attrlist)
if not entry or entry.nsTaskExitCode:
done = True
break
@@ -197,270 +182,7 @@ def task_done(topology, task_dn, timeout=60):
return done
-class TopologyReplication(object):
- def __init__(self, master1, master2, master3, master4, m1_m2_agmt, m1_m3_agmt, m1_m4_agmt):
- master1.open()
- self.master1 = master1
- master2.open()
- self.master2 = master2
- master3.open()
- self.master3 = master3
- master4.open()
- self.master4 = master4
-
- # Store the agreement dn's for future initializations
- self.m1_m2_agmt = m1_m2_agmt
- self.m1_m3_agmt = m1_m3_agmt
- self.m1_m4_agmt = m1_m4_agmt
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating master 1...
- master1 = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_MASTER_1
- args_instance[SER_PORT] = PORT_MASTER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master1.allocate(args_master)
- instance_master1 = master1.exists()
- if instance_master1:
- master1.delete()
- master1.create()
- master1.open()
- master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
- master1.log = log
-
- # Creating master 2...
- master2 = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_MASTER_2
- args_instance[SER_PORT] = PORT_MASTER_2
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master2.allocate(args_master)
- instance_master2 = master2.exists()
- if instance_master2:
- master2.delete()
- master2.create()
- master2.open()
- master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
- # Creating master 3...
- master3 = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_MASTER_3
- args_instance[SER_PORT] = PORT_MASTER_3
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_3
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master3.allocate(args_master)
- instance_master3 = master3.exists()
- if instance_master3:
- master3.delete()
- master3.create()
- master3.open()
- master3.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_3)
-
- # Creating master 4...
- master4 = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_MASTER_4
- args_instance[SER_PORT] = PORT_MASTER_4
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_4
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master4.allocate(args_master)
- instance_master4 = master4.exists()
- if instance_master4:
- master4.delete()
- master4.create()
- master4.open()
- master4.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_4)
-
- #
- # Create all the agreements
- #
- # Creating agreement from master 1 to master 2
- properties = {RA_NAME: 'meTo_%s:%s' % (master2.host, master2.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
- if not m1_m2_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m1_m2_agmt)
-
- # Creating agreement from master 1 to master 3
- properties = {RA_NAME: 'meTo_%s:%s' % (master3.host, master3.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m1_m3_agmt = master1.agreement.create(suffix=SUFFIX, host=master3.host, port=master3.port, properties=properties)
- if not m1_m3_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m1_m3_agmt)
-
- # Creating agreement from master 1 to master 4
- properties = {RA_NAME: 'meTo_%s:%s' % (master4.host, master4.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m1_m4_agmt = master1.agreement.create(suffix=SUFFIX, host=master4.host, port=master4.port, properties=properties)
- if not m1_m4_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m1_m4_agmt)
-
- # Creating agreement from master 2 to master 1
- properties = {RA_NAME: 'meTo_%s:%s' % (master1.host, master1.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
- if not m2_m1_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m2_m1_agmt)
-
- # Creating agreement from master 2 to master 3
- properties = {RA_NAME: 'meTo_%s:%s' % (master3.host, master3.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m2_m3_agmt = master2.agreement.create(suffix=SUFFIX, host=master3.host, port=master3.port, properties=properties)
- if not m2_m3_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m2_m3_agmt)
-
- # Creating agreement from master 2 to master 4
- properties = {RA_NAME: 'meTo_%s:%s' % (master4.host, master4.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m2_m4_agmt = master2.agreement.create(suffix=SUFFIX, host=master4.host, port=master4.port, properties=properties)
- if not m2_m4_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m2_m4_agmt)
-
- # Creating agreement from master 3 to master 1
- properties = {RA_NAME: 'meTo_%s:%s' % (master1.host, master1.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m3_m1_agmt = master3.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
- if not m3_m1_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m3_m1_agmt)
-
- # Creating agreement from master 3 to master 2
- properties = {RA_NAME: 'meTo_%s:%s' % (master2.host, master2.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m3_m2_agmt = master3.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
- if not m3_m2_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m3_m2_agmt)
-
- # Creating agreement from master 3 to master 4
- properties = {RA_NAME: 'meTo_%s:%s' % (master4.host, master4.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m3_m4_agmt = master3.agreement.create(suffix=SUFFIX, host=master4.host, port=master4.port, properties=properties)
- if not m3_m4_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m3_m4_agmt)
-
- # Creating agreement from master 4 to master 1
- properties = {RA_NAME: 'meTo_%s:%s' % (master1.host, master1.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m4_m1_agmt = master4.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
- if not m4_m1_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m4_m1_agmt)
-
- # Creating agreement from master 4 to master 2
- properties = {RA_NAME: 'meTo_%s:%s' % (master2.host, master2.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m4_m2_agmt = master4.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
- if not m4_m2_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m4_m2_agmt)
-
- # Creating agreement from master 4 to master 3
- properties = {RA_NAME: 'meTo_%s:%s' % (master3.host, master3.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m4_m3_agmt = master4.agreement.create(suffix=SUFFIX, host=master3.host, port=master3.port, properties=properties)
- if not m4_m3_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m4_m3_agmt)
-
- # Allow the replicas to get situated with the new agreements
- time.sleep(5)
-
- #
- # Initialize all the agreements
- #
- master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
- master1.waitForReplInit(m1_m2_agmt)
- master1.agreement.init(SUFFIX, HOST_MASTER_3, PORT_MASTER_3)
- master1.waitForReplInit(m1_m3_agmt)
- master1.agreement.init(SUFFIX, HOST_MASTER_4, PORT_MASTER_4)
- master1.waitForReplInit(m1_m4_agmt)
-
- # Check replication is working...
- if master1.testReplication(DEFAULT_SUFFIX, master2):
- log.info('Replication is working.')
- else:
- log.fatal('Replication is not working.')
- assert False
-
- # Clear out the tmp dir
- master1.clearTmpDir(__file__)
- def fin():
- master1.delete()
- master2.delete()
- master3.delete()
- master4.delete()
- request.addfinalizer(fin)
-
- return TopologyReplication(master1, master2, master3, master4, m1_m2_agmt, m1_m3_agmt, m1_m4_agmt)
-
-
-def restore_master4(topology):
+def restore_master4(topology_m4):
'''
In our tests will always be removing master 4, so we need a common
way to restore it for another test
@@ -469,45 +191,46 @@ def restore_master4(topology):
log.info('Restoring master 4...')
# Enable replication on master 4
- topology.master4.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_4)
+ topology_m4.ms["master4"].replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
+ replicaId=REPLICAID_MASTER_4)
#
# Create agreements from master 4 -> m1, m2 ,m3
#
# Creating agreement from master 4 to master 1
- properties = {RA_NAME: 'meTo_%s:%s' % (topology.master1.host, topology.master1.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ properties = {RA_NAME: 'meTo_%s:%s' % (topology_m4.ms["master1"].host, topology_m4.ms["master1"].port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m4_m1_agmt = topology.master4.agreement.create(suffix=SUFFIX, host=topology.master1.host,
- port=topology.master1.port, properties=properties)
+ m4_m1_agmt = topology_m4.ms["master4"].agreement.create(suffix=SUFFIX, host=topology_m4.ms["master1"].host,
+ port=topology_m4.ms["master1"].port, properties=properties)
if not m4_m1_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("%s created" % m4_m1_agmt)
# Creating agreement from master 4 to master 2
- properties = {RA_NAME: 'meTo_%s:%s' % (topology.master2.host, topology.master2.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ properties = {RA_NAME: 'meTo_%s:%s' % (topology_m4.ms["master2"].host, topology_m4.ms["master2"].port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m4_m2_agmt = topology.master4.agreement.create(suffix=SUFFIX, host=topology.master2.host,
- port=topology.master2.port, properties=properties)
+ m4_m2_agmt = topology_m4.ms["master4"].agreement.create(suffix=SUFFIX, host=topology_m4.ms["master2"].host,
+ port=topology_m4.ms["master2"].port, properties=properties)
if not m4_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("%s created" % m4_m2_agmt)
# Creating agreement from master 4 to master 3
- properties = {RA_NAME: 'meTo_%s:%s' % (topology.master3.host, topology.master3.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ properties = {RA_NAME: 'meTo_%s:%s' % (topology_m4.ms["master3"].host, topology_m4.ms["master3"].port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m4_m3_agmt = topology.master4.agreement.create(suffix=SUFFIX, host=topology.master3.host,
- port=topology.master3.port, properties=properties)
+ m4_m3_agmt = topology_m4.ms["master4"].agreement.create(suffix=SUFFIX, host=topology_m4.ms["master3"].host,
+ port=topology_m4.ms["master3"].port, properties=properties)
if not m4_m3_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
@@ -517,39 +240,39 @@ def restore_master4(topology):
# Create agreements from m1, m2, m3 to master 4
#
# Creating agreement from master 1 to master 4
- properties = {RA_NAME: 'meTo_%s:%s' % (topology.master4.host, topology.master4.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ properties = {RA_NAME: 'meTo_%s:%s' % (topology_m4.ms["master4"].host, topology_m4.ms["master4"].port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m1_m4_agmt = topology.master1.agreement.create(suffix=SUFFIX, host=topology.master4.host,
- port=topology.master4.port, properties=properties)
+ m1_m4_agmt = topology_m4.ms["master1"].agreement.create(suffix=SUFFIX, host=topology_m4.ms["master4"].host,
+ port=topology_m4.ms["master4"].port, properties=properties)
if not m1_m4_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("%s created" % m1_m4_agmt)
# Creating agreement from master 2 to master 4
- properties = {RA_NAME: 'meTo_%s:%s' % (topology.master4.host, topology.master4.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ properties = {RA_NAME: 'meTo_%s:%s' % (topology_m4.ms["master4"].host, topology_m4.ms["master4"].port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m2_m4_agmt = topology.master2.agreement.create(suffix=SUFFIX, host=topology.master4.host,
- port=topology.master4.port, properties=properties)
+ m2_m4_agmt = topology_m4.ms["master2"].agreement.create(suffix=SUFFIX, host=topology_m4.ms["master4"].host,
+ port=topology_m4.ms["master4"].port, properties=properties)
if not m2_m4_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("%s created" % m2_m4_agmt)
# Creating agreement from master 3 to master 4
- properties = {RA_NAME: 'meTo_%s:%s' % (topology.master4.host, topology.master4.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ properties = {RA_NAME: 'meTo_%s:%s' % (topology_m4.ms["master4"].host, topology_m4.ms["master4"].port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m3_m4_agmt = topology.master3.agreement.create(suffix=SUFFIX, host=topology.master4.host,
- port=topology.master4.port, properties=properties)
+ m3_m4_agmt = topology_m4.ms["master3"].agreement.create(suffix=SUFFIX, host=topology_m4.ms["master4"].host,
+ port=topology_m4.ms["master4"].port, properties=properties)
if not m3_m4_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
@@ -558,39 +281,39 @@ def restore_master4(topology):
#
# Stop the servers - this allows the rid(for master4) to be used again
#
- topology.master1.stop(timeout=30)
- topology.master2.stop(timeout=30)
- topology.master3.stop(timeout=30)
- topology.master4.stop(timeout=30)
+ topology_m4.ms["master1"].stop(timeout=30)
+ topology_m4.ms["master2"].stop(timeout=30)
+ topology_m4.ms["master3"].stop(timeout=30)
+ topology_m4.ms["master4"].stop(timeout=30)
#
# Initialize the agreements
#
# m1 -> m2
- topology.master1.start(timeout=30)
- topology.master2.start(timeout=30)
+ topology_m4.ms["master1"].start(timeout=30)
+ topology_m4.ms["master2"].start(timeout=30)
time.sleep(5)
- topology.master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
- topology.master1.waitForReplInit(topology.m1_m2_agmt)
+ topology_m4.ms["master1"].agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
+ topology_m4.ms["master1"].waitForReplInit(topology_m4.ms["master1_agmts"]["m1_m2"])
# m1 -> m3
- topology.master3.start(timeout=30)
+ topology_m4.ms["master3"].start(timeout=30)
time.sleep(5)
- topology.master1.agreement.init(SUFFIX, HOST_MASTER_3, PORT_MASTER_3)
- topology.master1.waitForReplInit(topology.m1_m3_agmt)
+ topology_m4.ms["master1"].agreement.init(SUFFIX, HOST_MASTER_3, PORT_MASTER_3)
+ topology_m4.ms["master1"].waitForReplInit(topology_m4.ms["master1_agmts"]["m1_m3"])
# m1 -> m4
time.sleep(5)
- topology.master4.start(timeout=30)
- topology.master1.agreement.init(SUFFIX, HOST_MASTER_4, PORT_MASTER_4)
- topology.master1.waitForReplInit(topology.m1_m4_agmt)
+ topology_m4.ms["master4"].start(timeout=30)
+ topology_m4.ms["master1"].agreement.init(SUFFIX, HOST_MASTER_4, PORT_MASTER_4)
+ topology_m4.ms["master1"].waitForReplInit(topology_m4.ms["master1_agmts"]["m1_m4"])
time.sleep(5)
#
# Test Replication is working
#
# Check replication is working with previous working master(m1 -> m2)
- if topology.master1.testReplication(DEFAULT_SUFFIX, topology.master2):
+ if topology_m4.ms["master1"].testReplication(DEFAULT_SUFFIX, topology_m4.ms["master2"]):
log.info('Replication is working m1 -> m2.')
else:
log.fatal('restore_master4: Replication is not working from m1 -> m2.')
@@ -598,7 +321,7 @@ def restore_master4(topology):
time.sleep(1)
# Check replication is working from master 1 to master 4...
- if topology.master1.testReplication(DEFAULT_SUFFIX, topology.master4):
+ if topology_m4.ms["master1"].testReplication(DEFAULT_SUFFIX, topology_m4.ms["master4"]):
log.info('Replication is working m1 -> m4.')
else:
log.fatal('restore_master4: Replication is not working from m1 -> m4.')
@@ -606,7 +329,7 @@ def restore_master4(topology):
time.sleep(1)
# Check replication is working from master 4 to master1...
- if topology.master4.testReplication(DEFAULT_SUFFIX, topology.master1):
+ if topology_m4.ms["master4"].testReplication(DEFAULT_SUFFIX, topology_m4.ms["master1"]):
log.info('Replication is working m4 -> m1.')
else:
log.fatal('restore_master4: Replication is not working from m4 -> 1.')
@@ -616,7 +339,7 @@ def restore_master4(topology):
log.info('Master 4 has been successfully restored.')
-def test_cleanallruv_init(topology):
+def test_cleanallruv_init(topology_m4):
'''
Make updates on each master to make sure we have the all master RUVs on
each master.
@@ -625,61 +348,61 @@ def test_cleanallruv_init(topology):
log.info('Initializing cleanAllRUV test suite...')
# Master 1
- if not topology.master1.testReplication(DEFAULT_SUFFIX, topology.master2):
+ if not topology_m4.ms["master1"].testReplication(DEFAULT_SUFFIX, topology_m4.ms["master2"]):
log.fatal('test_cleanallruv_init: Replication is not working between master 1 and master 2.')
assert False
- if not topology.master1.testReplication(DEFAULT_SUFFIX, topology.master3):
+ if not topology_m4.ms["master1"].testReplication(DEFAULT_SUFFIX, topology_m4.ms["master3"]):
log.fatal('test_cleanallruv_init: Replication is not working between master 1 and master 3.')
assert False
- if not topology.master1.testReplication(DEFAULT_SUFFIX, topology.master4):
+ if not topology_m4.ms["master1"].testReplication(DEFAULT_SUFFIX, topology_m4.ms["master4"]):
log.fatal('test_cleanallruv_init: Replication is not working between master 1 and master 4.')
assert False
# Master 2
- if not topology.master2.testReplication(DEFAULT_SUFFIX, topology.master1):
+ if not topology_m4.ms["master2"].testReplication(DEFAULT_SUFFIX, topology_m4.ms["master1"]):
log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 1.')
assert False
- if not topology.master2.testReplication(DEFAULT_SUFFIX, topology.master3):
+ if not topology_m4.ms["master2"].testReplication(DEFAULT_SUFFIX, topology_m4.ms["master3"]):
log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 3.')
assert False
- if not topology.master2.testReplication(DEFAULT_SUFFIX, topology.master4):
+ if not topology_m4.ms["master2"].testReplication(DEFAULT_SUFFIX, topology_m4.ms["master4"]):
log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 4.')
assert False
# Master 3
- if not topology.master3.testReplication(DEFAULT_SUFFIX, topology.master1):
+ if not topology_m4.ms["master3"].testReplication(DEFAULT_SUFFIX, topology_m4.ms["master1"]):
log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 1.')
assert False
- if not topology.master3.testReplication(DEFAULT_SUFFIX, topology.master2):
+ if not topology_m4.ms["master3"].testReplication(DEFAULT_SUFFIX, topology_m4.ms["master2"]):
log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 2.')
assert False
- if not topology.master3.testReplication(DEFAULT_SUFFIX, topology.master4):
+ if not topology_m4.ms["master3"].testReplication(DEFAULT_SUFFIX, topology_m4.ms["master4"]):
log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 4.')
assert False
# Master 4
- if not topology.master4.testReplication(DEFAULT_SUFFIX, topology.master1):
+ if not topology_m4.ms["master4"].testReplication(DEFAULT_SUFFIX, topology_m4.ms["master1"]):
log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 1.')
assert False
- if not topology.master4.testReplication(DEFAULT_SUFFIX, topology.master2):
+ if not topology_m4.ms["master4"].testReplication(DEFAULT_SUFFIX, topology_m4.ms["master2"]):
log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 2.')
assert False
- if not topology.master4.testReplication(DEFAULT_SUFFIX, topology.master3):
+ if not topology_m4.ms["master4"].testReplication(DEFAULT_SUFFIX, topology_m4.ms["master3"]):
log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 3.')
assert False
log.info('Initialized cleanAllRUV test suite.')
-def test_cleanallruv_clean(topology):
+def test_cleanallruv_clean(topology_m4):
'''
Disable a master, remove agreements to that master, and clean the RUVs on
the remaining replicas
@@ -689,20 +412,16 @@ def test_cleanallruv_clean(topology):
# Disable master 4
log.info('test_cleanallruv_clean: disable master 4...')
- try:
- topology.master4.replica.disableReplication(DEFAULT_SUFFIX)
- except:
- log.fatal('error!')
- assert False
+ topology_m4.ms["master4"].replica.disableReplication(DEFAULT_SUFFIX)
# Remove the agreements from the other masters that point to master 4
- remove_master4_agmts("test_cleanallruv_clean", topology)
+ remove_master4_agmts("test_cleanallruv_clean", topology_m4)
# Run the task
log.info('test_cleanallruv_clean: run the cleanAllRUV task...')
try:
- topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4',
- args={TASK_WAIT: True})
+ topology_m4.ms["master1"].tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4',
+ args={TASK_WAIT: True})
except ValueError as e:
log.fatal('test_cleanallruv_clean: Problem running cleanAllRuv task: ' +
e.message('desc'))
@@ -710,7 +429,7 @@ def test_cleanallruv_clean(topology):
# Check the other master's RUV for 'replica 4'
log.info('test_cleanallruv_clean: check all the masters have been cleaned...')
- clean = check_ruvs("test_cleanallruv_clean", topology)
+ clean = check_ruvs("test_cleanallruv_clean", topology_m4)
if not clean:
log.fatal('test_cleanallruv_clean: Failed to clean replicas')
@@ -721,10 +440,10 @@ def test_cleanallruv_clean(topology):
#
# Cleanup - restore master 4
#
- restore_master4(topology)
+ restore_master4(topology_m4)
-def test_cleanallruv_clean_restart(topology):
+def test_cleanallruv_clean_restart(topology_m4):
'''
Test that if a master istopped during the clean process, that it
resumes and finishes when its started.
@@ -734,23 +453,19 @@ def test_cleanallruv_clean_restart(topology):
# Disable master 4
log.info('test_cleanallruv_clean_restart: disable master 4...')
- try:
- topology.master4.replica.disableReplication(DEFAULT_SUFFIX)
- except:
- log.fatal('error!')
- assert False
+ topology_m4.ms["master4"].replica.disableReplication(DEFAULT_SUFFIX)
# Remove the agreements from the other masters that point to master 4
log.info('test_cleanallruv_clean: remove all the agreements to master 4...')
- remove_master4_agmts("test_cleanallruv_clean restart", topology)
+ remove_master4_agmts("test_cleanallruv_clean restart", topology_m4)
# Stop master 3 to keep the task running, so we can stop master 1...
- topology.master3.stop(timeout=30)
+ topology_m4.ms["master3"].stop(timeout=30)
# Run the task
log.info('test_cleanallruv_clean_restart: run the cleanAllRUV task...')
try:
- (task_dn, rc) = topology.master1.tasks.cleanAllRUV(
+ (task_dn, rc) = topology_m4.ms["master1"].tasks.cleanAllRUV(
suffix=DEFAULT_SUFFIX, replicaid='4', args={TASK_WAIT: False})
except ValueError as e:
log.fatal('test_cleanallruv_clean_restart: Problem running cleanAllRuv task: ' +
@@ -759,27 +474,27 @@ def test_cleanallruv_clean_restart(topology):
# Sleep a bit, then stop master 1
time.sleep(5)
- topology.master1.stop(timeout=30)
+ topology_m4.ms["master1"].stop(timeout=30)
# Now start master 3 & 1, and make sure we didn't crash
- topology.master3.start(timeout=30)
- if topology.master3.detectDisorderlyShutdown():
+ topology_m4.ms["master3"].start(timeout=30)
+ if topology_m4.ms["master3"].detectDisorderlyShutdown():
log.fatal('test_cleanallruv_clean_restart: Master 3 previously crashed!')
assert False
- topology.master1.start(timeout=30)
- if topology.master1.detectDisorderlyShutdown():
+ topology_m4.ms["master1"].start(timeout=30)
+ if topology_m4.ms["master1"].detectDisorderlyShutdown():
log.fatal('test_cleanallruv_clean_restart: Master 1 previously crashed!')
assert False
# Wait a little for agmts/cleanallruv to wake up
- if not task_done(topology, task_dn):
+ if not task_done(topology_m4, task_dn):
log.fatal('test_cleanallruv_clean_restart: cleanAllRUV task did not finish')
assert False
# Check the other master's RUV for 'replica 4'
log.info('test_cleanallruv_clean_restart: check all the masters have been cleaned...')
- clean = check_ruvs("test_cleanallruv_clean_restart", topology)
+ clean = check_ruvs("test_cleanallruv_clean_restart", topology_m4)
if not clean:
log.fatal('Failed to clean replicas')
assert False
@@ -789,10 +504,10 @@ def test_cleanallruv_clean_restart(topology):
#
# Cleanup - restore master 4
#
- restore_master4(topology)
+ restore_master4(topology_m4)
-def test_cleanallruv_clean_force(topology):
+def test_cleanallruv_clean_force(topology_m4):
'''
Disable a master, remove agreements to that master, and clean the RUVs on
the remaining replicas
@@ -801,33 +516,29 @@ def test_cleanallruv_clean_force(topology):
log.info('Running test_cleanallruv_clean_force...')
# Stop master 3, while we update master 4, so that 3 is behind the other masters
- topology.master3.stop(timeout=10)
+ topology_m4.ms["master3"].stop(timeout=10)
# Add a bunch of updates to master 4
- m4_add_users = AddUsers(topology.master4, 1500)
+ m4_add_users = AddUsers(topology_m4.ms["master4"], 1500)
m4_add_users.start()
m4_add_users.join()
# Disable master 4
log.info('test_cleanallruv_clean_force: disable master 4...')
- try:
- topology.master4.replica.disableReplication(DEFAULT_SUFFIX)
- except:
- log.fatal('error!')
- assert False
+ topology_m4.ms["master4"].replica.disableReplication(DEFAULT_SUFFIX)
# Start master 3, it should be out of sync with the other replicas...
- topology.master3.start(timeout=30)
+ topology_m4.ms["master3"].start(timeout=30)
# Remove the agreements from the other masters that point to master 4
- remove_master4_agmts("test_cleanallruv_clean_force", topology)
+ remove_master4_agmts("test_cleanallruv_clean_force", topology_m4)
# Run the task, use "force" because master 3 is not in sync with the other replicas
# in regards to the replica 4 RUV
log.info('test_cleanallruv_clean_force: run the cleanAllRUV task...')
try:
- topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4',
- force=True, args={TASK_WAIT: True})
+ topology_m4.ms["master1"].tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4',
+ force=True, args={TASK_WAIT: True})
except ValueError as e:
log.fatal('test_cleanallruv_clean_force: Problem running cleanAllRuv task: ' +
e.message('desc'))
@@ -835,7 +546,7 @@ def test_cleanallruv_clean_force(topology):
# Check the other master's RUV for 'replica 4'
log.info('test_cleanallruv_clean_force: check all the masters have been cleaned...')
- clean = check_ruvs("test_cleanallruv_clean_force", topology)
+ clean = check_ruvs("test_cleanallruv_clean_force", topology_m4)
if not clean:
log.fatal('test_cleanallruv_clean_force: Failed to clean replicas')
assert False
@@ -845,10 +556,10 @@ def test_cleanallruv_clean_force(topology):
#
# Cleanup - restore master 4
#
- restore_master4(topology)
+ restore_master4(topology_m4)
-def test_cleanallruv_abort(topology):
+def test_cleanallruv_abort(topology_m4):
'''
Test the abort task.
@@ -864,24 +575,20 @@ def test_cleanallruv_abort(topology):
# Disable master 4
log.info('test_cleanallruv_abort: disable replication on master 4...')
- try:
- topology.master4.replica.disableReplication(DEFAULT_SUFFIX)
- except:
- log.fatal('test_cleanallruv_abort: failed to disable replication')
- assert False
+ topology_m4.ms["master4"].replica.disableReplication(DEFAULT_SUFFIX)
# Remove the agreements from the other masters that point to master 4
- remove_master4_agmts("test_cleanallruv_abort", topology)
+ remove_master4_agmts("test_cleanallruv_abort", topology_m4)
# Stop master 2
log.info('test_cleanallruv_abort: stop master 2 to freeze the cleanAllRUV task...')
- topology.master2.stop(timeout=30)
+ topology_m4.ms["master2"].stop(timeout=30)
# Run the task
log.info('test_cleanallruv_abort: add the cleanAllRUV task...')
try:
- (clean_task_dn, rc) = topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX,
- replicaid='4', args={TASK_WAIT: False})
+ (clean_task_dn, rc) = topology_m4.ms["master1"].tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX,
+ replicaid='4', args={TASK_WAIT: False})
except ValueError as e:
log.fatal('test_cleanallruv_abort: Problem running cleanAllRuv task: ' +
e.message('desc'))
@@ -893,8 +600,8 @@ def test_cleanallruv_abort(topology):
# Abort the task
log.info('test_cleanallruv_abort: abort the cleanAllRUV task...')
try:
- topology.master1.tasks.abortCleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4',
- args={TASK_WAIT: True})
+ topology_m4.ms["master1"].tasks.abortCleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4',
+ args={TASK_WAIT: True})
except ValueError as e:
log.fatal('test_cleanallruv_abort: Problem running abortCleanAllRuv task: ' +
e.message('desc'))
@@ -902,21 +609,21 @@ def test_cleanallruv_abort(topology):
# Check master 1 does not have the clean task running
log.info('test_cleanallruv_abort: check master 1 no longer has a cleanAllRUV task...')
- if not task_done(topology, clean_task_dn):
+ if not task_done(topology_m4, clean_task_dn):
log.fatal('test_cleanallruv_abort: CleanAllRUV task was not aborted')
assert False
# Start master 2
log.info('test_cleanallruv_abort: start master 2 to begin the restore process...')
- topology.master2.start(timeout=30)
+ topology_m4.ms["master2"].start(timeout=30)
#
# Now run the clean task task again to we can properly restore master 4
#
log.info('test_cleanallruv_abort: run cleanAllRUV task so we can properly restore master 4...')
try:
- topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX,
- replicaid='4', args={TASK_WAIT: True})
+ topology_m4.ms["master1"].tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX,
+ replicaid='4', args={TASK_WAIT: True})
except ValueError as e:
log.fatal('test_cleanallruv_abort: Problem running cleanAllRuv task: ' + e.message('desc'))
assert False
@@ -926,10 +633,10 @@ def test_cleanallruv_abort(topology):
#
# Cleanup - Restore master 4
#
- restore_master4(topology)
+ restore_master4(topology_m4)
-def test_cleanallruv_abort_restart(topology):
+def test_cleanallruv_abort_restart(topology_m4):
'''
Test the abort task can handle a restart, and then resume
'''
@@ -938,25 +645,21 @@ def test_cleanallruv_abort_restart(topology):
# Disable master 4
log.info('test_cleanallruv_abort_restart: disable replication on master 4...')
- try:
- topology.master4.replica.disableReplication(DEFAULT_SUFFIX)
- except:
- log.fatal('error!')
- assert False
+ topology_m4.ms["master4"].replica.disableReplication(DEFAULT_SUFFIX)
# Remove the agreements from the other masters that point to master 4
log.info('test_cleanallruv_abort_restart: remove all the agreements to master 4...)')
- remove_master4_agmts("test_cleanallruv_abort_restart", topology)
+ remove_master4_agmts("test_cleanallruv_abort_restart", topology_m4)
# Stop master 3
log.info('test_cleanallruv_abort_restart: stop master 3 to freeze the cleanAllRUV task...')
- topology.master3.stop()
+ topology_m4.ms["master3"].stop()
# Run the task
log.info('test_cleanallruv_abort_restart: add the cleanAllRUV task...')
try:
- (clean_task_dn, rc) = topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX,
- replicaid='4', args={TASK_WAIT: False})
+ (clean_task_dn, rc) = topology_m4.ms["master1"].tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX,
+ replicaid='4', args={TASK_WAIT: False})
except ValueError as e:
log.fatal('test_cleanallruv_abort_restart: Problem running cleanAllRuv task: ' +
e.message('desc'))
@@ -968,8 +671,8 @@ def test_cleanallruv_abort_restart(topology):
# Abort the task
log.info('test_cleanallruv_abort_restart: abort the cleanAllRUV task...')
try:
- topology.master1.tasks.abortCleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4',
- certify=True, args={TASK_WAIT: False})
+ topology_m4.ms["master1"].tasks.abortCleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4',
+ certify=True, args={TASK_WAIT: False})
except ValueError as e:
log.fatal('test_cleanallruv_abort_restart: Problem running test_cleanallruv_abort_restart task: ' +
e.message('desc'))
@@ -981,21 +684,21 @@ def test_cleanallruv_abort_restart(topology):
# Check master 1 does not have the clean task running
log.info('test_cleanallruv_abort: check master 1 no longer has a cleanAllRUV task...')
- if not task_done(topology, clean_task_dn):
+ if not task_done(topology_m4, clean_task_dn):
log.fatal('test_cleanallruv_abort_restart: CleanAllRUV task was not aborted')
assert False
# Now restart master 1, and make sure the abort process completes
- topology.master1.restart()
- if topology.master1.detectDisorderlyShutdown():
+ topology_m4.ms["master1"].restart()
+ if topology_m4.ms["master1"].detectDisorderlyShutdown():
log.fatal('test_cleanallruv_abort_restart: Master 1 previously crashed!')
assert False
# Start master 3
- topology.master3.start()
+ topology_m4.ms["master3"].start()
# Check master 1 tried to run abort task. We expect the abort task to be aborted.
- if not topology.master1.searchErrorsLog('Aborting abort task'):
+ if not topology_m4.ms["master1"].searchErrorsLog('Aborting abort task'):
log.fatal('test_cleanallruv_abort_restart: Abort task did not restart')
assert False
@@ -1004,8 +707,8 @@ def test_cleanallruv_abort_restart(topology):
#
log.info('test_cleanallruv_abort_restart: run cleanAllRUV task so we can properly restore master 4...')
try:
- topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX,
- replicaid='4', args={TASK_WAIT: True})
+ topology_m4.ms["master1"].tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX,
+ replicaid='4', args={TASK_WAIT: True})
except ValueError as e:
log.fatal('test_cleanallruv_abort_restart: Problem running cleanAllRuv task: ' +
e.message('desc'))
@@ -1016,10 +719,10 @@ def test_cleanallruv_abort_restart(topology):
#
# Cleanup - Restore master 4
#
- restore_master4(topology)
+ restore_master4(topology_m4)
-def test_cleanallruv_abort_certify(topology):
+def test_cleanallruv_abort_certify(topology_m4):
'''
Test the abort task.
@@ -1035,24 +738,20 @@ def test_cleanallruv_abort_certify(topology):
# Disable master 4
log.info('test_cleanallruv_abort_certify: disable replication on master 4...')
- try:
- topology.master4.replica.disableReplication(DEFAULT_SUFFIX)
- except:
- log.fatal('error!')
- assert False
+ topology_m4.ms["master4"].replica.disableReplication(DEFAULT_SUFFIX)
# Remove the agreements from the other masters that point to master 4
- remove_master4_agmts("test_cleanallruv_abort_certify", topology)
+ remove_master4_agmts("test_cleanallruv_abort_certify", topology_m4)
# Stop master 2
log.info('test_cleanallruv_abort_certify: stop master 2 to freeze the cleanAllRUV task...')
- topology.master2.stop()
+ topology_m4.ms["master2"].stop()
# Run the task
log.info('test_cleanallruv_abort_certify: add the cleanAllRUV task...')
try:
- (clean_task_dn, rc) = topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX,
- replicaid='4', args={TASK_WAIT: False})
+ (clean_task_dn, rc) = topology_m4.ms["master1"].tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX,
+ replicaid='4', args={TASK_WAIT: False})
except ValueError as e:
log.fatal('test_cleanallruv_abort_certify: Problem running cleanAllRuv task: ' +
e.message('desc'))
@@ -1064,8 +763,9 @@ def test_cleanallruv_abort_certify(topology):
# Abort the task
log.info('test_cleanallruv_abort_certify: abort the cleanAllRUV task...')
try:
- (abort_task_dn, rc) = topology.master1.tasks.abortCleanAllRUV(suffix=DEFAULT_SUFFIX,
- replicaid='4', certify=True, args={TASK_WAIT: False})
+ (abort_task_dn, rc) = topology_m4.ms["master1"].tasks.abortCleanAllRUV(suffix=DEFAULT_SUFFIX,
+ replicaid='4', certify=True,
+ args={TASK_WAIT: False})
except ValueError as e:
log.fatal('test_cleanallruv_abort_certify: Problem running abortCleanAllRuv task: ' +
e.message('desc'))
@@ -1075,36 +775,36 @@ def test_cleanallruv_abort_certify(topology):
log.info('test_cleanallruv_abort_certify: sleep for 5 seconds')
time.sleep(5)
- if task_done(topology, abort_task_dn, 60):
+ if task_done(topology_m4, abort_task_dn, 60):
log.fatal('test_cleanallruv_abort_certify: abort task incorrectly finished')
assert False
# Now start master 2 so it can be aborted
log.info('test_cleanallruv_abort_certify: start master 2 to allow the abort task to finish...')
- topology.master2.start()
+ topology_m4.ms["master2"].start()
# Wait for the abort task to stop
- if not task_done(topology, abort_task_dn, 60):
+ if not task_done(topology_m4, abort_task_dn, 60):
log.fatal('test_cleanallruv_abort_certify: The abort CleanAllRUV task was not aborted')
assert False
# Check master 1 does not have the clean task running
log.info('test_cleanallruv_abort_certify: check master 1 no longer has a cleanAllRUV task...')
- if not task_done(topology, clean_task_dn):
+ if not task_done(topology_m4, clean_task_dn):
log.fatal('test_cleanallruv_abort_certify: CleanAllRUV task was not aborted')
assert False
# Start master 2
log.info('test_cleanallruv_abort_certify: start master 2 to begin the restore process...')
- topology.master2.start()
+ topology_m4.ms["master2"].start()
#
# Now run the clean task task again to we can properly restore master 4
#
log.info('test_cleanallruv_abort_certify: run cleanAllRUV task so we can properly restore master 4...')
try:
- topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX,
- replicaid='4', args={TASK_WAIT: True})
+ topology_m4.ms["master1"].tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX,
+ replicaid='4', args={TASK_WAIT: True})
except ValueError as e:
log.fatal('test_cleanallruv_abort_certify: Problem running cleanAllRuv task: ' +
e.message('desc'))
@@ -1115,10 +815,10 @@ def test_cleanallruv_abort_certify(topology):
#
# Cleanup - Restore master 4
#
- restore_master4(topology)
+ restore_master4(topology_m4)
-def test_cleanallruv_stress_clean(topology):
+def test_cleanallruv_stress_clean(topology_m4):
'''
Put each server(m1 - m4) under stress, and perform the entire clean process
'''
@@ -1126,13 +826,13 @@ def test_cleanallruv_stress_clean(topology):
log.info('test_cleanallruv_stress_clean: put all the masters under load...')
# Put all the masters under load
- m1_add_users = AddUsers(topology.master1, 2000)
+ m1_add_users = AddUsers(topology_m4.ms["master1"], 2000)
m1_add_users.start()
- m2_add_users = AddUsers(topology.master2, 2000)
+ m2_add_users = AddUsers(topology_m4.ms["master2"], 2000)
m2_add_users.start()
- m3_add_users = AddUsers(topology.master3, 2000)
+ m3_add_users = AddUsers(topology_m4.ms["master3"], 2000)
m3_add_users.start()
- m4_add_users = AddUsers(topology.master4, 2000)
+ m4_add_users = AddUsers(topology_m4.ms["master4"], 2000)
m4_add_users.start()
# Allow sometime to get replication flowing in all directions
@@ -1142,7 +842,7 @@ def test_cleanallruv_stress_clean(topology):
# Put master 4 into read only mode
log.info('test_cleanallruv_stress_clean: put master 4 into read-only mode...')
try:
- topology.master4.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-readonly', 'on')])
+ topology_m4.ms["master4"].modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-readonly', 'on')])
except ldap.LDAPError as e:
log.fatal('test_cleanallruv_stress_clean: Failed to put master 4 into read-only mode: error ' +
e.message['desc'])
@@ -1155,19 +855,19 @@ def test_cleanallruv_stress_clean(topology):
# Disable master 4
log.info('test_cleanallruv_stress_clean: disable replication on master 4...')
try:
- topology.master4.replica.disableReplication(DEFAULT_SUFFIX)
+ topology_m4.ms["master4"].replica.disableReplication(DEFAULT_SUFFIX)
except:
log.fatal('test_cleanallruv_stress_clean: failed to diable replication')
assert False
# Remove the agreements from the other masters that point to master 4
- remove_master4_agmts("test_cleanallruv_stress_clean", topology)
+ remove_master4_agmts("test_cleanallruv_stress_clean", topology_m4)
# Run the task
log.info('test_cleanallruv_stress_clean: Run the cleanAllRUV task...')
try:
- topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4',
- args={TASK_WAIT: True})
+ topology_m4.ms["master1"].tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4',
+ args={TASK_WAIT: True})
except ValueError as e:
log.fatal('test_cleanallruv_stress_clean: Problem running cleanAllRuv task: ' +
e.message('desc'))
@@ -1182,7 +882,7 @@ def test_cleanallruv_stress_clean(topology):
# Check the other master's RUV for 'replica 4'
log.info('test_cleanallruv_stress_clean: check if all the replicas have been cleaned...')
- clean = check_ruvs("test_cleanallruv_stress_clean", topology)
+ clean = check_ruvs("test_cleanallruv_stress_clean", topology_m4)
if not clean:
log.fatal('test_cleanallruv_stress_clean: Failed to clean replicas')
assert False
@@ -1199,13 +899,13 @@ def test_cleanallruv_stress_clean(topology):
# Turn off readonly mode
try:
- topology.master4.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-readonly', 'off')])
+ topology_m4.ms["master4"].modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-readonly', 'off')])
except ldap.LDAPError as e:
log.fatal('test_cleanallruv_stress_clean: Failed to put master 4 into read-only mode: error ' +
e.message['desc'])
assert False
- restore_master4(topology)
+ restore_master4(topology_m4)
if __name__ == '__main__':
diff --git a/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py b/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py
index 02c27b9..54b47e6 100644
--- a/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py
+++ b/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py
@@ -1,3 +1,11 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2016 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
import os
import sys
import time
@@ -11,6 +19,7 @@ from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
from collections import Counter
+from lib389.topologies import topology_m2
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
@@ -20,157 +29,49 @@ installation1_prefix = None
WAITFOR_ASYNC_ATTR = "nsDS5ReplicaWaitForAsyncResults"
-class TopologyReplication(object):
- def __init__(self, master1, master2, m1_m2_agmt, m2_m1_agmt):
- master1.open()
- master2.open()
- self.masters = ((master1, m1_m2_agmt),
- (master2, m2_m1_agmt))
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating master 1...
- master1 = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_MASTER_1
- args_instance[SER_PORT] = PORT_MASTER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master1.allocate(args_master)
- instance_master1 = master1.exists()
- if instance_master1:
- master1.delete()
- master1.create()
- master1.open()
- master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-
- # Creating master 2...
- master2 = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_MASTER_2
- args_instance[SER_PORT] = PORT_MASTER_2
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master2.allocate(args_master)
- instance_master2 = master2.exists()
- if instance_master2:
- master2.delete()
- master2.create()
- master2.open()
- master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
- #
- # Create all the agreements
- #
- # Creating agreement from master 1 to master 2
- properties = {RA_NAME: 'meTo_%s:%s' %(master2.host, master2.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
- if not m1_m2_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m1_m2_agmt)
-
- # Creating agreement from master 2 to master 1
- properties = {RA_NAME: 'meTo_%s:%s' %(master1.host, master1.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
- if not m2_m1_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m2_m1_agmt)
-
- # Allow the replicas to get situated with the new agreements...
- time.sleep(5)
-
- #
- # Initialize all the agreements
- #
- master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
- master1.waitForReplInit(m1_m2_agmt)
- master2.agreement.init(SUFFIX, HOST_MASTER_1, PORT_MASTER_1)
- master2.waitForReplInit(m2_m1_agmt)
-
- # Check replication is working...
- if master1.testReplication(DEFAULT_SUFFIX, master2):
- log.info('Replication is working.')
- else:
- log.fatal('Replication is not working.')
- assert False
-
- log.info("Set Replication Debugging loglevel for the errorlog")
- master1.setLogLevel(LOG_REPLICA)
- master2.setLogLevel(LOG_REPLICA)
-
- logging_attr = 'nsslapd-logging-hr-timestamps-enabled'
- master1.modify_s("cn=config", [(ldap.MOD_REPLACE, logging_attr, "off")])
- master2.modify_s("cn=config", [(ldap.MOD_REPLACE, logging_attr, "off")])
-
- # Delete each instance in the end
- def fin():
- master1.delete()
- master2.delete()
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- master1.clearTmpDir(__file__)
-
- return TopologyReplication(master1, master2, m1_m2_agmt, m2_m1_agmt)
-
-
-@pytest.fixture(params=[(None, (4, 10)),
- ('2000', (0, 1)),
- ('0', (4, 10)),
- ('-5', (4, 10))])
-def waitfor_async_attr(topology, request):
+@pytest.fixture(params=[(None, (4, 11)),
+ ('2000', (0, 2)),
+ ('0', (4, 11)),
+ ('-5', (4, 11))])
+def waitfor_async_attr(topology_m2, request):
"""Sets attribute on all replicas"""
attr_value = request.param[0]
expected_result = request.param[1]
# Run through all masters
- for master in topology.masters:
- agmt = master[1]
+ for num in range(1, 3):
+ master = topology_m2.ms["master{}".format(num)]
+ agmt = topology_m2.ms["master{}_agmts".format(num)].values()[0]
try:
if attr_value:
log.info("Set %s: %s on %s" % (
- WAITFOR_ASYNC_ATTR, attr_value, master[0].serverid))
+ WAITFOR_ASYNC_ATTR, attr_value, master.serverid))
mod = [(ldap.MOD_REPLACE, WAITFOR_ASYNC_ATTR, attr_value)]
else:
log.info("Delete %s from %s" % (
- WAITFOR_ASYNC_ATTR, master[0].serverid))
+ WAITFOR_ASYNC_ATTR, master.serverid))
mod = [(ldap.MOD_DELETE, WAITFOR_ASYNC_ATTR, None)]
- master[0].modify_s(agmt, mod)
+ master.modify_s(agmt, mod)
except ldap.LDAPError as e:
log.error('Failed to set or delete %s attribute: (%s)' % (
- WAITFOR_ASYNC_ATTR, e.message['desc']))
+ WAITFOR_ASYNC_ATTR, e.message['desc']))
return (attr_value, expected_result)
@pytest.fixture
-def entries(topology, request):
+def entries(topology_m2, request):
"""Adds entries to the master1"""
- master1 = topology.masters[0][0]
+ master1 = topology_m2.ms["master1"]
TEST_OU = "test"
test_dn = SUFFIX
test_list = []
log.info("Add 100 nested entries under replicated suffix on %s" % master1.serverid)
- for i in xrange(100):
+ for i in range(100):
test_dn = 'ou=%s%s,%s' % (TEST_OU, i, test_dn)
test_list.insert(0, test_dn)
try:
@@ -180,7 +81,7 @@ def entries(topology, request):
'ou': TEST_OU})))
except ldap.LDAPError as e:
log.error('Failed to add entry (%s): error (%s)' % (test_dn,
- e.message['desc']))
+ e.message['desc']))
assert False
log.info("Delete created entries")
@@ -196,14 +97,15 @@ def entries(topology, request):
log.info("Clear the errors log in the end of the test case")
with open(master1.errlog, 'w') as errlog:
errlog.writelines("")
+
request.addfinalizer(fin)
-def test_not_int_value(topology):
+def test_not_int_value(topology_m2):
"""Tests not integer value"""
- master1 = topology.masters[0][0]
- agmt = topology.masters[0][1]
+ master1 = topology_m2.ms["master1"]
+ agmt = topology_m2.ms["master1_agmts"]["m1_m2"]
log.info("Try to set %s: wv1" % WAITFOR_ASYNC_ATTR)
try:
@@ -213,15 +115,15 @@ def test_not_int_value(topology):
assert e.message['desc'] == 'Invalid syntax'
-def test_multi_value(topology):
+def test_multi_value(topology_m2):
"""Tests multi value"""
- master1 = topology.masters[0][0]
- agmt = topology.masters[0][1]
+ master1 = topology_m2.ms["master1"]
+ agmt = topology_m2.ms["master1_agmts"]["m1_m2"]
log.info("agmt: %s" % agmt)
log.info("Try to set %s: 100 and 101 in the same time (multi value test)" % (
- WAITFOR_ASYNC_ATTR))
+ WAITFOR_ASYNC_ATTR))
try:
mod = [(ldap.MOD_ADD, WAITFOR_ASYNC_ATTR, "100")]
master1.modify_s(agmt, mod)
@@ -231,34 +133,46 @@ def test_multi_value(topology):
assert e.message['desc'] == 'Object class violation'
-def test_value_check(topology, waitfor_async_attr):
+def test_value_check(topology_m2, waitfor_async_attr):
"""Checks that value has been set correctly"""
attr_value = waitfor_async_attr[0]
- for master in topology.masters:
- agmt = master[1]
+ for num in range(1, 3):
+ master = topology_m2.ms["master{}".format(num)]
+ agmt = topology_m2.ms["master{}_agmts".format(num)].values()[0]
- log.info("Check attr %s on %s" % (WAITFOR_ASYNC_ATTR, master[0].serverid))
+ log.info("Check attr %s on %s" % (WAITFOR_ASYNC_ATTR, master.serverid))
try:
if attr_value:
- entry = master[0].search_s(agmt, ldap.SCOPE_BASE, "%s=%s" % (
- WAITFOR_ASYNC_ATTR, attr_value))
+ entry = master.search_s(agmt, ldap.SCOPE_BASE, "%s=%s" % (
+ WAITFOR_ASYNC_ATTR, attr_value))
assert entry
else:
- entry = master[0].search_s(agmt, ldap.SCOPE_BASE, "%s=*" % WAITFOR_ASYNC_ATTR)
+ entry = master.search_s(agmt, ldap.SCOPE_BASE, "%s=*" % WAITFOR_ASYNC_ATTR)
assert not entry
except ldap.LDAPError as e:
log.fatal('Search failed, error: ' + e.message['desc'])
assert False
-def test_behavior_with_value(topology, waitfor_async_attr, entries):
+def test_behavior_with_value(topology_m2, waitfor_async_attr, entries):
"""Tests replication behavior with valid
nsDS5ReplicaWaitForAsyncResults attribute values
"""
- master1 = topology.masters[0][0]
+ master1 = topology_m2.ms["master1"]
+ master2 = topology_m2.ms["master2"]
+
+ log.info("Set Replication Debugging loglevel for the errorlog")
+ master1.setLogLevel(LOG_REPLICA)
+ master2.setLogLevel(LOG_REPLICA)
+
+ master1.modify_s("cn=config", [(ldap.MOD_REPLACE,
+ 'nsslapd-logging-hr-timestamps-enabled', "off")])
+ master2.modify_s("cn=config", [(ldap.MOD_REPLACE,
+ 'nsslapd-logging-hr-timestamps-enabled', "off")])
+
sync_dict = Counter()
min_ap = waitfor_async_attr[1][0]
max_ap = waitfor_async_attr[1][1]
@@ -268,8 +182,9 @@ def test_behavior_with_value(topology, waitfor_async_attr, entries):
log.info("Gather all sync attempts within Counter dict, group by timestamp")
with open(master1.errlog, 'r') as errlog:
errlog_filtered = filter(lambda x: "waitfor_async_results" in x, errlog)
+
+ # Watch only over unsuccessful sync attempts
for line in errlog_filtered:
- # Watch only over unsuccessful sync attempts
if line.split()[3] != line.split()[4]:
timestamp = line.split(']')[0]
sync_dict[timestamp] += 1
diff --git a/dirsrvtests/tests/suites/replsync_plugin/repl_sync_test.py b/dirsrvtests/tests/suites/replsync_plugin/repl_sync_test.py
deleted file mode 100644
index fc3a559..0000000
--- a/dirsrvtests/tests/suites/replsync_plugin/repl_sync_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_repl_sync_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_repl_sync_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/resource_limits/res_limits_test.py b/dirsrvtests/tests/suites/resource_limits/res_limits_test.py
deleted file mode 100644
index e904bbf..0000000
--- a/dirsrvtests/tests/suites/resource_limits/res_limits_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_res_limits_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_res_limits_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/retrocl_plugin/retrocl_test.py b/dirsrvtests/tests/suites/retrocl_plugin/retrocl_test.py
deleted file mode 100644
index d1419fa..0000000
--- a/dirsrvtests/tests/suites/retrocl_plugin/retrocl_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_retrocl_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_retrocl_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/reverpwd_plugin/reverpwd_test.py b/dirsrvtests/tests/suites/reverpwd_plugin/reverpwd_test.py
deleted file mode 100644
index b5b978b..0000000
--- a/dirsrvtests/tests/suites/reverpwd_plugin/reverpwd_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_reverpwd_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_reverpwd_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/roles_plugin/roles_test.py b/dirsrvtests/tests/suites/roles_plugin/roles_test.py
deleted file mode 100644
index 2a5ea23..0000000
--- a/dirsrvtests/tests/suites/roles_plugin/roles_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_roles_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_roles_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/rootdn_plugin/rootdn_plugin_test.py b/dirsrvtests/tests/suites/rootdn_plugin/rootdn_plugin_test.py
index a904644..929f0e9 100644
--- a/dirsrvtests/tests/suites/rootdn_plugin/rootdn_plugin_test.py
+++ b/dirsrvtests/tests/suites/rootdn_plugin/rootdn_plugin_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -18,50 +18,16 @@ from lib389.tools import DirSrvTools
from lib389._constants import *
from lib389.properties import *
from lib389.tasks import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
PLUGIN_DN = 'cn=' + PLUGIN_ROOTDN_ACCESS + ',cn=plugins,cn=config'
USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_rootdn_init(topology):
+def test_rootdn_init(topology_st):
'''
Initialize our setup to test the ROot DN Access Control Plugin
@@ -85,7 +51,7 @@ def test_rootdn_init(topology):
ACI = ('(target ="ldap:///cn=config")(targetattr = "*")(version 3.0' +
';acl "all access";allow (all)(userdn="ldap:///anyone");)')
try:
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_ADD, 'aci', ACI)])
+ topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_ADD, 'aci', ACI)])
except ldap.LDAPError as e:
log.fatal('test_rootdn_init: Failed to add aci to config: error ' +
e.message['desc'])
@@ -95,7 +61,7 @@ def test_rootdn_init(topology):
# Create a user to modify the config
#
try:
- topology.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(),
+ topology_st.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(),
'uid': 'user1',
'userpassword': PASSWORD})))
except ldap.LDAPError as e:
@@ -107,7 +73,7 @@ def test_rootdn_init(topology):
# Enable dynamic plugins
#
try:
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
+ topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
except ldap.LDAPError as e:
log.fatal('test_rootdn_init: Failed to set dynamic plugins: error ' + e.message['desc'])
assert False
@@ -115,12 +81,12 @@ def test_rootdn_init(topology):
#
# Enable the plugin (aftewr enabling dynamic plugins)
#
- topology.standalone.plugins.enable(PLUGIN_ROOTDN_ACCESS)
+ topology_st.standalone.plugins.enable(PLUGIN_ROOTDN_ACCESS)
log.info('test_rootdn_init: Initialized root DN test suite.')
-def test_rootdn_access_specific_time(topology):
+def test_rootdn_access_specific_time(topology_st):
'''
Test binding inside and outside of a specific time
'''
@@ -137,7 +103,7 @@ def test_rootdn_access_specific_time(topology):
close_time = '1800'
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-open-time', open_time),
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-open-time', open_time),
(ldap.MOD_ADD, 'rootdn-close-time', close_time)])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_specific_time: Failed to set (blocking) open/close times: error ' +
@@ -148,7 +114,7 @@ def test_rootdn_access_specific_time(topology):
# Bind as Root DN - should fail
#
try:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
succeeded = True
except ldap.LDAPError as e:
succeeded = False
@@ -161,13 +127,13 @@ def test_rootdn_access_specific_time(topology):
# Set config to allow the entire day
#
try:
- topology.standalone.simple_bind_s(USER1_DN, PASSWORD)
+ topology_st.standalone.simple_bind_s(USER1_DN, PASSWORD)
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_specific_time: test_rootdn: failed to bind as user1')
assert False
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'),
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'),
(ldap.MOD_REPLACE, 'rootdn-close-time', '2359')])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_specific_time: Failed to set (open) open/close times: error ' +
@@ -175,7 +141,7 @@ def test_rootdn_access_specific_time(topology):
assert False
try:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_specific_time: Root DN bind failed unexpectedly failed: error ' +
e.message['desc'])
@@ -185,7 +151,7 @@ def test_rootdn_access_specific_time(topology):
# Cleanup - undo the changes we made so the next test has a clean slate
#
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-open-time', None),
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-open-time', None),
(ldap.MOD_DELETE, 'rootdn-close-time', None)])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_specific_time: Failed to delete open and close time: error ' +
@@ -193,7 +159,7 @@ def test_rootdn_access_specific_time(topology):
assert False
try:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_specific_time: Root DN bind failed unexpectedly failed: error ' +
e.message['desc'])
@@ -202,7 +168,7 @@ def test_rootdn_access_specific_time(topology):
log.info('test_rootdn_access_specific_time: PASSED')
-def test_rootdn_access_day_of_week(topology):
+def test_rootdn_access_day_of_week(topology_st):
'''
Test the days of week feature
'''
@@ -231,7 +197,7 @@ def test_rootdn_access_day_of_week(topology):
# Set the deny days
#
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed',
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed',
deny_days)])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_day_of_week: Failed to set the deny days: error ' +
@@ -242,7 +208,7 @@ def test_rootdn_access_day_of_week(topology):
# Bind as Root DN - should fail
#
try:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
succeeded = True
except ldap.LDAPError as e:
succeeded = False
@@ -255,13 +221,13 @@ def test_rootdn_access_day_of_week(topology):
# Set the allow days
#
try:
- topology.standalone.simple_bind_s(USER1_DN, PASSWORD)
+ topology_st.standalone.simple_bind_s(USER1_DN, PASSWORD)
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_day_of_week: : failed to bind as user1')
assert False
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed',
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed',
allow_days)])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_day_of_week: Failed to set the deny days: error ' +
@@ -269,7 +235,7 @@ def test_rootdn_access_day_of_week(topology):
assert False
try:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_day_of_week: Root DN bind failed unexpectedly failed: error ' +
e.message['desc'])
@@ -279,14 +245,14 @@ def test_rootdn_access_day_of_week(topology):
# Cleanup - undo the changes we made so the next test has a clean slate
#
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-days-allowed', None)])
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-days-allowed', None)])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_day_of_week: Failed to set rootDN plugin config: error ' +
e.message['desc'])
assert False
try:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_day_of_week: Root DN bind failed unexpectedly failed: error ' +
e.message['desc'])
@@ -295,7 +261,7 @@ def test_rootdn_access_day_of_week(topology):
log.info('test_rootdn_access_day_of_week: PASSED')
-def test_rootdn_access_denied_ip(topology):
+def test_rootdn_access_denied_ip(topology_st):
'''
Test denied IP feature - we can just test denying 127.0.01
'''
@@ -303,7 +269,7 @@ def test_rootdn_access_denied_ip(topology):
log.info('Running test_rootdn_access_denied_ip...')
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
'rootdn-deny-ip',
'127.0.0.1'),
(ldap.MOD_ADD,
@@ -318,7 +284,7 @@ def test_rootdn_access_denied_ip(topology):
# Bind as Root DN - should fail
#
try:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
succeeded = True
except ldap.LDAPError as e:
succeeded = False
@@ -331,20 +297,20 @@ def test_rootdn_access_denied_ip(topology):
# Change the denied IP so root DN succeeds
#
try:
- topology.standalone.simple_bind_s(USER1_DN, PASSWORD)
+ topology_st.standalone.simple_bind_s(USER1_DN, PASSWORD)
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_denied_ip: : failed to bind as user1')
assert False
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-deny-ip', '255.255.255.255')])
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-deny-ip', '255.255.255.255')])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_denied_ip: Failed to set rootDN plugin config: error ' +
e.message['desc'])
assert False
try:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_denied_ip: Root DN bind failed unexpectedly failed: error ' +
e.message['desc'])
@@ -354,14 +320,14 @@ def test_rootdn_access_denied_ip(topology):
# Cleanup - undo the changes we made so the next test has a clean slate
#
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-deny-ip', None)])
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-deny-ip', None)])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_denied_ip: Failed to set rootDN plugin config: error ' +
e.message['desc'])
assert False
try:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_denied_ip: Root DN bind failed unexpectedly failed: error ' +
e.message['desc'])
@@ -370,7 +336,7 @@ def test_rootdn_access_denied_ip(topology):
log.info('test_rootdn_access_denied_ip: PASSED')
-def test_rootdn_access_denied_host(topology):
+def test_rootdn_access_denied_host(topology_st):
'''
Test denied Host feature - we can just test denying localhost
'''
@@ -379,10 +345,10 @@ def test_rootdn_access_denied_host(topology):
hostname = socket.gethostname()
localhost = DirSrvTools.getLocalhost()
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD,
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD,
'rootdn-deny-host',
hostname)])
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD,
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD,
'rootdn-deny-host',
localhost)])
except ldap.LDAPError as e:
@@ -394,7 +360,7 @@ def test_rootdn_access_denied_host(topology):
# Bind as Root DN - should fail
#
try:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
succeeded = True
except ldap.LDAPError as e:
succeeded = False
@@ -407,20 +373,20 @@ def test_rootdn_access_denied_host(topology):
# Change the denied host so root DN succeeds
#
try:
- topology.standalone.simple_bind_s(USER1_DN, PASSWORD)
+ topology_st.standalone.simple_bind_s(USER1_DN, PASSWORD)
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_denied_host: : failed to bind as user1')
assert False
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-deny-host', 'i.dont.exist.com')])
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-deny-host', 'i.dont.exist.com')])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_denied_host: Failed to set rootDN plugin config: error ' +
e.message['desc'])
assert False
try:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_denied_host: Root DN bind failed unexpectedly failed: error ' +
e.message['desc'])
@@ -430,14 +396,14 @@ def test_rootdn_access_denied_host(topology):
# Cleanup - undo the changes we made so the next test has a clean slate
#
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-deny-host', None)])
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-deny-host', None)])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_denied_host: Failed to set rootDN plugin config: error ' +
e.message['desc'])
assert False
try:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_denied_host: Root DN bind failed unexpectedly failed: error ' +
e.message['desc'])
@@ -446,7 +412,7 @@ def test_rootdn_access_denied_host(topology):
log.info('test_rootdn_access_denied_host: PASSED')
-def test_rootdn_access_allowed_ip(topology):
+def test_rootdn_access_allowed_ip(topology_st):
'''
Test allowed ip feature
'''
@@ -457,7 +423,7 @@ def test_rootdn_access_allowed_ip(topology):
# Set allowed host to an unknown host - blocks the Root DN
#
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-ip', '255.255.255.255')])
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-ip', '255.255.255.255')])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_allowed_ip: Failed to set allowed host: error ' +
e.message['desc'])
@@ -467,7 +433,7 @@ def test_rootdn_access_allowed_ip(topology):
# Bind as Root DN - should fail
#
try:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
succeeded = True
except ldap.LDAPError as e:
succeeded = False
@@ -480,13 +446,13 @@ def test_rootdn_access_allowed_ip(topology):
# Allow localhost
#
try:
- topology.standalone.simple_bind_s(USER1_DN, PASSWORD)
+ topology_st.standalone.simple_bind_s(USER1_DN, PASSWORD)
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_allowed_ip: : failed to bind as user1')
assert False
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-ip', '127.0.0.1'),
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-ip', '127.0.0.1'),
(ldap.MOD_ADD, 'rootdn-allow-ip', '::1')])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_allowed_ip: Failed to set allowed host: error ' +
@@ -494,7 +460,7 @@ def test_rootdn_access_allowed_ip(topology):
assert False
try:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_allowed_ip: Root DN bind failed unexpectedly failed: error ' +
e.message['desc'])
@@ -504,14 +470,14 @@ def test_rootdn_access_allowed_ip(topology):
# Cleanup - undo everything we did so the next test has a clean slate
#
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-allow-ip', None)])
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-allow-ip', None)])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_allowed_ip: Failed to delete(rootdn-allow-ip): error ' +
e.message['desc'])
assert False
try:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_allowed_ip: Root DN bind failed unexpectedly failed: error ' +
e.message['desc'])
@@ -520,7 +486,7 @@ def test_rootdn_access_allowed_ip(topology):
log.info('test_rootdn_access_allowed_ip: PASSED')
-def test_rootdn_access_allowed_host(topology):
+def test_rootdn_access_allowed_host(topology_st):
'''
Test allowed ip feature
'''
@@ -531,7 +497,7 @@ def test_rootdn_access_allowed_host(topology):
# Set allowed host to an unknown host - blocks the Root DN
#
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-host', 'i.dont.exist.com')])
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-host', 'i.dont.exist.com')])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_allowed_host: Failed to set allowed host: error ' +
e.message['desc'])
@@ -541,7 +507,7 @@ def test_rootdn_access_allowed_host(topology):
# Bind as Root DN - should fail
#
try:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
succeeded = True
except ldap.LDAPError as e:
succeeded = False
@@ -554,7 +520,7 @@ def test_rootdn_access_allowed_host(topology):
# Allow localhost
#
try:
- topology.standalone.simple_bind_s(USER1_DN, PASSWORD)
+ topology_st.standalone.simple_bind_s(USER1_DN, PASSWORD)
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_allowed_host: : failed to bind as user1')
assert False
@@ -562,10 +528,10 @@ def test_rootdn_access_allowed_host(topology):
hostname = socket.gethostname()
localhost = DirSrvTools.getLocalhost()
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD,
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD,
'rootdn-allow-host',
localhost)])
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD,
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD,
'rootdn-allow-host',
hostname)])
except ldap.LDAPError as e:
@@ -574,7 +540,7 @@ def test_rootdn_access_allowed_host(topology):
assert False
try:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_allowed_host: Root DN bind failed unexpectedly failed: error ' +
e.message['desc'])
@@ -584,14 +550,14 @@ def test_rootdn_access_allowed_host(topology):
# Cleanup - undo everything we did so the next test has a clean slate
#
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-allow-host', None)])
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-allow-host', None)])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_allowed_host: Failed to delete(rootdn-allow-host): error ' +
e.message['desc'])
assert False
try:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_allowed_host: Root DN bind failed unexpectedly failed: error ' +
e.message['desc'])
@@ -600,7 +566,7 @@ def test_rootdn_access_allowed_host(topology):
log.info('test_rootdn_access_allowed_host: PASSED')
-def test_rootdn_config_validate(topology):
+def test_rootdn_config_validate(topology_st):
'''
Test configuration validation
@@ -616,14 +582,14 @@ def test_rootdn_config_validate(topology):
# Test rootdn-open-time
#
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000')])
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000')])
log.fatal('test_rootdn_config_validate: Incorrectly allowed to just add "rootdn-open-time" ')
assert False
except ldap.LDAPError:
pass
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-open-time', '0000'),
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-open-time', '0000'),
(ldap.MOD_ADD, 'rootdn-open-time', '0001')])
log.fatal('test_rootdn_config_validate: Incorrectly allowed to add multiple "rootdn-open-time"')
assert False
@@ -631,7 +597,7 @@ def test_rootdn_config_validate(topology):
pass
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '-1'),
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '-1'),
(ldap.MOD_REPLACE, 'rootdn-close-time', '0000')])
log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-open-time: -1"')
assert False
@@ -639,7 +605,7 @@ def test_rootdn_config_validate(topology):
pass
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '2400'),
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '2400'),
(ldap.MOD_REPLACE, 'rootdn-close-time', '0000')])
log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-open-time: 2400"')
assert False
@@ -647,7 +613,7 @@ def test_rootdn_config_validate(topology):
pass
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', 'aaaaa'),
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', 'aaaaa'),
(ldap.MOD_REPLACE, 'rootdn-close-time', '0000')])
log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-open-time: aaaaa"')
assert False
@@ -658,14 +624,14 @@ def test_rootdn_config_validate(topology):
# Test rootdn-close-time
#
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-close-time', '0000')])
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-close-time', '0000')])
log.fatal('test_rootdn_config_validate: Incorrectly allowed to add just "rootdn-close-time"')
assert False
except ldap.LDAPError:
pass
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-close-time', '0000'),
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-close-time', '0000'),
(ldap.MOD_ADD, 'rootdn-close-time', '0001')])
log.fatal('test_rootdn_config_validate: Incorrectly allowed to add multiple "rootdn-open-time"')
assert False
@@ -673,7 +639,7 @@ def test_rootdn_config_validate(topology):
pass
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'),
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'),
(ldap.MOD_REPLACE, 'rootdn-close-time', '-1')])
log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-close-time: -1"')
assert False
@@ -681,7 +647,7 @@ def test_rootdn_config_validate(topology):
pass
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'),
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'),
(ldap.MOD_REPLACE, 'rootdn-close-time', '2400')])
log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-close-time: 2400"')
assert False
@@ -689,7 +655,7 @@ def test_rootdn_config_validate(topology):
pass
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'),
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'),
(ldap.MOD_REPLACE, 'rootdn-close-time', 'aaaaa')])
log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-close-time: aaaaa"')
assert False
@@ -700,7 +666,7 @@ def test_rootdn_config_validate(topology):
# Test days allowed
#
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-days-allowed', 'Mon'),
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-days-allowed', 'Mon'),
(ldap.MOD_ADD, 'rootdn-days-allowed', 'Tue')])
log.fatal('test_rootdn_config_validate: Incorrectly allowed to add two "rootdn-days-allowed"')
assert False
@@ -708,28 +674,28 @@ def test_rootdn_config_validate(topology):
pass
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'Mon1')])
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'Mon1')])
log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-days-allowed: Mon1"')
assert False
except ldap.LDAPError:
pass
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'Tue, Mon1')])
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'Tue, Mon1')])
log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-days-allowed: Tue, Mon1"')
assert False
except ldap.LDAPError:
pass
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'm111m')])
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'm111m')])
log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-days-allowed: 111"')
assert False
except ldap.LDAPError:
pass
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'Gur')])
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'Gur')])
log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-days-allowed: Gur"')
assert False
except ldap.LDAPError:
@@ -739,7 +705,7 @@ def test_rootdn_config_validate(topology):
# Test allow ips
#
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-ip', '12.12.Z.12')])
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-ip', '12.12.Z.12')])
log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-allow-ip: 12.12.Z.12"')
assert False
except ldap.LDAPError:
@@ -749,7 +715,7 @@ def test_rootdn_config_validate(topology):
# Test deny ips
#
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-deny-ip', '12.12.Z.12')])
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-deny-ip', '12.12.Z.12')])
log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-deny-ip: 12.12.Z.12"')
assert False
except ldap.LDAPError:
@@ -759,7 +725,7 @@ def test_rootdn_config_validate(topology):
# Test allow hosts
#
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-host', 'host._.com')])
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-host', 'host._.com')])
log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-allow-host: host._.com"')
assert False
except ldap.LDAPError:
@@ -769,7 +735,7 @@ def test_rootdn_config_validate(topology):
# Test deny hosts
#
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-deny-host', 'host.####.com')])
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-deny-host', 'host.####.com')])
log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-deny-host: host.####.com"')
assert False
except ldap.LDAPError:
diff --git a/dirsrvtests/tests/suites/sasl/sasl_test.py b/dirsrvtests/tests/suites/sasl/sasl_test.py
deleted file mode 100644
index 589e960..0000000
--- a/dirsrvtests/tests/suites/sasl/sasl_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_sasl_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_sasl_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/schema/test_schema.py b/dirsrvtests/tests/suites/schema/test_schema.py
index 6d790d8..ed13d91 100644
--- a/dirsrvtests/tests/suites/schema/test_schema.py
+++ b/dirsrvtests/tests/suites/schema/test_schema.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -24,7 +24,7 @@ from lib389 import DirSrv, Entry, tools
from lib389.tools import DirSrvTools
from lib389._constants import *
from lib389.properties import *
-
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
@@ -34,40 +34,6 @@ occlass = ldap.schema.models.ObjectClass
syntax_len_supported = False
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to create a DirSrv instance for the 'module'.
- '''
- schemainst = DirSrv(verbose=False)
-
- # Args for the master instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- schemainst.allocate(args_instance)
-
- # Remove all the instance
- if schemainst.exists():
- schemainst.delete()
-
- # Create the instance
- schemainst.create()
- schemainst.open()
-
- def fin():
- schemainst.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(schemainst)
-
-
def ochasattr(subschema, oc, mustormay, attr, key):
"""See if the oc and any of its parents and ancestors have the
given attr"""
@@ -156,13 +122,13 @@ def atgetdiffs(ldschema, at1, at2):
return ret
-def test_schema_comparewithfiles(topology):
+def test_schema_comparewithfiles(topology_st):
'''Compare the schema from ldap cn=schema with the schema files'''
log.info('Running test_schema_comparewithfiles...')
retval = True
- schemainst = topology.standalone
+ schemainst = topology_st.standalone
ldschema = schemainst.schema.get_subschema()
assert ldschema
for fn in schemainst.schema.list_files():
diff --git a/dirsrvtests/tests/suites/schema_reload_plugin/schema_reload_test.py b/dirsrvtests/tests/suites/schema_reload_plugin/schema_reload_test.py
deleted file mode 100644
index 878c7f9..0000000
--- a/dirsrvtests/tests/suites/schema_reload_plugin/schema_reload_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_schema_reload_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_schema_reload_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/snmp/snmp_test.py b/dirsrvtests/tests/suites/snmp/snmp_test.py
deleted file mode 100644
index a30b626..0000000
--- a/dirsrvtests/tests/suites/snmp/snmp_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_snmp_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_snmp_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/ssl/ssl_test.py b/dirsrvtests/tests/suites/ssl/ssl_test.py
deleted file mode 100644
index 42738de..0000000
--- a/dirsrvtests/tests/suites/ssl/ssl_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_ssl_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_ssl_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/syntax_plugin/syntax_test.py b/dirsrvtests/tests/suites/syntax_plugin/syntax_test.py
deleted file mode 100644
index 91d2e55..0000000
--- a/dirsrvtests/tests/suites/syntax_plugin/syntax_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_syntax_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_syntax_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/usn_plugin/usn_test.py b/dirsrvtests/tests/suites/usn_plugin/usn_test.py
deleted file mode 100644
index 2e81672..0000000
--- a/dirsrvtests/tests/suites/usn_plugin/usn_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_usn_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_usn_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/views_plugin/views_test.py b/dirsrvtests/tests/suites/views_plugin/views_test.py
deleted file mode 100644
index 3168a0f..0000000
--- a/dirsrvtests/tests/suites/views_plugin/views_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_views_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_views_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/vlv/vlv_test.py b/dirsrvtests/tests/suites/vlv/vlv_test.py
deleted file mode 100644
index 57945d7..0000000
--- a/dirsrvtests/tests/suites/vlv/vlv_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_vlv_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_vlv_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/whoami_plugin/whoami_test.py b/dirsrvtests/tests/suites/whoami_plugin/whoami_test.py
deleted file mode 100644
index e528cc8..0000000
--- a/dirsrvtests/tests/suites/whoami_plugin/whoami_test.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_whoami_init(topology):
- '''
- Write any test suite initialization here(if needed)
- '''
-
- return
-
-
-def test_whoami_(topology):
- '''
- Write a single test here...
- '''
-
- return
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/tickets/finalizer.py b/dirsrvtests/tests/tickets/finalizer.py
index f93ea5c..690c76e 100644
--- a/dirsrvtests/tests/tickets/finalizer.py
+++ b/dirsrvtests/tests/tickets/finalizer.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47313_test.py b/dirsrvtests/tests/tickets/ticket47313_test.py
index 703ea42..5064126 100644
--- a/dirsrvtests/tests/tickets/ticket47313_test.py
+++ b/dirsrvtests/tests/tickets/ticket47313_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47384_test.py b/dirsrvtests/tests/tickets/ticket47384_test.py
index 17dfbaf..3229751 100644
--- a/dirsrvtests/tests/tickets/ticket47384_test.py
+++ b/dirsrvtests/tests/tickets/ticket47384_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47431_test.py b/dirsrvtests/tests/tickets/ticket47431_test.py
index 27a52b9..3453776 100644
--- a/dirsrvtests/tests/tickets/ticket47431_test.py
+++ b/dirsrvtests/tests/tickets/ticket47431_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47462_test.py b/dirsrvtests/tests/tickets/ticket47462_test.py
index 50b867e..66a2385 100644
--- a/dirsrvtests/tests/tickets/ticket47462_test.py
+++ b/dirsrvtests/tests/tickets/ticket47462_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47490_test.py b/dirsrvtests/tests/tickets/ticket47490_test.py
index a4c2491..0da542e 100644
--- a/dirsrvtests/tests/tickets/ticket47490_test.py
+++ b/dirsrvtests/tests/tickets/ticket47490_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47553_test.py b/dirsrvtests/tests/tickets/ticket47553_test.py
index 84d462d..01cd08c 100644
--- a/dirsrvtests/tests/tickets/ticket47553_test.py
+++ b/dirsrvtests/tests/tickets/ticket47553_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47560_test.py b/dirsrvtests/tests/tickets/ticket47560_test.py
index c8424d6..f52926f 100644
--- a/dirsrvtests/tests/tickets/ticket47560_test.py
+++ b/dirsrvtests/tests/tickets/ticket47560_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47573_test.py b/dirsrvtests/tests/tickets/ticket47573_test.py
index ee47f46..e7e9641 100644
--- a/dirsrvtests/tests/tickets/ticket47573_test.py
+++ b/dirsrvtests/tests/tickets/ticket47573_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47619_test.py b/dirsrvtests/tests/tickets/ticket47619_test.py
index 05c1b84..988ea04 100644
--- a/dirsrvtests/tests/tickets/ticket47619_test.py
+++ b/dirsrvtests/tests/tickets/ticket47619_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47640_test.py b/dirsrvtests/tests/tickets/ticket47640_test.py
index 526ac22..09ed691 100644
--- a/dirsrvtests/tests/tickets/ticket47640_test.py
+++ b/dirsrvtests/tests/tickets/ticket47640_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47653MMR_test.py b/dirsrvtests/tests/tickets/ticket47653MMR_test.py
index 107edac..5cd7118 100644
--- a/dirsrvtests/tests/tickets/ticket47653MMR_test.py
+++ b/dirsrvtests/tests/tickets/ticket47653MMR_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47653_test.py b/dirsrvtests/tests/tickets/ticket47653_test.py
index 8f42ade..0eda94b 100644
--- a/dirsrvtests/tests/tickets/ticket47653_test.py
+++ b/dirsrvtests/tests/tickets/ticket47653_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47669_test.py b/dirsrvtests/tests/tickets/ticket47669_test.py
index 021ec15..e26fa05 100644
--- a/dirsrvtests/tests/tickets/ticket47669_test.py
+++ b/dirsrvtests/tests/tickets/ticket47669_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47676_test.py b/dirsrvtests/tests/tickets/ticket47676_test.py
index cdf4096..3ba29c5 100644
--- a/dirsrvtests/tests/tickets/ticket47676_test.py
+++ b/dirsrvtests/tests/tickets/ticket47676_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47714_test.py b/dirsrvtests/tests/tickets/ticket47714_test.py
index 83c5bef..08ca98a 100644
--- a/dirsrvtests/tests/tickets/ticket47714_test.py
+++ b/dirsrvtests/tests/tickets/ticket47714_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47721_test.py b/dirsrvtests/tests/tickets/ticket47721_test.py
index 033fe70..b1606bb 100644
--- a/dirsrvtests/tests/tickets/ticket47721_test.py
+++ b/dirsrvtests/tests/tickets/ticket47721_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47781_test.py b/dirsrvtests/tests/tickets/ticket47781_test.py
index fe65c89..40de5bb 100644
--- a/dirsrvtests/tests/tickets/ticket47781_test.py
+++ b/dirsrvtests/tests/tickets/ticket47781_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47787_test.py b/dirsrvtests/tests/tickets/ticket47787_test.py
index 443e223..53e5b00 100644
--- a/dirsrvtests/tests/tickets/ticket47787_test.py
+++ b/dirsrvtests/tests/tickets/ticket47787_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47808_test.py b/dirsrvtests/tests/tickets/ticket47808_test.py
index 862333a..a92059e 100644
--- a/dirsrvtests/tests/tickets/ticket47808_test.py
+++ b/dirsrvtests/tests/tickets/ticket47808_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47815_test.py b/dirsrvtests/tests/tickets/ticket47815_test.py
index 04201f1..b00f5e8 100644
--- a/dirsrvtests/tests/tickets/ticket47815_test.py
+++ b/dirsrvtests/tests/tickets/ticket47815_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47819_test.py b/dirsrvtests/tests/tickets/ticket47819_test.py
index 95c2ba5..2b751c8 100644
--- a/dirsrvtests/tests/tickets/ticket47819_test.py
+++ b/dirsrvtests/tests/tickets/ticket47819_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47823_test.py b/dirsrvtests/tests/tickets/ticket47823_test.py
index 36bd19a..71b7356 100644
--- a/dirsrvtests/tests/tickets/ticket47823_test.py
+++ b/dirsrvtests/tests/tickets/ticket47823_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47828_test.py b/dirsrvtests/tests/tickets/ticket47828_test.py
index 4f48440..e3b8306 100644
--- a/dirsrvtests/tests/tickets/ticket47828_test.py
+++ b/dirsrvtests/tests/tickets/ticket47828_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47829_test.py b/dirsrvtests/tests/tickets/ticket47829_test.py
index 94a3a2e..0e95adc 100644
--- a/dirsrvtests/tests/tickets/ticket47829_test.py
+++ b/dirsrvtests/tests/tickets/ticket47829_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47833_test.py b/dirsrvtests/tests/tickets/ticket47833_test.py
index 7140f01..419f6eb 100644
--- a/dirsrvtests/tests/tickets/ticket47833_test.py
+++ b/dirsrvtests/tests/tickets/ticket47833_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47838_test.py b/dirsrvtests/tests/tickets/ticket47838_test.py
index 6a3adb7..9023878 100644
--- a/dirsrvtests/tests/tickets/ticket47838_test.py
+++ b/dirsrvtests/tests/tickets/ticket47838_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47869MMR_test.py b/dirsrvtests/tests/tickets/ticket47869MMR_test.py
index 95627d2..a52db5e 100644
--- a/dirsrvtests/tests/tickets/ticket47869MMR_test.py
+++ b/dirsrvtests/tests/tickets/ticket47869MMR_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47871_test.py b/dirsrvtests/tests/tickets/ticket47871_test.py
index 6d19e8e..417a87e 100644
--- a/dirsrvtests/tests/tickets/ticket47871_test.py
+++ b/dirsrvtests/tests/tickets/ticket47871_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47900_test.py b/dirsrvtests/tests/tickets/ticket47900_test.py
index 404c91e..1265eea 100644
--- a/dirsrvtests/tests/tickets/ticket47900_test.py
+++ b/dirsrvtests/tests/tickets/ticket47900_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47910_test.py b/dirsrvtests/tests/tickets/ticket47910_test.py
index 0455722..b2986ea 100644
--- a/dirsrvtests/tests/tickets/ticket47910_test.py
+++ b/dirsrvtests/tests/tickets/ticket47910_test.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47920_test.py b/dirsrvtests/tests/tickets/ticket47920_test.py
index 078cae1..301d6a6 100644
--- a/dirsrvtests/tests/tickets/ticket47920_test.py
+++ b/dirsrvtests/tests/tickets/ticket47920_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47921_test.py b/dirsrvtests/tests/tickets/ticket47921_test.py
index 537a59b..e46e996 100644
--- a/dirsrvtests/tests/tickets/ticket47921_test.py
+++ b/dirsrvtests/tests/tickets/ticket47921_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47927_test.py b/dirsrvtests/tests/tickets/ticket47927_test.py
index c425ace..7e19ae0 100644
--- a/dirsrvtests/tests/tickets/ticket47927_test.py
+++ b/dirsrvtests/tests/tickets/ticket47927_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47937_test.py b/dirsrvtests/tests/tickets/ticket47937_test.py
index 5a0f354..71a6ef1 100644
--- a/dirsrvtests/tests/tickets/ticket47937_test.py
+++ b/dirsrvtests/tests/tickets/ticket47937_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47950_test.py b/dirsrvtests/tests/tickets/ticket47950_test.py
index 7dc8c60..fc3975b 100644
--- a/dirsrvtests/tests/tickets/ticket47950_test.py
+++ b/dirsrvtests/tests/tickets/ticket47950_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47953_test.py b/dirsrvtests/tests/tickets/ticket47953_test.py
index bebd76e..69c57e2 100644
--- a/dirsrvtests/tests/tickets/ticket47953_test.py
+++ b/dirsrvtests/tests/tickets/ticket47953_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47963_test.py b/dirsrvtests/tests/tickets/ticket47963_test.py
index 6ca74fd..0200198 100644
--- a/dirsrvtests/tests/tickets/ticket47963_test.py
+++ b/dirsrvtests/tests/tickets/ticket47963_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47966_test.py b/dirsrvtests/tests/tickets/ticket47966_test.py
index 75550a6..4748c12 100644
--- a/dirsrvtests/tests/tickets/ticket47966_test.py
+++ b/dirsrvtests/tests/tickets/ticket47966_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47970_test.py b/dirsrvtests/tests/tickets/ticket47970_test.py
index 6d224ba..5eb426d 100644
--- a/dirsrvtests/tests/tickets/ticket47970_test.py
+++ b/dirsrvtests/tests/tickets/ticket47970_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47973_test.py b/dirsrvtests/tests/tickets/ticket47973_test.py
index c7048c0..9116246 100644
--- a/dirsrvtests/tests/tickets/ticket47973_test.py
+++ b/dirsrvtests/tests/tickets/ticket47973_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47980_test.py b/dirsrvtests/tests/tickets/ticket47980_test.py
index a4a49b7..eefc103 100644
--- a/dirsrvtests/tests/tickets/ticket47980_test.py
+++ b/dirsrvtests/tests/tickets/ticket47980_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47981_test.py b/dirsrvtests/tests/tickets/ticket47981_test.py
index 3920cfd..07761fc 100644
--- a/dirsrvtests/tests/tickets/ticket47981_test.py
+++ b/dirsrvtests/tests/tickets/ticket47981_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket47988_test.py b/dirsrvtests/tests/tickets/ticket47988_test.py
index c9835c3..0e975e4 100644
--- a/dirsrvtests/tests/tickets/ticket47988_test.py
+++ b/dirsrvtests/tests/tickets/ticket47988_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket48005_test.py b/dirsrvtests/tests/tickets/ticket48005_test.py
index 39d9334..7463a84 100644
--- a/dirsrvtests/tests/tickets/ticket48005_test.py
+++ b/dirsrvtests/tests/tickets/ticket48005_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket48026_test.py b/dirsrvtests/tests/tickets/ticket48026_test.py
index 730e94b..7eae5c8 100644
--- a/dirsrvtests/tests/tickets/ticket48026_test.py
+++ b/dirsrvtests/tests/tickets/ticket48026_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket48109_test.py b/dirsrvtests/tests/tickets/ticket48109_test.py
index 1d7a334..85faefe 100644
--- a/dirsrvtests/tests/tickets/ticket48109_test.py
+++ b/dirsrvtests/tests/tickets/ticket48109_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket48170_test.py b/dirsrvtests/tests/tickets/ticket48170_test.py
index 3ffa964..7cbea4a 100644
--- a/dirsrvtests/tests/tickets/ticket48170_test.py
+++ b/dirsrvtests/tests/tickets/ticket48170_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket48226_test.py b/dirsrvtests/tests/tickets/ticket48226_test.py
index 90f4a21..9812d74 100644
--- a/dirsrvtests/tests/tickets/ticket48226_test.py
+++ b/dirsrvtests/tests/tickets/ticket48226_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket48228_test.py b/dirsrvtests/tests/tickets/ticket48228_test.py
index 851d776..8559bcd 100644
--- a/dirsrvtests/tests/tickets/ticket48228_test.py
+++ b/dirsrvtests/tests/tickets/ticket48228_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket48252_test.py b/dirsrvtests/tests/tickets/ticket48252_test.py
index e4e0cbb..37f2635 100644
--- a/dirsrvtests/tests/tickets/ticket48252_test.py
+++ b/dirsrvtests/tests/tickets/ticket48252_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket48265_test.py b/dirsrvtests/tests/tickets/ticket48265_test.py
index d255f33..8195ea9 100644
--- a/dirsrvtests/tests/tickets/ticket48265_test.py
+++ b/dirsrvtests/tests/tickets/ticket48265_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket48294_test.py b/dirsrvtests/tests/tickets/ticket48294_test.py
index 567d5b8..265533f 100644
--- a/dirsrvtests/tests/tickets/ticket48294_test.py
+++ b/dirsrvtests/tests/tickets/ticket48294_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket48295_test.py b/dirsrvtests/tests/tickets/ticket48295_test.py
index 96ebaf2..42e2d38 100644
--- a/dirsrvtests/tests/tickets/ticket48295_test.py
+++ b/dirsrvtests/tests/tickets/ticket48295_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket48366_test.py b/dirsrvtests/tests/tickets/ticket48366_test.py
index 578d5e9..46ee8ba 100644
--- a/dirsrvtests/tests/tickets/ticket48366_test.py
+++ b/dirsrvtests/tests/tickets/ticket48366_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket48759_test.py b/dirsrvtests/tests/tickets/ticket48759_test.py
index 3d8b026..d007728 100644
--- a/dirsrvtests/tests/tickets/ticket48759_test.py
+++ b/dirsrvtests/tests/tickets/ticket48759_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket48891_test.py b/dirsrvtests/tests/tickets/ticket48891_test.py
index dadd9d5..5c6e57d 100644
--- a/dirsrvtests/tests/tickets/ticket48891_test.py
+++ b/dirsrvtests/tests/tickets/ticket48891_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket48906_test.py b/dirsrvtests/tests/tickets/ticket48906_test.py
index b462395..393743b 100644
--- a/dirsrvtests/tests/tickets/ticket48906_test.py
+++ b/dirsrvtests/tests/tickets/ticket48906_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
diff --git a/dirsrvtests/tests/tickets/ticket548_test.py b/dirsrvtests/tests/tickets/ticket548_test.py
index 5171dbb..257213a 100644
--- a/dirsrvtests/tests/tickets/ticket548_test.py
+++ b/dirsrvtests/tests/tickets/ticket548_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
6 years, 9 months
6 commits - configure.ac include/base include/public ldap/include ldap/servers ldap/systools lib/base lib/ldaputil Makefile.am
by William Brown
Makefile.am | 10 ++++-----
configure.ac | 15 ++++++++++++-
include/base/systems.h | 26 +++++++++++++++++++++++
include/public/base/systems.h | 9 ++++++++
ldap/include/portable.h | 8 +++----
ldap/servers/slapd/log.c | 2 -
ldap/servers/slapd/tools/ldclt/ldclt.h | 5 ++++
ldap/systools/idsktune.c | 4 +++
lib/base/crit.cpp | 36 ++++++++++++++++-----------------
lib/base/dnsdmain.cpp | 2 -
lib/base/file.cpp | 2 -
lib/ldaputil/cert.c | 4 ++-
lib/ldaputil/certmap.c | 4 ++-
lib/ldaputil/dbconf.c | 4 ++-
lib/ldaputil/encode.c | 4 ++-
15 files changed, 100 insertions(+), 35 deletions(-)
New commits:
commit 251424ef23522b716269059083b3377cbbd42d72
Author: William Brown <wibrown(a)redhat.com>
Date: Thu May 12 16:14:44 2016 +1000
Ticket 48797 - Add freebsd support to ns-slapd: Configure and makefile.
Description: Fix the configure file and make file to support FreeBSD specifics
https://fedorahosted.org/389/ticket/48797
Author: wibrown
Reviewed by: lslebodn (Thanks!)
diff --git a/Makefile.am b/Makefile.am
index 546f89e..1d76aeb 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -61,7 +61,7 @@ AM_LDFLAGS = -lpthread
else
#AM_LDFLAGS = -Wl,-z,defs
AM_LDFLAGS = $(ASAN_DEFINES)
-endif
+endif #end hpux
#------------------------
# Linker Flags
@@ -89,7 +89,7 @@ PCRE_LINK = @pcre_lib@ -lpcre
NETSNMP_LINK = @netsnmp_lib@ @netsnmp_link@
PAM_LINK = -lpam
KERBEROS_LINK = $(kerberos_lib)
-DLOPEN_LINK = -ldl
+
SYSTEMD_LINK = @systemd_lib@
@@ -1751,7 +1751,7 @@ ns_slapd_SOURCES = ldap/servers/slapd/abandon.c \
ns_slapd_CPPFLAGS = $(AM_CPPFLAGS) @sasl_inc@ @openldap_inc@ @ldapsdk_inc@ @nss_inc@ \
@nspr_inc@ @svrcore_inc@ @systemd_inc@
-ns_slapd_LDADD = libslapd.la libldaputil.a $(LDAPSDK_LINK) $(NSS_LINK) $(DLOPEN_LINK) \
+ns_slapd_LDADD = libslapd.la libldaputil.a $(LDAPSDK_LINK) $(NSS_LINK) $(LIBADD_DL) \
$(NSPR_LINK) $(SASL_LINK) $(SVRCORE_LINK) $(LIBNSL) $(LIBSOCKET) $(THREADLIB) $(SYSTEMD_LINK)
# We need to link ns-slapd with the C++ compiler on HP-UX since we load
# some C++ shared libraries (such as icu).
@@ -1946,7 +1946,7 @@ endif
if SYSTEMD
$(fixupcmd) $^ | sed -e 's/@preamble@/# This file is in systemd EnvironmentFile format - see man systemd.exec/' > $@
else
- $(fixupcmd) $^ | sed -n -e 's/@preamble@//' -e '/^#/{p;d}' -e '/^$$/{p;d}' -e 's/^\([^=]*\)\(=.*\)$$/\1\2 ; export \1/ ; p' > $@
+ $(fixupcmd) $^ | sed -n -e 's/@preamble@//' -e '/^#/{p;d;}' -e '/^$$/{p;d;}' -e 's/^\([^=]*\)\(=.*\)$$/\1\2 ; export \1/ ; p' > $@
$(fixupcmd) $(srcdir)/ldap/admin/src/initconfig.in >> $@
endif
@@ -1955,7 +1955,7 @@ endif
if SYSTEMD
$(fixupcmd) $^ | sed -e 's/@preamble@/# This file is in systemd EnvironmentFile format - see man systemd.exec/' > $@
else
- $(fixupcmd) $^ | sed -n -e 's/@preamble@//' -e '/^#/{p;d}' -e '/^$$/{p;d}' -e 's/^\([^=]*\)\(=.*\)$$/\1\2 ; export \1/ ; p' > $@
+ $(fixupcmd) $^ | sed -n -e 's/@preamble@//' -e '/^#/{p;d;}' -e '/^$$/{p;d;}' -e 's/^\([^=]*\)\(=.*\)$$/\1\2 ; export \1/ ; p' > $@
endif
%/$(PACKAGE_NAME).pc: %/dirsrv.pc.in
diff --git a/configure.ac b/configure.ac
index 327452f..57b5b29 100644
--- a/configure.ac
+++ b/configure.ac
@@ -39,7 +39,7 @@ AC_PROG_LIBTOOL
AC_HEADER_DIRENT
AC_HEADER_STDC
AC_HEADER_SYS_WAIT
-AC_CHECK_HEADERS([arpa/inet.h fcntl.h malloc.h netdb.h netinet/in.h stdlib.h string.h strings.h sys/file.h sys/socket.h sys/time.h unistd.h inttypes.h mntent.h])
+AC_CHECK_HEADERS([arpa/inet.h fcntl.h malloc.h netdb.h netinet/in.h stdlib.h string.h strings.h sys/file.h sys/socket.h sys/time.h unistd.h inttypes.h mntent.h sys/sysinfo.h])
# Checks for typedefs, structures, and compiler characteristics.
AC_HEADER_STAT
@@ -68,6 +68,9 @@ AC_FUNC_STRFTIME
AC_FUNC_VPRINTF
AC_CHECK_FUNCS([clock_gettime endpwent ftruncate getcwd gethostbyname inet_ntoa localtime_r memmove memset mkdir munmap putenv rmdir setrlimit socket strcasecmp strchr strcspn strdup strerror strncasecmp strpbrk strrchr strstr strtol tzset])
+# This will detect if we need to add the LIBADD_DL value for us.
+LT_LIB_DLLOAD
+
AC_MSG_CHECKING(for --enable-debug)
AC_ARG_ENABLE(debug, AS_HELP_STRING([--enable-debug], [Enable debug features (default: no)]),
[
@@ -550,6 +553,15 @@ case $host in
AC_SUBST([LIBCRYPT], [$LIBCRYPT])
AC_DEFINE([USE_POSIX_RWLOCKS], [1], [POSIX rwlocks])
;;
+ *-*-freebsd*)
+ AC_DEFINE([FREEBSD], [1], [FreeBSD])
+ platform="freebsd"
+ initdir='$(sysconfdir)/rc.d'
+ THREADLIB=-lthr
+ AC_SUBST([THREADLIB], [$THREADLIB])
+ AC_DEFINE([USE_POSIX_RWLOCKS], [1], [POSIX rwlocks])
+ LIBDL=
+ ;;
ia64-hp-hpux*)
AC_DEFINE([hpux], [1], [HP-UX])
AC_DEFINE([HPUX], [1], [HP-UX])
@@ -654,6 +666,7 @@ AC_SUBST(initconfigdir)
AM_CONDITIONAL([HPUX],[test "$platform" = "hpux"])
AM_CONDITIONAL([SOLARIS],[test "$platform" = "solaris"])
+AM_CONDITIONAL([FREEBSD],[test "$platform" = "freebsd"])
# Check for library dependencies
m4_include(m4/nspr.m4)
commit 97897530358104868bac4c04d79fbd2407a49390
Author: William Brown <wibrown(a)redhat.com>
Date: Thu May 12 16:49:33 2016 +1000
Ticket 48797 - Add freebsd support to ns-slapd: Add freebsd support for ldaputil
Description: Add freebsd support to the various components of the ldaputil
https://fedorahosted.org/389/ticket/48797
Author: wibrown
Reviewed by: lslebodn (Thanks!)
diff --git a/lib/ldaputil/cert.c b/lib/ldaputil/cert.c
index dfe77c6..aca4f98 100644
--- a/lib/ldaputil/cert.c
+++ b/lib/ldaputil/cert.c
@@ -13,7 +13,9 @@
#include <string.h>
-#include <malloc.h>
+/* This was malloc.h - but it's moved to stdlib.h on most platforms, and FBSD is strict */
+/* Make it stdlib.h, and revert to malloc.h with ifdefs if we have issues here. WB 2016 */
+#include <stdlib.h>
/* removed for ns security integration
#include <sec.h>
diff --git a/lib/ldaputil/certmap.c b/lib/ldaputil/certmap.c
index 8525f51..5f28b8c 100644
--- a/lib/ldaputil/certmap.c
+++ b/lib/ldaputil/certmap.c
@@ -15,7 +15,9 @@
#include <stdio.h>
#include <string.h>
#include <ctype.h>
-#include <malloc.h>
+/* This was malloc.h - but it's moved to stdlib.h on most platforms, and FBSD is strict */
+/* Make it stdlib.h, and revert to malloc.h with ifdefs if we have issues here. WB 2016 */
+#include <stdlib.h>
/* removed for ns security integration
#include <sec.h>
diff --git a/lib/ldaputil/dbconf.c b/lib/ldaputil/dbconf.c
index 048bc56..d5c315b 100644
--- a/lib/ldaputil/dbconf.c
+++ b/lib/ldaputil/dbconf.c
@@ -13,7 +13,9 @@
#include <string.h>
-#include <malloc.h>
+/* This was malloc.h - but it's moved to stdlib.h on most platforms, and FBSD is strict */
+/* Make it stdlib.h, and revert to malloc.h with ifdefs if we have issues here. WB 2016 */
+#include <stdlib.h>
#include <ctype.h>
#include <ldaputil/errors.h>
diff --git a/lib/ldaputil/encode.c b/lib/ldaputil/encode.c
index 267d795..eea3edd 100644
--- a/lib/ldaputil/encode.c
+++ b/lib/ldaputil/encode.c
@@ -12,7 +12,9 @@
#endif
-#include <malloc.h>
+/* This was malloc.h - but it's moved to stdlib.h on most platforms, and FBSD is strict */
+/* Make it stdlib.h, and revert to malloc.h with ifdefs if we have issues here. WB 2016 */
+#include <stdlib.h>
#include <string.h>
#include <ldaputil/certmap.h>
#include <ldaputil/encode.h>
commit b7cda0d1fda6a06d77fbc35dcffe486f219cf3a3
Author: William Brown <wibrown(a)redhat.com>
Date: Thu May 12 16:48:45 2016 +1000
Ticket 48797 - Add freebsd support to ns-slapd: Add support for dsktune
Description: Add freebsd support to dsktune
https://fedorahosted.org/389/ticket/48797
Author: wibrown
Reviewed by: lslebodn (Thanks!)
diff --git a/ldap/systools/idsktune.c b/ldap/systools/idsktune.c
index 08b7f12..c171c95 100644
--- a/ldap/systools/idsktune.c
+++ b/ldap/systools/idsktune.c
@@ -25,6 +25,10 @@ static char *build_date = "14-JULY-2016";
#define IDDS_SYSV_INCLUDE 1
#endif
+#if defined(__FreeBSD__)
+#define IDDS_BSD_INCLUDE 1
+#endif
+
#include <sys/types.h>
#if !defined(__VMS)
commit 2d53f14e56a8864d417a66bca99c700c5e6b6d41
Author: William Brown <wibrown(a)redhat.com>
Date: Thu May 12 16:45:33 2016 +1000
Ticket 48797 - Add freebsd support to ns-slapd: Add support for cpp in Fbsd
Description: Change the name of some variables to prevent collision in FreeBSD
https://fedorahosted.org/389/ticket/48797
Author: wibrown
Reviewed by: lslebodn (Thanks!)
diff --git a/lib/base/crit.cpp b/lib/base/crit.cpp
index e7daf17..ccc1de4 100644
--- a/lib/base/crit.cpp
+++ b/lib/base/crit.cpp
@@ -42,12 +42,12 @@ typedef struct critical {
PRLock *lock;
PRUint32 count;
PRThread *owner;
-} critical_t;
+} ns_critical_t;
typedef struct condvar {
- critical_t *lock;
+ ns_critical_t *lock;
PRCondVar *cvar;
-} condvar_t;
+} ns_condvar_t;
#endif
/* -------------------------- critical sections --------------------------- */
@@ -57,7 +57,7 @@ typedef struct condvar {
NSAPI_PUBLIC int crit_owner_is_me(CRITICAL id)
{
#ifdef USE_NSPR
- critical_t *crit = (critical_t*)id;
+ ns_critical_t *crit = (ns_critical_t*)id;
return (crit->owner == PR_GetCurrentThread());
#else
@@ -68,7 +68,7 @@ NSAPI_PUBLIC int crit_owner_is_me(CRITICAL id)
NSAPI_PUBLIC CRITICAL crit_init(void)
{
#ifdef USE_NSPR
- critical_t *crit = (critical_t*)PERM_MALLOC(sizeof(critical_t)) ;
+ ns_critical_t *crit = (ns_critical_t*)PERM_MALLOC(sizeof(ns_critical_t)) ;
if (crit) {
if (!(crit->lock = PR_NewLock())) {
@@ -87,7 +87,7 @@ NSAPI_PUBLIC CRITICAL crit_init(void)
NSAPI_PUBLIC void crit_enter(CRITICAL id)
{
#ifdef USE_NSPR
- critical_t *crit = (critical_t*)id;
+ ns_critical_t *crit = (ns_critical_t*)id;
PRThread *me = PR_GetCurrentThread();
if ( crit->owner == me) {
@@ -106,7 +106,7 @@ NSAPI_PUBLIC void crit_enter(CRITICAL id)
NSAPI_PUBLIC void crit_exit(CRITICAL id)
{
#ifdef USE_NSPR
- critical_t *crit = (critical_t*)id;
+ ns_critical_t *crit = (ns_critical_t*)id;
if (crit->owner != PR_GetCurrentThread())
return;
@@ -121,7 +121,7 @@ NSAPI_PUBLIC void crit_exit(CRITICAL id)
NSAPI_PUBLIC void crit_terminate(CRITICAL id)
{
#ifdef USE_NSPR
- critical_t *crit = (critical_t*)id;
+ ns_critical_t *crit = (ns_critical_t*)id;
PR_DestroyLock((PRLock*)crit->lock);
PERM_FREE(crit);
@@ -135,9 +135,9 @@ NSAPI_PUBLIC void crit_terminate(CRITICAL id)
NSAPI_PUBLIC CONDVAR condvar_init(CRITICAL id)
{
#ifdef USE_NSPR
- critical_t *crit = (critical_t*)id;
+ ns_critical_t *crit = (ns_critical_t*)id;
- condvar_t *cvar = (condvar_t*)PERM_MALLOC(sizeof(condvar_t)) ;
+ ns_condvar_t *cvar = (ns_condvar_t*)PERM_MALLOC(sizeof(ns_condvar_t)) ;
if (crit) {
cvar->lock = crit;
@@ -153,7 +153,7 @@ NSAPI_PUBLIC CONDVAR condvar_init(CRITICAL id)
NSAPI_PUBLIC void condvar_wait(CONDVAR _cv)
{
#ifdef USE_NSPR
- condvar_t *cv = (condvar_t *)_cv;
+ ns_condvar_t *cv = (ns_condvar_t *)_cv;
/* Save away recursion count so we can restore it after the wait */
int saveCount = cv->lock->count;
PRThread *saveOwner = cv->lock->owner;
@@ -170,10 +170,10 @@ NSAPI_PUBLIC void condvar_wait(CONDVAR _cv)
}
-NSAPI_PUBLIC void condvar_timed_wait(CONDVAR _cv, long secs)
+NSAPI_PUBLIC void ns_condvar_timed_wait(CONDVAR _cv, long secs)
{
#ifdef USE_NSPR
- condvar_t *cv = (condvar_t *)_cv;
+ ns_condvar_t *cv = (ns_condvar_t *)_cv;
/* Save away recursion count so we can restore it after the wait */
int saveCount = cv->lock->count;
PRThread *saveOwner = cv->lock->owner;
@@ -197,7 +197,7 @@ NSAPI_PUBLIC void condvar_timed_wait(CONDVAR _cv, long secs)
NSAPI_PUBLIC void condvar_notify(CONDVAR _cv)
{
#ifdef USE_NSPR
- condvar_t *cv = (condvar_t *)_cv;
+ ns_condvar_t *cv = (ns_condvar_t *)_cv;
PR_ASSERT(cv->lock->owner == PR_GetCurrentThread());
PR_NotifyCondVar(cv->cvar);
#endif
@@ -206,16 +206,16 @@ NSAPI_PUBLIC void condvar_notify(CONDVAR _cv)
NSAPI_PUBLIC void condvar_notifyAll(CONDVAR _cv)
{
#ifdef USE_NSPR
- condvar_t *cv = (condvar_t *)_cv;
+ ns_condvar_t *cv = (ns_condvar_t *)_cv;
PR_ASSERT(cv->lock->owner == PR_GetCurrentThread());
PR_NotifyAllCondVar(cv->cvar);
#endif
}
-NSAPI_PUBLIC void condvar_terminate(CONDVAR _cv)
+NSAPI_PUBLIC void ns_condvar_terminate(CONDVAR _cv)
{
#ifdef USE_NSPR
- condvar_t *cv = (condvar_t *)_cv;
+ ns_condvar_t *cv = (ns_condvar_t *)_cv;
PR_DestroyCondVar(cv->cvar);
PERM_FREE(cv);
#endif
@@ -297,7 +297,7 @@ cs_terminate(COUNTING_SEMAPHORE csp)
/* usfreesema() */
return;
#else
- condvar_terminate(cs->cv);
+ ns_condvar_terminate(cs->cv);
crit_terminate(cs->cv_lock);
crit_terminate(cs->lock);
PERM_FREE(cs);
diff --git a/lib/base/dnsdmain.cpp b/lib/base/dnsdmain.cpp
index 545f617..0b522ad 100644
--- a/lib/base/dnsdmain.cpp
+++ b/lib/base/dnsdmain.cpp
@@ -40,7 +40,7 @@ extern int getdomainname(char *, size_t);
#else
extern int getdomainname(char *, int);
#endif /* Linux */
-#if defined(HPUX) || defined(Linux) || defined(SOLARIS_GCC)
+#if defined(HPUX) || defined(Linux) || defined(SOLARIS_GCC) || defined(__FreeBSD__)
extern int gethostname (char *name, size_t namelen);
#else
extern int gethostname (char *name, int namelen);
diff --git a/lib/base/file.cpp b/lib/base/file.cpp
index ad4333e..4e3609a 100644
--- a/lib/base/file.cpp
+++ b/lib/base/file.cpp
@@ -213,7 +213,7 @@ NSAPI_PUBLIC int file_notfound(void)
return (errno == ENOENT);
}
-#if !defined(LINUX)
+#if !defined(LINUX) && !defined(__FreeBSD__)
extern char *sys_errlist[];
#endif
commit eb4c74043ccf8be48f7551b3c03bf8ff985469cf
Author: William Brown <wibrown(a)redhat.com>
Date: Thu May 12 16:42:35 2016 +1000
Ticket 48797 - Add freebsd support to ns-slapd: Header files
Description: Add header file support for FreeBSD
https://fedorahosted.org/389/ticket/48797
Author: wibrown
Reviewed by: lslebodn (Thanks!)
diff --git a/include/base/systems.h b/include/base/systems.h
index fead5a0..15f10fb 100644
--- a/include/base/systems.h
+++ b/include/base/systems.h
@@ -142,6 +142,32 @@
#define NET_SOCKETS
#define SHMEM_MMAP_FLAGS MAP_SHARED
+#elif defined(__FreeBSD__)
+
+#define ACCELERATOR_CACHE
+#define DNS_CACHE
+#define FILE_INHERIT_FCNTL
+#define DAEMON_UNIX_MOBRULE
+#define BSD_RLIMIT
+#define BSD_SIGNALS
+#define FILE_UNIX_MMAP
+#define FILE_MMAP_FLAGS (MAP_FILE | MAP_SHARED)
+#define SHMEM_UNIX_MMAP
+#define SHMEM_MMAP_FLAGS MAP_SHARED
+#define AUTH_DBM
+#define SEM_FLOCK
+#define DLL_CAPABLE
+#define DLL_DLOPEN
+#define DLL_DLOPEN_FLAGS RTLD_NOW
+#define HAVE_ATEXIT
+#define HAS_STATFS
+#define JAVA_STATIC_LINK
+#undef NEED_CRYPT_PROTO
+#define NET_SOCKETS
+#ifndef NO_DOMAINNAME
+#define NO_DOMAINNAME
+#endif
+
#else
#error "Missing defines in ns/netsite/include/base/systems.h"
#endif
diff --git a/include/public/base/systems.h b/include/public/base/systems.h
index 535a35c..ba36502 100644
--- a/include/public/base/systems.h
+++ b/include/public/base/systems.h
@@ -65,6 +65,15 @@
#define SHMEM_UNIX_MMAP
#define ZERO(ptr,len) memset(ptr,0,len)
+#elif defined(__FreeBSD__)
+
+#define FILE_UNIX
+#define FILE_UNIX_MMAP
+#define MALLOC_POOLS
+#define SEM_FLOCK
+#define SHMEM_UNIX_MMAP
+#define ZERO(ptr,len) memset(ptr,0,len)
+
#else
#error "Missing defines in ns/netsite/include/public/base/systems.h"
#endif
diff --git a/ldap/include/portable.h b/ldap/include/portable.h
index ea164c0..0f454ce 100644
--- a/ldap/include/portable.h
+++ b/ldap/include/portable.h
@@ -124,7 +124,7 @@
* Are sys_errlist and sys_nerr declared in stdio.h?
*/
#ifndef SYSERRLIST_IN_STDIO
-#if defined( freebsd ) || defined(Linux)
+#if defined( __FreeBSD__ ) || defined(Linux)
#define SYSERRLIST_IN_STDIO
#endif
#endif
@@ -250,7 +250,7 @@ int strncasecmp(const char *, const char *, size_t);
defined(UNIXWARE) || defined(SUNOS4) || defined(SNI) || defined(BSDI) || \
defined(NCR) || defined(OSF1) || defined(NEC) || \
( defined(HPUX10) && !defined(_REENTRANT)) || defined(HPUX11) || \
- defined(UnixWare) || defined(LINUX2_0)
+ defined(UnixWare) || defined(LINUX2_0) || defined (__FreeBSD__)
#define GETHOSTBYNAME( n, r, b, l, e ) gethostbyname( n )
#elif defined(AIX)
#define GETHOSTBYNAME_BUF_T struct hostent_data
@@ -282,7 +282,7 @@ typedef char GETHOSTBYADDR_buf_t [BUFSIZ];
#if defined(HPUX9) || defined(LINUX1_2) || defined(SUNOS4) || defined(SNI) || \
- defined(SCOOS) || defined(BSDI) || defined(NCR) || \
+ defined(SCOOS) || defined(BSDI) || defined(NCR) || defined (__FreeBSD__) || \
defined(NEC) || ( defined(HPUX10) && !defined(_REENTRANT))
#define CTIME( c, b, l ) ctime( c )
#elif defined( hpux10 )
@@ -312,7 +312,7 @@ char *strtok_r(char *, const char *, char **);
extern char *strdup();
#endif /* ultrix || nextstep */
-#if defined( sunos4 ) || defined( OSF1 )
+#if defined( sunos4 ) || defined( OSF1 ) || defined (__FreeBSD__)
#define BSD_TIME 1 /* for servers/slapd/log.h */
#endif /* sunos4 || osf */
diff --git a/ldap/servers/slapd/tools/ldclt/ldclt.h b/ldap/servers/slapd/tools/ldclt/ldclt.h
index b5da93a..e371221 100644
--- a/ldap/servers/slapd/tools/ldclt/ldclt.h
+++ b/ldap/servers/slapd/tools/ldclt/ldclt.h
@@ -172,6 +172,11 @@ dd/mm/yy | Author | Comments
#if defined(USE_OPENLDAP)
#define ABS(x) ((x > 0) ? (x) : (-x))
#endif
+
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
/*
* Misc constant definitions
*/
commit 5063d0dbb5fdbf8bd749c0e121740579c8e6e346
Author: Lukas Slebodnik <lslebodn(a)redhat.com>
Date: Tue Nov 29 21:41:28 2016 +0000
Ticket 48978 - Fix implicit function declaration
Description: This patch fixes a typo in the name of the function
for converting loglevel to sysloglevel
https://fedorahosted.org/389/ticket/48978
Reviewed by: wibrown
make all-am
make[1]: Entering directory '/tmp/ds'
CC ldap/servers/slapd/libslapd_la-log.lo
In file included from ldap/servers/slapd/slap.h:132:0,
from ldap/servers/slapd/log.h:43,
from ldap/servers/slapd/log.c:29:
ldap/servers/slapd/log.c: In function 'slapi_log_error':
ldap/servers/slapd/log.c:2471:36: error: implicit declaration of function 'get_syslog_level' [-Werror=implicit-function-declaration]
rc = sd_journal_printv(get_syslog_level(loglevel), fmt, ap_err);
^
cc1: some warnings being treated as errors
Makefile:8207: recipe for target 'ldap/servers/slapd/libslapd_la-log.lo' failed
make[1]: *** [ldap/servers/slapd/libslapd_la-log.lo] Error 1
make[1]: Leaving directory '/tmp/ds'
Makefile:3271: recipe for target 'all' failed
make: *** [all] Error 2
sh# git grep -n get_syslog_level
ldap/servers/slapd/log.c:2471: rc = sd_journal_printv(get_syslog_level(loglevel), fmt, ap_err);
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
index 1195c54..2f43a98 100644
--- a/ldap/servers/slapd/log.c
+++ b/ldap/servers/slapd/log.c
@@ -2468,7 +2468,7 @@ slapi_log_error( int loglevel, char *subsystem, char *fmt, ... )
va_start( ap_err, fmt );
/* va_start( ap_file, fmt ); */
/* This isn't handling RC nicely ... */
- rc = sd_journal_printv(get_syslog_level(loglevel), fmt, ap_err);
+ rc = sd_journal_printv(get_syslog_loglevel(loglevel), fmt, ap_err);
/* rc = sd_journal_printv(LOG_ERROR, fmt, ap_file); */
/* va_end(ap_file); */
va_end(ap_err);
6 years, 9 months
4 commits - ldap/servers lib/ldaputil
by William Brown
ldap/servers/plugins/acl/aclparse.c | 7 -
ldap/servers/plugins/memberof/memberof.c | 6
ldap/servers/plugins/referint/referint.c | 4
ldap/servers/plugins/retrocl/retrocl_cn.c | 3
ldap/servers/plugins/rever/pbe.c | 7 -
ldap/servers/plugins/rootdn_access/rootdn_access.c | 6
ldap/servers/slapd/add.c | 1
ldap/servers/slapd/attrsyntax.c | 15 --
ldap/servers/slapd/back-ldbm/import-threads.c | 7 -
ldap/servers/slapd/back-ldbm/ldbm_config.c | 3
ldap/servers/slapd/back-ldbm/ldbm_modrdn.c | 9 -
ldap/servers/slapd/connection.c | 15 --
ldap/servers/slapd/daemon.c | 21 +--
ldap/servers/slapd/defbackend.c | 15 --
ldap/servers/slapd/delete.c | 4
ldap/servers/slapd/entry.c | 14 --
ldap/servers/slapd/fedse.c | 4
ldap/servers/slapd/filter.c | 5
ldap/servers/slapd/generation.c | 8 -
ldap/servers/slapd/getsocketpeer.c | 4
ldap/servers/slapd/ldaputil.c | 12 -
ldap/servers/slapd/libglobs.c | 79 ++++++-----
ldap/servers/slapd/log.c | 3
ldap/servers/slapd/main.c | 27 +---
ldap/servers/slapd/mapping_tree.c | 12 -
ldap/servers/slapd/modify.c | 4
ldap/servers/slapd/modrdn.c | 4
ldap/servers/slapd/operation.c | 3
ldap/servers/slapd/pblock.c | 2
ldap/servers/slapd/plugin.c | 19 +-
ldap/servers/slapd/plugin_internal_op.c | 4
ldap/servers/slapd/plugin_mr.c | 3
ldap/servers/slapd/proto-slap.h | 2
ldap/servers/slapd/sasl_map.c | 4
ldap/servers/slapd/schema.c | 6
ldap/servers/slapd/slap.h | 6
ldap/servers/slapd/slapi-private.h | 7 +
ldap/servers/slapd/ssl.c | 3
ldap/servers/slapd/task.c | 17 --
ldap/servers/slapd/time.c | 3
ldap/servers/slapd/uniqueid.c | 11 -
ldap/servers/slapd/uniqueidgen.c | 11 -
ldap/servers/slapd/util.c | 59 +++++++-
ldap/servers/slapd/uuid.c | 9 -
ldap/servers/slapd/valueset.c | 141 +++++++++++----------
lib/ldaputil/certmap.c | 14 --
46 files changed, 298 insertions(+), 325 deletions(-)
New commits:
commit b1f434e3f5b8d699909bc1e54465718641a296d3
Author: William Brown <firstyear(a)redhat.com>
Date: Mon Nov 28 11:41:54 2016 +1000
Ticket 49002 - Remove memset on allocation
Bug Description: Memset is slow, and has caused us some issues. C99 supports
allocating zeroed structs with {0}, and we can also use calloc when needed. Calloc is
significantly faster than malloc + memset.
Fix Description: Remove memset where possible. We can't remove it universally
due to some struct reuse, and some libraries needing it, but this reduction
cleans the code greatly, and should give us a perf improvement.
https://fedorahosted.org/389/ticket/49002
Author: wibrown
Review by: mreynolds (Thanks!)
diff --git a/ldap/servers/plugins/acl/aclparse.c b/ldap/servers/plugins/acl/aclparse.c
index a1dae19..d4cecfa 100644
--- a/ldap/servers/plugins/acl/aclparse.c
+++ b/ldap/servers/plugins/acl/aclparse.c
@@ -1562,9 +1562,7 @@ __aclp__init_targetattr (aci_t *aci, char *attr_val, char **errbuf)
*
* The attribute goes in the attrTarget list.
*/
- attr = (Targetattr *) slapi_ch_malloc (sizeof (Targetattr));
- memset (attr, 0, sizeof(Targetattr));
-
+ attr = (Targetattr *) slapi_ch_calloc (1, sizeof (Targetattr));
/* strip double quotes */
lenstr = strlen(str);
if (*str == '"' && *(str + lenstr - 1) == '"') {
@@ -2150,8 +2148,7 @@ static int process_filter_list( Targetattrfilter ***input_attrFilterArray,
*
*/
- attrfilter = (Targetattrfilter *) slapi_ch_malloc (sizeof (Targetattrfilter));
- memset (attrfilter, 0, sizeof(Targetattrfilter));
+ attrfilter = (Targetattrfilter *) slapi_ch_calloc (1, sizeof (Targetattrfilter));
if (strstr( str,":") != NULL) {
if ( __acl_init_targetattrfilter( attrfilter, str ) != 0 ) {
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
index 9af9082..3b38559 100644
--- a/ldap/servers/plugins/memberof/memberof.c
+++ b/ldap/servers/plugins/memberof/memberof.c
@@ -2267,12 +2267,10 @@ int memberof_test_membership_callback(Slapi_Entry *e, void *callback_data)
candidate_array =
(Slapi_Value**)
- slapi_ch_malloc(sizeof(Slapi_Value*)*total);
- memset(candidate_array, 0, sizeof(Slapi_Value*)*total);
+ slapi_ch_calloc(1, sizeof(Slapi_Value*)*total);
member_array =
(Slapi_Value**)
- slapi_ch_malloc(sizeof(Slapi_Value*)*total);
- memset(member_array, 0, sizeof(Slapi_Value*)*total);
+ slapi_ch_calloc(1, sizeof(Slapi_Value*)*total);
hint = slapi_attr_first_value(attr, &val);
diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c
index e6af414..9feb91b 100644
--- a/ldap/servers/plugins/referint/referint.c
+++ b/ldap/servers/plugins/referint/referint.c
@@ -1500,7 +1500,8 @@ referint_thread_func(void *arg)
int my_fgetc(PRFileDesc *stream)
{
- static char buf[READ_BUFSIZE] = "\0";
+ /* This is equivalent to memset of 0, but statically defined. */
+ static char buf[READ_BUFSIZE] = {0};
static int position = READ_BUFSIZE;
int retval;
int err;
@@ -1508,7 +1509,6 @@ int my_fgetc(PRFileDesc *stream)
/* check if we need to load the buffer */
if( READ_BUFSIZE == position )
{
- memset(buf, '\0', READ_BUFSIZE);
if( ( err = PR_Read(stream, buf, READ_BUFSIZE) ) >= 0)
{
/* it read some data */;
diff --git a/ldap/servers/plugins/retrocl/retrocl_cn.c b/ldap/servers/plugins/retrocl/retrocl_cn.c
index ec81bc5..98c14b3 100644
--- a/ldap/servers/plugins/retrocl/retrocl_cn.c
+++ b/ldap/servers/plugins/retrocl/retrocl_cn.c
@@ -171,7 +171,7 @@ int retrocl_get_changenumbers(void)
*/
time_t retrocl_getchangetime( int type, int *err )
{
- cnumRet cr;
+ cnumRet cr = {0};
time_t ret;
if ( type != SLAPI_SEQ_FIRST && type != SLAPI_SEQ_LAST ) {
@@ -180,7 +180,6 @@ time_t retrocl_getchangetime( int type, int *err )
}
return NO_TIME;
}
- memset( &cr, '\0', sizeof( cnumRet ));
slapi_seq_callback( RETROCL_CHANGELOG_DN, type,
(char *)attr_changenumber, /* cast away const */
NULL,
diff --git a/ldap/servers/plugins/rever/pbe.c b/ldap/servers/plugins/rever/pbe.c
index 45b1f97..16982c4 100644
--- a/ldap/servers/plugins/rever/pbe.c
+++ b/ldap/servers/plugins/rever/pbe.c
@@ -184,12 +184,13 @@ genKey(struct pk11ContextStore **out, char *path, int mech, PRArenaPool *arena,
SECItem *pwitem = NULL;
SECItem *result = NULL;
SECItem *salt = NULL;
- SECItem der_algid;
+ SECItem der_algid = {0};
SECAlgorithmID *algid = NULL;
SECOidTag algoid;
CK_MECHANISM pbeMech;
CK_MECHANISM cryptoMech;
- SECAlgorithmID my_algid;
+ /* Have to use long form init due to internal structs */
+ SECAlgorithmID my_algid = {{0}, {0}};
char *configdir = NULL;
char *der_ascii = NULL;
char *iv = NULL;
@@ -262,7 +263,6 @@ genKey(struct pk11ContextStore **out, char *path, int mech, PRArenaPool *arena,
strcpy((char*)salt->data, iv);
salt->len = strlen(iv) + 1;
- PORT_Memset(&der_algid, 0, sizeof(der_algid));
if(!alg){
/*
* This is DES, or we are encoding AES - the process is the same.
@@ -285,7 +285,6 @@ genKey(struct pk11ContextStore **out, char *path, int mech, PRArenaPool *arena,
/*
* We are decoding AES - use the supplied algid
*/
- PORT_Memset(&my_algid, 0, sizeof(my_algid));
/* Decode the base64 der encoding */
der_ascii = PL_Base64Decode(alg, strlen(alg), NULL);
diff --git a/ldap/servers/plugins/rootdn_access/rootdn_access.c b/ldap/servers/plugins/rootdn_access/rootdn_access.c
index e5ebb13..3b65900 100644
--- a/ldap/servers/plugins/rootdn_access/rootdn_access.c
+++ b/ldap/servers/plugins/rootdn_access/rootdn_access.c
@@ -483,11 +483,10 @@ rootdn_check_access(Slapi_PBlock *pb){
*/
if(daysAllowed){
char *timestr;
- char day[4];
+ char day[4] = {0};
char *today = day;
timestr = asctime(timeinfo); // DDD MMM dd hh:mm:ss YYYY
- memset(day, 0 ,sizeof(day));
memmove(day, timestr, 3); // we only want the day
today = strToLower(today);
daysAllowed = strToLower(daysAllowed);
@@ -600,8 +599,7 @@ rootdn_check_access(Slapi_PBlock *pb){
* Check if we are IPv4, so we can grab the correct IP addr for "ip_str"
*/
if ( PR_IsNetAddrType( client_addr, PR_IpAddrV4Mapped ) ) {
- PRNetAddr v4addr;
- memset( &v4addr, 0, sizeof( v4addr ) );
+ PRNetAddr v4addr = {{0}};
v4addr.inet.family = PR_AF_INET;
v4addr.inet.ip = client_addr->ipv6.ip.pr_s6_addr32[3];
if( PR_NetAddrToString( &v4addr, ip_str, sizeof( ip_str )) != PR_SUCCESS){
diff --git a/ldap/servers/slapd/add.c b/ldap/servers/slapd/add.c
index 240fa41..8e671bb 100644
--- a/ldap/servers/slapd/add.c
+++ b/ldap/servers/slapd/add.c
@@ -268,7 +268,6 @@ done:
if(result_pb==NULL)
{
result_pb = slapi_pblock_new();
- pblock_init(result_pb);
slapi_pblock_set(result_pb, SLAPI_PLUGIN_INTOP_RESULT, &opresult);
}
diff --git a/ldap/servers/slapd/attrsyntax.c b/ldap/servers/slapd/attrsyntax.c
index 956c7b8..9b5be46 100644
--- a/ldap/servers/slapd/attrsyntax.c
+++ b/ldap/servers/slapd/attrsyntax.c
@@ -1010,11 +1010,10 @@ attr_syntax_create(
)
{
char *s;
- struct asyntaxinfo a;
+ struct asyntaxinfo a = {0};
int rc = LDAP_SUCCESS;
/* XXXmcs: had to cast away const in many places below */
- memset(&a, 0, sizeof(a));
*asip = NULL;
a.asi_name = slapi_ch_strdup(attr_names[0]);
if ( NULL != attr_names[1] ) {
@@ -1403,9 +1402,8 @@ attr_syntax_force_to_delete(struct asyntaxinfo *asip, void *arg)
void
attr_syntax_all_clear_flag( unsigned long flag )
{
- struct attr_syntax_enum_flaginfo fi;
+ struct attr_syntax_enum_flaginfo fi = {0};
- memset( &fi, 0, sizeof(fi));
fi.asef_flag = flag;
attr_syntax_enumerate_attrs( attr_syntax_clear_flag_callback,
(void *)&fi, PR_TRUE );
@@ -1419,9 +1417,8 @@ attr_syntax_all_clear_flag( unsigned long flag )
void
attr_syntax_delete_all_not_flagged( unsigned long flag )
{
- struct attr_syntax_enum_flaginfo fi;
+ struct attr_syntax_enum_flaginfo fi = {0};
- memset( &fi, 0, sizeof(fi));
fi.asef_flag = flag;
attr_syntax_enumerate_attrs( attr_syntax_delete_if_not_flagged,
(void *)&fi, PR_TRUE );
@@ -1433,9 +1430,8 @@ attr_syntax_delete_all_not_flagged( unsigned long flag )
void
attr_syntax_delete_all()
{
- struct attr_syntax_enum_flaginfo fi;
+ struct attr_syntax_enum_flaginfo fi = {0};
- memset( &fi, 0, sizeof(fi));
attr_syntax_enumerate_attrs( attr_syntax_force_to_delete,
(void *)&fi, PR_TRUE );
}
@@ -1447,9 +1443,8 @@ attr_syntax_delete_all()
void
attr_syntax_delete_all_for_schemareload(unsigned long flag)
{
- struct attr_syntax_enum_flaginfo fi;
+ struct attr_syntax_enum_flaginfo fi = {0};
- memset(&fi, 0, sizeof(fi));
fi.asef_flag = flag;
attr_syntax_enumerate_attrs_ext(oid2asi, attr_syntax_delete_if_not_flagged,
(void *)&fi);
diff --git a/ldap/servers/slapd/back-ldbm/import-threads.c b/ldap/servers/slapd/back-ldbm/import-threads.c
index 1cc7b42..0557778 100644
--- a/ldap/servers/slapd/back-ldbm/import-threads.c
+++ b/ldap/servers/slapd/back-ldbm/import-threads.c
@@ -3670,7 +3670,7 @@ dse_conf_verify_core(struct ldbminfo *li, char *src_dir, char *file_name, char *
Slapi_Entry **backup_entries = NULL;
Slapi_Entry **bep = NULL;
Slapi_Entry **curr_entries = NULL;
- Slapi_PBlock srch_pb;
+ Slapi_PBlock srch_pb = {0};
filename = slapi_ch_smprintf("%s/%s", src_dir, file_name);
@@ -3732,10 +3732,9 @@ dse_conf_verify_core(struct ldbminfo *li, char *src_dir, char *file_name, char *
bep++;
}
/* 623986: terminate the list if we reallocated backup_entries */
- if (backup_entry_len > 256)
+ if (backup_entry_len > 256) {
*bep = NULL;
-
- pblock_init(&srch_pb);
+ }
if (entry_filter != NULL)
{ /* Single instance restoration */
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c
index 4b612db..cc570d4 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_config.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c
@@ -2230,10 +2230,9 @@ void ldbm_config_internal_set(struct ldbminfo *li, char *attrname, char *value)
*/
void replace_ldbm_config_value(char *conftype, char *val, struct ldbminfo *li)
{
- Slapi_PBlock pb;
+ Slapi_PBlock pb = {0};
Slapi_Mods smods;
- pblock_init(&pb);
slapi_mods_init(&smods, 1);
slapi_mods_add(&smods, LDAP_MOD_REPLACE, conftype, strlen(val), val);
slapi_modify_internal_set_pb(&pb, CONFIG_LDBM_DN,
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
index c2d0eca..a78d850 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
@@ -1037,9 +1037,9 @@ ldbm_back_modrdn( Slapi_PBlock *pb )
for ( i = 0; rdns[i] != NULL; i++ )
{
char *type;
- Slapi_Value *svp[2];
- Slapi_Value sv;
- memset(&sv,0,sizeof(Slapi_Value));
+ Slapi_Value *svp[2] = {0};
+ /* Have to use long form init due to presence of internal struct */
+ Slapi_Value sv = {{0}, 0, 0};
if ( slapi_rdn2typeval( rdns[i], &type, &sv.bv ) != 0 )
{
slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_modrdn",
@@ -2020,9 +2020,8 @@ moddn_rename_child_entry(
entry_set_maxcsn(e->ep_entry, opcsn);
}
{
- Slapi_Mods smods;
+ Slapi_Mods smods = {0};
Slapi_Mods *smodsp = NULL;
- memset(&smods, 0, sizeof(smods));
slapi_mods_init(&smods, 2);
slapi_mods_add( &smods, LDAP_MOD_DELETE, LDBM_ENTRYDN_STR,
strlen( backentry_get_ndn(e) ), backentry_get_ndn(e) );
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
index 51eb694..9e68a59 100644
--- a/ldap/servers/slapd/connection.c
+++ b/ldap/servers/slapd/connection.c
@@ -104,7 +104,7 @@ connection_get_operation(void)
{
struct Slapi_op_stack *stack_obj = (struct Slapi_op_stack *)PR_StackPop(op_stack);
if (!stack_obj) {
- stack_obj = (struct Slapi_op_stack *)slapi_ch_malloc(sizeof(struct Slapi_op_stack));
+ stack_obj = (struct Slapi_op_stack *)slapi_ch_calloc(1, sizeof(struct Slapi_op_stack));
stack_obj->op = operation_new( plugin_build_operation_action_bitmap( 0,
plugin_get_server_plg() ));
} else {
@@ -282,8 +282,7 @@ connection_reset(Connection* conn, int ns, PRNetAddr * from, int fromLen, int is
memcpy( conn->cin_addr, from, sizeof( PRNetAddr ) );
if ( PR_IsNetAddrType( conn->cin_addr, PR_IpAddrV4Mapped ) ) {
- PRNetAddr v4addr;
- memset( &v4addr, 0, sizeof( v4addr ) );
+ PRNetAddr v4addr = {{0}};
v4addr.inet.family = PR_AF_INET;
v4addr.inet.ip = conn->cin_addr->ipv6.ip.pr_s6_addr32[3];
PR_NetAddrToString( &v4addr, buf_ip, sizeof( buf_ip ) );
@@ -295,7 +294,7 @@ connection_reset(Connection* conn, int ns, PRNetAddr * from, int fromLen, int is
} else {
/* try syscall since "from" was not given and PR_GetPeerName failed */
/* a corner case */
- struct sockaddr_in addr; /* assuming IPv4 */
+ struct sockaddr_in addr = {0}; /* assuming IPv4 */
#if ( defined( hpux ) )
int addrlen;
#else
@@ -303,7 +302,6 @@ connection_reset(Connection* conn, int ns, PRNetAddr * from, int fromLen, int is
#endif
addrlen = sizeof( addr );
- memset( &addr, 0, addrlen );
if ( (conn->c_prfd == NULL) &&
(getpeername( conn->c_sd, (struct sockaddr *)&addr, &addrlen ) == 0) )
@@ -344,8 +342,7 @@ connection_reset(Connection* conn, int ns, PRNetAddr * from, int fromLen, int is
PL_strncpyz(buf_destip, "unknown local file", sizeof(buf_destip));
}
} else if ( PR_IsNetAddrType( conn->cin_destaddr, PR_IpAddrV4Mapped ) ) {
- PRNetAddr v4destaddr;
- memset( &v4destaddr, 0, sizeof( v4destaddr ) );
+ PRNetAddr v4destaddr = {{0}};
v4destaddr.inet.family = PR_AF_INET;
v4destaddr.inet.ip = conn->cin_destaddr->ipv6.ip.pr_s6_addr32[3];
PR_NetAddrToString( &v4destaddr, buf_destip, sizeof( buf_destip ) );
@@ -360,7 +357,7 @@ connection_reset(Connection* conn, int ns, PRNetAddr * from, int fromLen, int is
} else {
/* try syscall since c_prfd == NULL */
/* a corner case */
- struct sockaddr_in destaddr; /* assuming IPv4 */
+ struct sockaddr_in destaddr = {0}; /* assuming IPv4 */
#if ( defined( hpux ) )
int destaddrlen;
#else
@@ -368,7 +365,7 @@ connection_reset(Connection* conn, int ns, PRNetAddr * from, int fromLen, int is
#endif
destaddrlen = sizeof( destaddr );
- memset( &destaddr, 0, destaddrlen );
+
if ( (getsockname( conn->c_sd, (struct sockaddr *)&destaddr, &destaddrlen ) == 0) ) {
conn->cin_destaddr = (PRNetAddr *)slapi_ch_malloc( sizeof( PRNetAddr ));
memset( conn->cin_destaddr, 0, sizeof( PRNetAddr ));
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 6da658e..1ea5a84 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -2464,7 +2464,7 @@ handle_new_connection(Connection_Table *ct, int tcps, PRFileDesc *pr_acceptfd, i
int ns = 0;
Connection *conn = NULL;
/* struct sockaddr_in from;*/
- PRNetAddr from;
+ PRNetAddr from = {{0}};
PRFileDesc *pr_clonefd = NULL;
ber_len_t maxbersize;
slapdFrontendConfig_t *fecfg = getFrontendConfig();
@@ -2472,7 +2472,6 @@ handle_new_connection(Connection_Table *ct, int tcps, PRFileDesc *pr_acceptfd, i
if (newconn) {
*newconn = NULL;
}
- memset(&from, 0, sizeof(from)); /* reset to nulls so we can see what was set */
if ( (ns = accept_and_configure( tcps, pr_acceptfd, &from,
sizeof(from), secure, local, &pr_clonefd)) == SLAPD_INVALID_SOCKET ) {
return -1;
@@ -2519,8 +2518,7 @@ handle_new_connection(Connection_Table *ct, int tcps, PRFileDesc *pr_acceptfd, i
LBER_SBIOD_LEVEL_PROVIDER, conn );
#else /* !USE_OPENLDAP */
{
- struct lber_x_ext_io_fns func_pointers;
- memset(&func_pointers, 0, sizeof(func_pointers));
+ struct lber_x_ext_io_fns func_pointers = {0};
func_pointers.lbextiofn_size = LBER_X_EXTIO_FNS_SIZE;
func_pointers.lbextiofn_read = NULL; /* see connection_read_function */
func_pointers.lbextiofn_write = write_function;
@@ -3044,7 +3042,6 @@ slapd_listenhost2addr(const char *listenhost, PRNetAddr ***addr)
void *iter = NULL;
int addrcnt = 0;
int i = 0;
- memset( netaddr, 0, sizeof( PRNetAddr ));
/* need to count the address, first */
while ( (iter = PR_EnumerateAddrInfo( iter, infop, 0, netaddr ))
!= NULL ) {
@@ -3391,17 +3388,17 @@ static void
get_loopback_by_addr( void )
{
#ifdef GETHOSTBYADDR_BUF_T
- struct hostent hp;
- GETHOSTBYADDR_BUF_T hbuf;
+ struct hostent hp = {0};
+ GETHOSTBYADDR_BUF_T hbuf;
#endif
- unsigned long ipaddr;
- struct in_addr ia;
- int herrno, rc = 0;
+ unsigned long ipaddr;
+ struct in_addr ia;
+ int herrno = 0;
+ int rc = 0;
- memset( (char *)&hp, 0, sizeof(hp));
ipaddr = htonl( INADDR_LOOPBACK );
(void) GETHOSTBYADDR( (char *)&ipaddr, sizeof( ipaddr ),
- AF_INET, &hp, hbuf, sizeof(hbuf), &herrno );
+ AF_INET, &hp, hbuf, sizeof(hbuf), &herrno );
}
#endif /* RESOLVER_NEEDS_LOW_FILE_DESCRIPTORS */
diff --git a/ldap/servers/slapd/defbackend.c b/ldap/servers/slapd/defbackend.c
index a07fddb..9a9bc98 100644
--- a/ldap/servers/slapd/defbackend.c
+++ b/ldap/servers/slapd/defbackend.c
@@ -32,8 +32,8 @@
/*
* ---------------- Static Variables -----------------------------------------
*/
-static struct slapdplugin defbackend_plugin;
-static Slapi_Backend *defbackend_backend = NULL;
+static struct slapdplugin defbackend_plugin = {0};
+static Slapi_Backend *defbackend_backend = NULL;
/*
@@ -58,26 +58,23 @@ defbackend_init( void )
{
int rc;
char *errmsg;
- Slapi_PBlock pb;
+ Slapi_PBlock pb = {0};
slapi_log_err(SLAPI_LOG_TRACE, "defbackend_init", "<==\n");
/*
* create a new backend
*/
- pblock_init( &pb );
defbackend_backend = slapi_be_new( DEFBACKEND_TYPE , DEFBACKEND_TYPE, 1 /* Private */, 0 /* Do Not Log Changes */ );
- if (( rc = slapi_pblock_set( &pb, SLAPI_BACKEND, defbackend_backend ))
- != 0 ) {
- errmsg = "slapi_pblock_set SLAPI_BACKEND failed";
- goto cleanup_and_return;
+ if (( rc = slapi_pblock_set( &pb, SLAPI_BACKEND, defbackend_backend )) != 0 ) {
+ errmsg = "slapi_pblock_set SLAPI_BACKEND failed";
+ goto cleanup_and_return;
}
/*
* create a plugin structure for this backend since the
* slapi_pblock_set()/slapi_pblock_get() functions assume there is one.
*/
- memset( &defbackend_plugin, '\0', sizeof( struct slapdplugin ));
defbackend_plugin.plg_type = SLAPI_PLUGIN_DATABASE;
defbackend_backend->be_database = &defbackend_plugin;
if (( rc = slapi_pblock_set( &pb, SLAPI_PLUGIN, &defbackend_plugin ))
diff --git a/ldap/servers/slapd/delete.c b/ldap/servers/slapd/delete.c
index 37f5317..a16718a 100644
--- a/ldap/servers/slapd/delete.c
+++ b/ldap/servers/slapd/delete.c
@@ -107,12 +107,10 @@ free_and_return:;
Slapi_PBlock *
slapi_delete_internal(const char *idn, LDAPControl **controls, int dummy)
{
- Slapi_PBlock pb;
+ Slapi_PBlock pb = {0};
Slapi_PBlock *result_pb;
int opresult;
- pblock_init (&pb);
-
slapi_delete_internal_set_pb (&pb, idn, controls, NULL, plugin_get_default_component_id(), 0);
delete_internal_pb (&pb);
diff --git a/ldap/servers/slapd/entry.c b/ldap/servers/slapd/entry.c
index 3d0723c..51ca3cc 100644
--- a/ldap/servers/slapd/entry.c
+++ b/ldap/servers/slapd/entry.c
@@ -4102,7 +4102,6 @@ slapi_entry_diff(Slapi_Mods *smods, Slapi_Entry *e1, Slapi_Entry *e2, int diff_c
static void
delete_subtree(Slapi_PBlock *pb, const char *dn, void *plg_id)
{
- Slapi_PBlock mypb;
int ret = 0;
int opresult;
@@ -4117,11 +4116,11 @@ delete_subtree(Slapi_PBlock *pb, const char *dn, void *plg_id)
Slapi_DN *rootDN = slapi_sdn_new_dn_byval(dn);
slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries);
for (ep = entries; ep && *ep; ep++) {
+ Slapi_PBlock mypb = {0};
const Slapi_DN *sdn = slapi_entry_get_sdn_const(*ep);
-
- if (slapi_sdn_compare(sdn, rootDN) == 0)
+ if (slapi_sdn_compare(sdn, rootDN) == 0) {
continue;
- pblock_init(&mypb);
+ }
slapi_delete_internal_set_pb(&mypb, slapi_sdn_get_dn(sdn),
NULL, NULL, plg_id, 0);
slapi_delete_internal_pb(&mypb);
@@ -4157,7 +4156,6 @@ slapi_entries_diff(Slapi_Entry **old_entries, Slapi_Entry **curr_entries,
char *my_logging_prestr = "";
Slapi_Entry **oep, **cep;
int rval = 0;
- Slapi_PBlock pb;
#define SLAPI_ENTRY_FLAG_DIFF_IN_BOTH 0x80
if (NULL != logging_prestr && '\0' != *logging_prestr)
@@ -4222,7 +4220,7 @@ slapi_entries_diff(Slapi_Entry **old_entries, Slapi_Entry **curr_entries,
}
if (0 == isfirst && force_update && testall)
{
- pblock_init(&pb);
+ Slapi_PBlock pb = {0};
slapi_modify_internal_set_pb_ext(&pb,
slapi_entry_get_sdn_const(*oep),
slapi_mods_get_ldapmods_byref(smods),
@@ -4250,9 +4248,9 @@ slapi_entries_diff(Slapi_Entry **old_entries, Slapi_Entry **curr_entries,
{
if (force_update)
{
+ Slapi_PBlock pb = {0};
LDAPMod **mods;
slapi_entry2mods(*oep, NULL, &mods);
- pblock_init(&pb);
slapi_add_internal_set_pb(&pb, slapi_entry_get_dn_const(*oep),
mods, NULL, plg_id, 0);
slapi_add_internal_pb(&pb);
@@ -4279,7 +4277,7 @@ slapi_entries_diff(Slapi_Entry **old_entries, Slapi_Entry **curr_entries,
if (testall)
{
if (force_update) {
- pblock_init(&pb);
+ Slapi_PBlock pb = {0};
delete_subtree(&pb, slapi_entry_get_dn_const(*cep), plg_id);
pblock_done(&pb);
}
diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c
index f02c4f6..d64e7c6 100644
--- a/ldap/servers/slapd/fedse.c
+++ b/ldap/servers/slapd/fedse.c
@@ -1556,9 +1556,7 @@ static int
init_dse_file(const char *configdir, Slapi_DN *config)
{
int rc= 1; /* OK */
- Slapi_PBlock pb;
-
- memset(&pb, 0, sizeof(pb));
+ Slapi_PBlock pb = {0};
if(pfedse==NULL)
{
diff --git a/ldap/servers/slapd/filter.c b/ldap/servers/slapd/filter.c
index 085a8d9..77c369f 100644
--- a/ldap/servers/slapd/filter.c
+++ b/ldap/servers/slapd/filter.c
@@ -774,10 +774,9 @@ slapi_filter_free( struct slapi_filter *f, int recurse )
slapi_ch_free((void**)&f->f_mr_type);
slapi_ber_bvdone(&f->f_mr_value);
if (f->f_mr.mrf_destroy != NULL) {
- Slapi_PBlock pb;
- pblock_init (&pb);
+ Slapi_PBlock pb = {0};
if ( ! slapi_pblock_set (&pb, SLAPI_PLUGIN_OBJECT, f->f_mr.mrf_object)) {
- f->f_mr.mrf_destroy (&pb);
+ f->f_mr.mrf_destroy (&pb);
}
}
break;
diff --git a/ldap/servers/slapd/generation.c b/ldap/servers/slapd/generation.c
index bd1690b..76dffda 100644
--- a/ldap/servers/slapd/generation.c
+++ b/ldap/servers/slapd/generation.c
@@ -59,13 +59,11 @@ get_database_dataversion(const char *dn)
void
set_database_dataversion(const char *dn, const char *dataversion)
{
- LDAPMod gen_mod;
- LDAPMod *mods[2];
+ LDAPMod gen_mod = {0};
+ LDAPMod *mods[2] = {0};
struct berval* gen_vals[2];
struct berval gen_val;
- Slapi_PBlock *pb;
-
- memset (&gen_mod, 0, sizeof(gen_mod));
+ Slapi_PBlock *pb;
gen_mod.mod_op = LDAP_MOD_REPLACE | LDAP_MOD_BVALUES;
gen_mod.mod_type = "nsslapd-dataversion"; /* JCMREPL - Shouldn't be a Netscape specific attribute name */
diff --git a/ldap/servers/slapd/getsocketpeer.c b/ldap/servers/slapd/getsocketpeer.c
index 255322b..2a738a1 100644
--- a/ldap/servers/slapd/getsocketpeer.c
+++ b/ldap/servers/slapd/getsocketpeer.c
@@ -82,7 +82,7 @@ int slapd_get_socket_peer(PRFileDesc *nspr_fd, uid_t *uid, gid_t *gid)
#include <sys/types.h>
#include <sys/stat.h>
#include <errno.h>
- struct msghdr msg;
+ struct msghdr msg = {0};
struct iovec iov;
char dummy[8];
int pass_sd[2];
@@ -90,8 +90,6 @@ int slapd_get_socket_peer(PRFileDesc *nspr_fd, uid_t *uid, gid_t *gid)
unsigned int retrycnt = 0xffffffff; /* safety net */
int myerrno = 0;
- memset((void *)&msg, 0, sizeof(msg));
-
iov.iov_base = dummy;
iov.iov_len = sizeof(dummy);
msg.msg_iov = &iov;
diff --git a/ldap/servers/slapd/ldaputil.c b/ldap/servers/slapd/ldaputil.c
index 090b613..e5b7c56 100644
--- a/ldap/servers/slapd/ldaputil.c
+++ b/ldap/servers/slapd/ldaputil.c
@@ -1776,8 +1776,8 @@ credentials_are_valid(
{
char *logname = "credentials_are_valid";
int myrc = 0;
- krb5_creds mcreds; /* match these values */
- krb5_creds creds; /* returned creds */
+ krb5_creds mcreds = {0}; /* match these values */
+ krb5_creds creds = {0}; /* returned creds */
char *tgs_princ_name = NULL;
krb5_timestamp currenttime;
int authtracelevel = SLAPI_LOG_SHELL; /* special auth tracing */
@@ -1786,8 +1786,6 @@ credentials_are_valid(
int time_buffer = 30; /* seconds - go ahead and renew if creds are
about to expire */
- memset(&mcreds, 0, sizeof(mcreds));
- memset(&creds, 0, sizeof(creds));
*rc = 0;
if (!cc) {
/* ok - no error */
@@ -1890,7 +1888,7 @@ set_krb5_creds(
krb5_principal princ = NULL;
char *princ_name = NULL;
krb5_error_code rc = 0;
- krb5_creds creds;
+ krb5_creds creds = {0};
krb5_keytab kt = NULL;
char *cc_name = NULL;
char ktname[MAX_KEYTAB_NAME_LEN];
@@ -1902,10 +1900,6 @@ set_krb5_creds(
appear to be used
currently */
- /* wipe this out so we can safely free it later if we
- short circuit */
- memset(&creds, 0, sizeof(creds));
-
/*
* we are using static variables and sharing an in-memory credentials cache
* so we put a lock around all kerberos interactions
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
index 1e456c8..1195c54 100644
--- a/ldap/servers/slapd/log.c
+++ b/ldap/servers/slapd/log.c
@@ -3705,7 +3705,7 @@ log__delete_error_logfile(int locked)
int rv = 0;
char *logstr;
char buffer[BUFSIZ];
- char tbuf[TBUFSIZE];
+ char tbuf[TBUFSIZE] = {0};
/* If we have only one log, then will delete this one */
if (loginfo.log_error_maxnumlogs == 1) {
@@ -3826,7 +3826,6 @@ delete_logfile:
return 0;
}
}
- memset(tbuf, 0, sizeof(tbuf));
log_convert_time (delete_logp->l_ctime, tbuf, 1 /*short */);
if (!locked) {
/* if locked, we should not call slapi_log_err,
diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
index 158d49d..a59b7d5 100644
--- a/ldap/servers/slapd/main.c
+++ b/ldap/servers/slapd/main.c
@@ -1167,9 +1167,8 @@ cleanup:
void
signal2sigaction( int s, void *a )
{
- struct sigaction act;
+ struct sigaction act = {0};
- memset(&act, 0, sizeof(struct sigaction));
act.sa_handler = (VFP)a;
act.sa_flags = 0;
(void)sigemptyset( &act.sa_mask );
@@ -2003,7 +2002,7 @@ static int
slapd_exemode_ldif2db(void)
{
int return_value= 0;
- Slapi_PBlock pb;
+ Slapi_PBlock pb = {0};
struct slapdplugin *plugin;
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
@@ -2094,7 +2093,6 @@ slapd_exemode_ldif2db(void)
if (!(slapd_ldap_debug & LDAP_DEBUG_BACKLDBM)) {
g_set_detached(1);
}
- memset( &pb, '\0', sizeof(pb) );
pb.pb_backend = NULL;
pb.pb_plugin = plugin;
pb.pb_removedupvals = ldif2db_removedupvals;
@@ -2126,7 +2124,7 @@ static int
slapd_exemode_db2ldif(int argc, char** argv)
{
int return_value= 0;
- Slapi_PBlock pb;
+ Slapi_PBlock pb = {0};
struct slapdplugin *plugin;
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
char *my_ldiffile;
@@ -2218,7 +2216,6 @@ slapd_exemode_db2ldif(int argc, char** argv)
if (!(slapd_ldap_debug & LDAP_DEBUG_BACKLDBM)) {
g_set_detached(1);
}
- memset( &pb, '\0', sizeof(pb) );
pb.pb_backend = NULL;
pb.pb_plugin = plugin;
pb.pb_ldif_include = db2ldif_include;
@@ -2344,7 +2341,7 @@ static int slapd_exemode_db2index(void)
{
int return_value= 0;
struct slapdplugin *plugin;
- Slapi_PBlock pb;
+ Slapi_PBlock pb = {0};
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
mapping_tree_init();
@@ -2414,7 +2411,6 @@ static int slapd_exemode_db2index(void)
usage( myname, extraname );
return 1;
}
- memset( &pb, '\0', sizeof(pb) );
pb.pb_backend = NULL;
pb.pb_plugin = plugin;
pb.pb_db2index_attrs = db2index_attrs;
@@ -2432,7 +2428,7 @@ static int
slapd_exemode_db2archive(void)
{
int return_value= 0;
- Slapi_PBlock pb;
+ Slapi_PBlock pb = {0};
struct slapdplugin *backend_plugin;
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
@@ -2469,7 +2465,6 @@ slapd_exemode_db2archive(void)
g_set_detached(1);
}
- memset( &pb, '\0', sizeof(pb) );
pb.pb_backend = NULL;
pb.pb_plugin = backend_plugin;
pb.pb_instance_name = NULL;
@@ -2484,7 +2479,7 @@ static int
slapd_exemode_archive2db(void)
{
int return_value= 0;
- Slapi_PBlock pb;
+ Slapi_PBlock pb = {0};
struct slapdplugin *backend_plugin;
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
@@ -2522,7 +2517,6 @@ slapd_exemode_archive2db(void)
g_set_detached(1);
}
- memset( &pb, '\0', sizeof(pb) );
pb.pb_backend = NULL;
pb.pb_plugin = backend_plugin;
pb.pb_instance_name = cmd_line_instance_name;
@@ -2541,7 +2535,7 @@ static int
slapd_exemode_upgradedb(void)
{
int return_value= 0;
- Slapi_PBlock pb;
+ Slapi_PBlock pb = {0};
struct slapdplugin *backend_plugin;
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
@@ -2579,7 +2573,6 @@ slapd_exemode_upgradedb(void)
return 1;
}
- memset( &pb, '\0', sizeof(pb) );
pb.pb_backend = NULL;
pb.pb_plugin = backend_plugin;
pb.pb_seq_val = archive_name;
@@ -2608,7 +2601,7 @@ static int
slapd_exemode_upgradednformat(void)
{
int rc = -1; /* error, by default */
- Slapi_PBlock pb;
+ Slapi_PBlock pb = {0};
struct slapdplugin *backend_plugin;
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
@@ -2650,7 +2643,6 @@ slapd_exemode_upgradednformat(void)
goto bail;
}
- memset( &pb, '\0', sizeof(pb) );
pb.pb_backend = NULL;
pb.pb_plugin = backend_plugin;
pb.pb_instance_name = cmd_line_instance_name;
@@ -2686,7 +2678,7 @@ static int
slapd_exemode_dbverify(void)
{
int return_value = 0;
- Slapi_PBlock pb;
+ Slapi_PBlock pb = {0};
struct slapdplugin *backend_plugin;
/* this should be the first time to be called! if the init order
@@ -2706,7 +2698,6 @@ slapd_exemode_dbverify(void)
return 1;
}
- memset( &pb, '\0', sizeof(pb) );
pb.pb_backend = NULL;
pb.pb_seq_type = dbverify_verbose;
pb.pb_plugin = backend_plugin;
diff --git a/ldap/servers/slapd/mapping_tree.c b/ldap/servers/slapd/mapping_tree.c
index e0f1ffe..4e91757 100644
--- a/ldap/servers/slapd/mapping_tree.c
+++ b/ldap/servers/slapd/mapping_tree.c
@@ -3318,7 +3318,7 @@ slapi_get_suffix_by_dn(const Slapi_DN *dn)
int
slapi_mtn_set_referral(const Slapi_DN *sdn, char ** referral)
{
- Slapi_PBlock pb;
+ Slapi_PBlock pb = {0};
Slapi_Mods smods;
int rc = LDAP_SUCCESS,i = 0, j = 0;
Slapi_DN* node_sdn;
@@ -3390,7 +3390,6 @@ slapi_mtn_set_referral(const Slapi_DN *sdn, char ** referral)
if ( do_modify )
{
- pblock_init (&pb);
slapi_modify_internal_set_pb_ext (&pb, node_sdn,
slapi_mods_get_ldapmods_byref(&smods), NULL,
NULL, (void *) plugin_get_default_component_id(), 0);
@@ -3416,7 +3415,7 @@ slapi_mtn_set_referral(const Slapi_DN *sdn, char ** referral)
int
slapi_mtn_set_state(const Slapi_DN *sdn, char *state)
{
- Slapi_PBlock pb;
+ Slapi_PBlock pb = {0};
Slapi_Mods smods;
int rc = LDAP_SUCCESS;
Slapi_DN *node_sdn;
@@ -3444,7 +3443,6 @@ slapi_mtn_set_state(const Slapi_DN *sdn, char *state)
/* Otherwise, means that the state has changed, modify it */
slapi_mods_init (&smods, 1);
slapi_mods_add(&smods, LDAP_MOD_REPLACE, "nsslapd-state", strlen(state), state);
- pblock_init (&pb);
slapi_modify_internal_set_pb_ext (&pb, node_sdn,
slapi_mods_get_ldapmods_byref(&smods), NULL,
NULL, (void *) plugin_get_default_component_id(), 0);
@@ -3466,7 +3464,7 @@ bail:
Slapi_Attr *
mtn_get_attr(char* node_dn, char * type)
{
- Slapi_PBlock pb;
+ Slapi_PBlock pb = {0};
int res = 0;
Slapi_Entry **entries = NULL;
Slapi_Attr *attr = NULL;
@@ -3475,7 +3473,6 @@ mtn_get_attr(char* node_dn, char * type)
attrs = (char **)slapi_ch_calloc(2, sizeof(char *));
attrs[0] = slapi_ch_strdup(type);
- pblock_init(&pb);
slapi_search_internal_set_pb(&pb, node_dn, LDAP_SCOPE_BASE,
"objectclass=nsMappingTree", attrs, 0, NULL, NULL,
(void *) plugin_get_default_component_id(), 0);
@@ -3856,13 +3853,12 @@ static void dump_mapping_tree(mapping_tree_node *parent, int depth)
static int
_mtn_update_config_param(int op, char *type, char *strvalue)
{
- Slapi_PBlock confpb;
+ Slapi_PBlock confpb = {0};
Slapi_DN sdn;
Slapi_Mods smods;
LDAPMod **mods;
int rc = LDAP_PARAM_ERROR;
- pblock_init (&confpb);
slapi_mods_init (&smods, 0);
switch (op) {
case LDAP_MOD_DELETE:
diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c
index 3aa4359..51bf057 100644
--- a/ldap/servers/slapd/modify.c
+++ b/ldap/servers/slapd/modify.c
@@ -406,12 +406,10 @@ slapi_modify_internal(const char *idn,
LDAPControl **controls,
int dummy)
{
- Slapi_PBlock pb;
+ Slapi_PBlock pb = {0};
Slapi_PBlock *result_pb = NULL;
int opresult;
- pblock_init(&pb);
-
slapi_modify_internal_set_pb (&pb, idn, (LDAPMod**)mods, controls, NULL,
(void *)plugin_get_default_component_id(), 0);
diff --git a/ldap/servers/slapd/modrdn.c b/ldap/servers/slapd/modrdn.c
index 8b77539..15f5210 100644
--- a/ldap/servers/slapd/modrdn.c
+++ b/ldap/servers/slapd/modrdn.c
@@ -245,14 +245,12 @@ slapi_modrdn_internal(const char *iodn, const char *inewrdn, int deloldrdn, LDAP
Slapi_PBlock *
slapi_rename_internal(const char *iodn, const char *inewrdn, const char *inewsuperior, int deloldrdn, LDAPControl **controls, int dummy)
{
- Slapi_PBlock pb;
+ Slapi_PBlock pb = {0};
Slapi_PBlock *result_pb = NULL;
int opresult= 0;
Slapi_DN sdn;
Slapi_DN newsuperiorsdn;
- pblock_init (&pb);
-
slapi_sdn_init_dn_byref(&sdn, iodn);
slapi_sdn_init_dn_byref(&newsuperiorsdn, inewsuperior);
diff --git a/ldap/servers/slapd/operation.c b/ldap/servers/slapd/operation.c
index 19e572a..ccbc549 100644
--- a/ldap/servers/slapd/operation.c
+++ b/ldap/servers/slapd/operation.c
@@ -148,6 +148,7 @@ operation_init(Slapi_Operation *o, int flags)
if (NULL != o)
{
BerElement *ber = o->o_ber; /* may have already been set */
+ /* We can't get rid of this til we remove the operation stack. */
memset(o,0,sizeof(Slapi_Operation));
o->o_ber = ber;
o->o_msgid = -1;
@@ -195,7 +196,7 @@ operation_new(int flags)
BerElement *ber = NULL;
if(flags & OP_FLAG_INTERNAL)
{
- o = (Slapi_Operation *) slapi_ch_malloc(sizeof(Slapi_Operation));
+ o = (Slapi_Operation *) slapi_ch_calloc(1, sizeof(Slapi_Operation));
}
else
{
diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
index dcac322..52b8cf5 100644
--- a/ldap/servers/slapd/pblock.c
+++ b/ldap/servers/slapd/pblock.c
@@ -35,7 +35,7 @@ pblock_init_common(
)
{
PR_ASSERT( NULL != pb );
- memset( pb, '\0', sizeof(Slapi_PBlock) );
+ /* No need to memset, this is only called in backend_manager, and it uses {0} */
pb->pb_backend = be;
pb->pb_conn = conn;
pb->pb_op = op;
diff --git a/ldap/servers/slapd/plugin.c b/ldap/servers/slapd/plugin.c
index 4cdd567..7744aa6 100644
--- a/ldap/servers/slapd/plugin.c
+++ b/ldap/servers/slapd/plugin.c
@@ -1753,10 +1753,9 @@ plugin_dependency_startall(int argc, char** argv, char *errmsg, int operation, c
if(!config[plugin_index].entry_created)
{
int plugin_actions = 0;
- Slapi_PBlock newpb;
+ Slapi_PBlock newpb = {0};
Slapi_Entry *newe;
- pblock_init(&newpb);
newe = slapi_entry_dup( config[plugin_index].e );
slapi_add_entry_internal_set_pb(&newpb, newe, NULL,
plugin_get_default_component_id(), plugin_actions);
@@ -2776,8 +2775,9 @@ plugin_free(struct slapdplugin *plugin)
}
release_componentid(plugin->plg_identity);
slapi_counter_destroy(&plugin->plg_op_counter);
- if (!plugin->plg_group)
+ if (!plugin->plg_group) {
plugin_config_cleanup(&plugin->plg_conf);
+ }
slapi_ch_free((void**)&plugin);
}
@@ -2844,7 +2844,7 @@ plugin_setup(Slapi_Entry *plugin_entry, struct slapi_componentid *group,
struct slapi_componentid *cid = NULL;
const char *existname = 0;
slapi_plugin_init_fnptr initfunc = p_initfunc;
- Slapi_PBlock pb;
+ Slapi_PBlock pb = {0};
int status = 0;
int enabled = 1;
char *configdir = 0;
@@ -3067,7 +3067,6 @@ plugin_setup(Slapi_Entry *plugin_entry, struct slapi_componentid *group,
PR_snprintf(attrname, sizeof(attrname), "%s%d", ATTR_PLUGIN_ARG, ++ii);
} while (skipped < MAXSKIPPED);
- memset((char *)&pb, '\0', sizeof(pb));
slapi_pblock_set(&pb, SLAPI_PLUGIN, plugin);
slapi_pblock_set(&pb, SLAPI_PLUGIN_VERSION, (void *)SLAPI_PLUGIN_CURRENT_VERSION);
@@ -3472,9 +3471,7 @@ plugin_remove_plugins(struct slapdplugin *plugin_entry, char *plugin_type)
/*
* Call the close function, cleanup the hashtable & the global shutdown list
*/
- Slapi_PBlock pb;
-
- pblock_init(&pb);
+ Slapi_PBlock pb = {0};
plugin_set_stopped(plugin);
if (slapi_counter_get_value(plugin->plg_op_counter) > 0){
/*
@@ -4328,7 +4325,7 @@ bail:
int
slapi_set_plugin_default_config(const char *type, Slapi_Value *value)
{
- Slapi_PBlock pb;
+ Slapi_PBlock pb = {0};
Slapi_Entry **entries = NULL;
int rc = LDAP_SUCCESS;
char **search_attrs = NULL; /* used by search */
@@ -4340,7 +4337,6 @@ slapi_set_plugin_default_config(const char *type, Slapi_Value *value)
charray_add(&search_attrs, slapi_ch_strdup(type));
/* cn=plugin default config,cn=config */
- pblock_init(&pb);
slapi_search_internal_set_pb(&pb,
SLAPI_PLUGIN_DEFAULT_CONFIG, /* Base DN (normalized) */
LDAP_SCOPE_BASE,
@@ -4432,7 +4428,7 @@ slapi_set_plugin_default_config(const char *type, Slapi_Value *value)
int
slapi_get_plugin_default_config(char *type, Slapi_ValueSet **valueset)
{
- Slapi_PBlock pb;
+ Slapi_PBlock pb = {0};
Slapi_Entry **entries = NULL;
int rc = LDAP_PARAM_ERROR;
char **search_attrs = NULL; /* used by search */
@@ -4444,7 +4440,6 @@ slapi_get_plugin_default_config(char *type, Slapi_ValueSet **valueset)
charray_add(&search_attrs, slapi_ch_strdup(type));
/* cn=plugin default config,cn=config */
- pblock_init(&pb);
slapi_search_internal_set_pb(&pb,
SLAPI_PLUGIN_DEFAULT_CONFIG, /* Base DN (normalized) */
LDAP_SCOPE_BASE,
diff --git a/ldap/servers/slapd/plugin_internal_op.c b/ldap/servers/slapd/plugin_internal_op.c
index 05ee90d..8cbdf06 100644
--- a/ldap/servers/slapd/plugin_internal_op.c
+++ b/ldap/servers/slapd/plugin_internal_op.c
@@ -205,7 +205,7 @@ slapi_seq_callback( const char *ibase,
plugin_referral_entry_callback ref_callback)
{
int r;
- Slapi_PBlock pb;
+ Slapi_PBlock pb = {0};
if (ibase == NULL)
{
@@ -214,8 +214,6 @@ slapi_seq_callback( const char *ibase,
return -1;
}
- pblock_init(&pb);
-
slapi_seq_internal_set_pb(&pb, (char *)ibase, type, attrname, val, attrs, attrsonly, controls,
plugin_get_default_component_id(), 0);
diff --git a/ldap/servers/slapd/plugin_mr.c b/ldap/servers/slapd/plugin_mr.c
index 35874ec..d216d12 100644
--- a/ldap/servers/slapd/plugin_mr.c
+++ b/ldap/servers/slapd/plugin_mr.c
@@ -574,7 +574,7 @@ plugin_mr_filter_create (mr_filter_t* f)
{
int rc = LDAP_UNAVAILABLE_CRITICAL_EXTENSION;
struct slapdplugin* mrp = plugin_mr_find_registered (f->mrf_oid);
- Slapi_PBlock pb;
+ Slapi_PBlock pb = {0};
if (mrp != NULL)
{
@@ -599,7 +599,6 @@ plugin_mr_filter_create (mr_filter_t* f)
if (mrp)
{
/* set the default index create fn */
- pblock_init(&pb);
slapi_pblock_set(&pb, SLAPI_PLUGIN, mrp);
slapi_pblock_set(&pb, SLAPI_PLUGIN_MR_FILTER_CREATE_FN, default_mr_filter_create);
slapi_pblock_set(&pb, SLAPI_PLUGIN_MR_INDEXER_CREATE_FN, default_mr_indexer_create);
diff --git a/ldap/servers/slapd/schema.c b/ldap/servers/slapd/schema.c
index ff77c92..f50f573 100644
--- a/ldap/servers/slapd/schema.c
+++ b/ldap/servers/slapd/schema.c
@@ -1807,8 +1807,7 @@ schema_list_attributes_callback(struct asyntaxinfo *asi, void *arg)
char **
slapi_schema_list_attribute_names(unsigned long flag)
{
- struct listargs aew;
- memset(&aew,0,sizeof(struct listargs));
+ struct listargs aew = {0};
aew.flag=flag;
attr_syntax_enumerate_attrs(schema_list_attributes_callback, &aew,
@@ -5339,8 +5338,7 @@ init_schema_dse_ext(char *schemadir, Slapi_Backend *be,
int dont_write = 1;
int merge = 1;
int dont_dup_check = 1;
- Slapi_PBlock pb;
- memset(&pb, 0, sizeof(pb));
+ Slapi_PBlock pb = {0};
/* don't write out the file when reading */
slapi_pblock_set(&pb, SLAPI_DSE_DONT_WRITE_WHEN_ADDING, (void*)&dont_write);
/* duplicate entries are allowed */
diff --git a/ldap/servers/slapd/ssl.c b/ldap/servers/slapd/ssl.c
index 5ad22fd..f6da414 100644
--- a/ldap/servers/slapd/ssl.c
+++ b/ldap/servers/slapd/ssl.c
@@ -938,7 +938,7 @@ freeChildren( char **list ) {
static void
entrySetValue(Slapi_DN *sdn, char *type, char *value)
{
- Slapi_PBlock mypb;
+ Slapi_PBlock mypb = {0};
LDAPMod attr;
LDAPMod *mods[2];
char *values[2];
@@ -954,7 +954,6 @@ entrySetValue(Slapi_DN *sdn, char *type, char *value)
mods[0] = &attr;
mods[1] = NULL;
- pblock_init(&mypb);
slapi_modify_internal_set_pb_ext(&mypb, sdn, mods, NULL, NULL, (void *)plugin_get_default_component_id(), 0);
slapi_modify_internal_pb(&mypb);
pblock_done(&mypb);
diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c
index 0305ec8..ad52e9d 100644
--- a/ldap/servers/slapd/task.c
+++ b/ldap/servers/slapd/task.c
@@ -701,15 +701,13 @@ static Slapi_Entry *get_internal_entry(Slapi_PBlock *pb, char *dn)
static void modify_internal_entry(char *dn, LDAPMod **mods)
{
- Slapi_PBlock pb;
Slapi_Operation *op;
int ret = 0;
int tries = 0;
int dont_write_file = 1;
do {
-
- pblock_init(&pb);
+ Slapi_PBlock pb = {0};
slapi_modify_internal_set_pb(&pb, dn, mods, NULL, NULL,
(void *)plugin_get_default_component_id(), 0);
@@ -836,7 +834,7 @@ static int task_import_add(Slapi_PBlock *pb, Slapi_Entry *e,
int idx, rv = 0;
const char *do_attr_indexes, *uniqueid_kind_str;
int uniqueid_kind = SLAPI_UNIQUEID_GENERATE_TIME_BASED;
- Slapi_PBlock mypb;
+ Slapi_PBlock mypb = {0};
Slapi_Task *task;
char *nameFrombe_name = NULL;
const char *encrypt_on_import = NULL;
@@ -978,7 +976,6 @@ static int task_import_add(Slapi_PBlock *pb, Slapi_Entry *e,
goto out;
}
- memset(&mypb, 0, sizeof(mypb));
mypb.pb_backend = be;
mypb.pb_plugin = be->be_database;
mypb.pb_removedupvals = atoi(fetch_attr(e, "nsImportChunkSize", "0"));
@@ -1797,7 +1794,7 @@ task_upgradedb_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter,
int rv = SLAPI_DSE_CALLBACK_OK;
Slapi_Backend *be = NULL;
Slapi_Task *task = NULL;
- Slapi_PBlock mypb;
+ Slapi_PBlock mypb = {0};
const char *archive_dir = NULL;
const char *force = NULL;
const char *database_type = "ldbm database";
@@ -1864,7 +1861,6 @@ task_upgradedb_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter,
task->task_work = 1;
task->task_progress = 0;
- memset(&mypb, 0, sizeof(mypb));
mypb.pb_backend = be;
mypb.pb_plugin = be->be_database;
if (force && 0 == strcasecmp(force, "true"))
@@ -1956,17 +1952,14 @@ task_sysconfig_reload_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter,
/* skip comments */
continue;
} else {
- char env_value[sizeof(line)];
- char env_var[sizeof(line)];
+ char env_value[sizeof(line)] = {0};
+ char env_var[sizeof(line)] = {0};
int using_setenv = 0;
int value_index = 0;
int start_value = 0;
int var_index = 0;
int inquotes = 0;
- memset(env_var, 0, sizeof(env_var));
- memset(env_value, 0, sizeof(env_value));
-
/*
* Remove leading spaces and tabs
*/
diff --git a/ldap/servers/slapd/time.c b/ldap/servers/slapd/time.c
index 89bc91d..db17bab 100644
--- a/ldap/servers/slapd/time.c
+++ b/ldap/servers/slapd/time.c
@@ -364,12 +364,11 @@ write_genTime (time_t from, struct berval* into)
time_t
read_genTime(struct berval*from)
{
- struct tm t;
+ struct tm t = {0};
time_t retTime;
time_t diffsec = 0;
int i, gflag = 0, havesec = 0;
- memset (&t, 0, sizeof(t));
t.tm_isdst = -1;
t.tm_year = strntoul (from->bv_val , 4, 10) - 1900L;
t.tm_mon = strntoul (from->bv_val + 4, 2, 10) - 1;
diff --git a/ldap/servers/slapd/uniqueid.c b/ldap/servers/slapd/uniqueid.c
index c878a8f..3f1f6a8 100644
--- a/ldap/servers/slapd/uniqueid.c
+++ b/ldap/servers/slapd/uniqueid.c
@@ -37,16 +37,7 @@ static int isValidFormat (const char * buff);
Slapi_UniqueID *slapi_uniqueIDNew ()
{
Slapi_UniqueID *uId;
- uId = (Slapi_UniqueID*)slapi_ch_malloc (sizeof (Slapi_UniqueID));
-
- if (uId == NULL)
- {
- slapi_log_err(SLAPI_LOG_ERR, MODULE, "uniqueIDNew: "
- "failed to allocate new id.\n");
- return NULL;
- }
-
- memset (uId, 0, sizeof (Slapi_UniqueID));
+ uId = (Slapi_UniqueID*)slapi_ch_calloc (1, sizeof (Slapi_UniqueID));
return uId;
}
diff --git a/ldap/servers/slapd/uniqueidgen.c b/ldap/servers/slapd/uniqueidgen.c
index a6c03d7..6ac0799 100644
--- a/ldap/servers/slapd/uniqueidgen.c
+++ b/ldap/servers/slapd/uniqueidgen.c
@@ -176,16 +176,11 @@ int slapi_uniqueIDGenerateFromNameString (char **uId,
const void *name, int namelen)
{
int rc;
- Slapi_UniqueID idBase;
- Slapi_UniqueID idGen;
+ Slapi_UniqueID idBase = {0};
+ Slapi_UniqueID idGen = {0};
/* just use Id of all 0 as base id */
- if (uIdBase == NULL)
- {
- memset (&idBase, 0, sizeof (idBase));
- memset (&idGen, 0, sizeof (idGen));
- }
- else
+ if (uIdBase != NULL)
{
rc = slapi_uniqueIDScan (&idBase, uIdBase);
if (rc != UID_SUCCESS)
diff --git a/ldap/servers/slapd/util.c b/ldap/servers/slapd/util.c
index 3a77522..48fa3c4 100644
--- a/ldap/servers/slapd/util.c
+++ b/ldap/servers/slapd/util.c
@@ -412,7 +412,7 @@ filter_stuff_func(void *arg, const char *val, PRUint32 slen)
char*
slapi_filter_sprintf(const char *fmt, ...)
{
- struct filter_ctx ctx;
+ struct filter_ctx ctx = {0};
va_list args;
char *buf;
int rc;
@@ -1143,7 +1143,7 @@ int
slapd_chown_if_not_owner(const char *filename, uid_t uid, gid_t gid)
{
int fd = -1;
- struct stat statbuf;
+ struct stat statbuf = {0};
int result = 1;
if (!filename) {
return result;
@@ -1153,7 +1153,6 @@ slapd_chown_if_not_owner(const char *filename, uid_t uid, gid_t gid)
if (fd == -1) {
return result;
}
- memset(&statbuf, '\0', sizeof(statbuf));
if (!(result = fstat(fd, &statbuf)))
{
if (((uid != -1) && (uid != statbuf.st_uid)) ||
@@ -1519,16 +1518,16 @@ int util_info_sys_pages(size_t *pagesize, size_t *pages, size_t *procpages, size
*availpages = util_getvirtualmemsize() / *pagesize;
/* solaris has THE most annoying way to get this info */
{
- struct prpsinfo psi;
+ struct prpsinfo psi = {0};
char fn[40];
int fd;
sprintf(fn, "/proc/%d", getpid());
fd = open(fn, O_RDONLY);
if (fd >= 0) {
- memset(&psi, 0, sizeof(psi));
- if (ioctl(fd, PIOCPSINFO, (void *)&psi) == 0)
+ if (ioctl(fd, PIOCPSINFO, (void *)&psi) == 0) {
*procpages = psi.pr_size;
+ }
close(fd);
}
}
diff --git a/ldap/servers/slapd/uuid.c b/ldap/servers/slapd/uuid.c
index bfa8467..08ca7b3 100644
--- a/ldap/servers/slapd/uuid.c
+++ b/ldap/servers/slapd/uuid.c
@@ -118,7 +118,7 @@ typedef struct
static unsigned int uuid_seed = 0; /* seed for the random generator */
- uuid_state _state; /* generator's state */
+uuid_state _state;/* generator's state */
/* uuid_init -- initializes uuid layer */
int uuid_init (const char *configDir, const Slapi_DN *configDN, PRBool mtGen)
@@ -276,16 +276,13 @@ void uuid_create_from_name(guid_t * uuid, /* resulting UUID */
{
PK11Context *c = NULL;
- unsigned char hash[16];
+ unsigned char hash[16] = {0};
unsigned int hashLen;
- guid_t net_nsid; /* context UUID in network byte order */
-
- memset(hash, 0, 16);
+ guid_t net_nsid = {0}; /* context UUID in network byte order */
/* put name space ID in network byte order so it hashes the same
no matter what endian machine we're on */
- memset(&net_nsid, 0, sizeof(guid_t));
net_nsid.time_low = PR_htonl(nsid.time_low);
net_nsid.time_mid = PR_htons(nsid.time_mid);
net_nsid.time_hi_and_version = PR_htons(nsid.time_hi_and_version);
diff --git a/lib/ldaputil/certmap.c b/lib/ldaputil/certmap.c
index b05bbba..8525f51 100644
--- a/lib/ldaputil/certmap.c
+++ b/lib/ldaputil/certmap.c
@@ -164,11 +164,10 @@ static int certmap_name_to_secoid (const char *str)
NSAPI_PUBLIC int ldapu_list_alloc (LDAPUList_t **list)
{
- *list = (LDAPUList_t *)malloc(sizeof(LDAPUList_t));
+ *list = (LDAPUList_t *)calloc(1, sizeof(LDAPUList_t));
if (!*list) return LDAPU_ERR_OUT_OF_MEMORY;
- memset((void *)*list, 0, sizeof(LDAPUList_t));
return LDAPU_SUCCESS;
}
@@ -193,13 +192,12 @@ NSAPI_PUBLIC int ldapu_list_add_info (LDAPUList_t *list, void *info)
LDAPUListNode_t *node;
/* Allocate the list node and set info in the node. */
- node = (LDAPUListNode_t *)malloc(sizeof(LDAPUListNode_t));
+ node = (LDAPUListNode_t *)calloc(1, sizeof(LDAPUListNode_t));
if (!node) {
return LDAPU_ERR_OUT_OF_MEMORY;
}
- memset((void *)node, 0, sizeof(LDAPUListNode_t));
node->info = info;
return ldapu_list_add_node(list, node);
@@ -281,15 +279,13 @@ static int dbinfo_to_certinfo (DBConfDBInfo_t *db_info,
*certinfo_out = 0;
- certinfo = (LDAPUCertMapInfo_t *)malloc(sizeof(LDAPUCertMapInfo_t));
+ certinfo = (LDAPUCertMapInfo_t *)calloc(1, sizeof(LDAPUCertMapInfo_t));
if (!certinfo) {
rv = LDAPU_ERR_OUT_OF_MEMORY;
goto error;
}
- memset((void *)certinfo, 0, sizeof(LDAPUCertMapInfo_t));
-
/* hijack few structures rather then copy. Make the pointers to the
structures NULL in the original structure so that they don't freed up
when db_info is freed. */
@@ -1453,7 +1449,7 @@ int ldapu_certmap_init (const char *config_file,
LDAPUCertMapInfo_t **certmap_default)
{
int rv;
- certmap_listinfo = (LDAPUCertMapListInfo_t *)malloc(sizeof(LDAPUCertMapListInfo_t));
+ certmap_listinfo = (LDAPUCertMapListInfo_t *)calloc(1, sizeof(LDAPUCertMapListInfo_t));
*certmap_list = 0;
*certmap_default = 0;
@@ -1461,8 +1457,6 @@ int ldapu_certmap_init (const char *config_file,
if (!certmap_listinfo) return LDAPU_ERR_OUT_OF_MEMORY;
- memset((void *)certmap_listinfo, 0, sizeof(LDAPUCertMapListInfo_t));
-
rv = certmap_read_certconfig_file(config_file);
if (rv == LDAPU_SUCCESS) {
commit c27605b2b9fc988e7c28413548930836416b06af
Author: William Brown <firstyear(a)redhat.com>
Date: Fri Nov 18 10:09:50 2016 +1000
Ticket 49021 - Automatic thread tuning
Bug Description: We currently always set 30 threads on systems no matter their
size. We should tune the threads more appropriately out of the box.
Fix Description: Add a thread autotuning. We start at 16 threads, and ramp up,
reducing the rate of addition as we grow to a cap of 512 threads.
https://fedorahosted.org/389/ticket/49021
Author: wibrown
Review by: nhosoi, mreynolds (Thanks)
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index b168506..0e818a9 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -1457,7 +1457,7 @@ FrontendConfig_init(void) {
init_require_secure_binds = cfg->require_secure_binds = LDAP_OFF;
cfg->allow_anon_access = SLAPD_DEFAULT_ALLOW_ANON_ACCESS;
init_slapi_counters = cfg->slapi_counters = LDAP_ON;
- cfg->threadnumber = SLAPD_DEFAULT_MAX_THREADS;
+ cfg->threadnumber = util_get_hardware_threads();
cfg->maxthreadsperconn = SLAPD_DEFAULT_MAX_THREADS_PER_CONN;
cfg->reservedescriptors = SLAPD_DEFAULT_RESERVE_FDS;
cfg->idletimeout = SLAPD_DEFAULT_IDLE_TIMEOUT;
@@ -3867,34 +3867,38 @@ config_set_encryptionalias( const char *attrname, char *value, char *errorbuf, i
return retVal;
}
-int
+int
config_set_threadnumber( const char *attrname, char *value, char *errorbuf, int apply ) {
- int retVal = LDAP_SUCCESS;
- long threadnum = 0;
- char *endp = NULL;
+ int retVal = LDAP_SUCCESS;
+ long threadnum = 0;
+ char *endp = NULL;
- slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- if ( config_value_is_null( attrname, value, errorbuf, 0 )) {
- return LDAP_OPERATIONS_ERROR;
- }
+ if ( config_value_is_null( attrname, value, errorbuf, 0 )) {
+ return LDAP_OPERATIONS_ERROR;
+ }
- errno = 0;
- threadnum = strtol(value, &endp, 10);
-
- if ( *endp != '\0' || errno == ERANGE || threadnum < 1 || threadnum > 65535 ) {
- slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE,
- "%s: invalid value \"%s\", maximum thread number must range from 1 to 65535", attrname, value);
- retVal = LDAP_OPERATIONS_ERROR;
- }
-
- if (apply) {
- CFG_LOCK_WRITE(slapdFrontendConfig);
- /* max_threads = threadnum; */
- slapdFrontendConfig->threadnumber = threadnum;
- CFG_UNLOCK_WRITE(slapdFrontendConfig);
- }
- return retVal;
+ errno = 0;
+ threadnum = strtol(value, &endp, 10);
+
+ /* Means we want to re-run the hardware detection. */
+ if (threadnum == -1) {
+ threadnum = util_get_hardware_threads();
+ }
+
+ if ( *endp != '\0' || errno == ERANGE || threadnum < 1 || threadnum > 65535 ) {
+ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE,
+ "%s: invalid value \"%s\", maximum thread number must range from 1 to 65535", attrname, value);
+ retVal = LDAP_OPERATIONS_ERROR;
+ }
+ if (apply) {
+ CFG_LOCK_WRITE(slapdFrontendConfig);
+ /* max_threads = threadnum; */
+ slapdFrontendConfig->threadnumber = threadnum;
+ CFG_UNLOCK_WRITE(slapdFrontendConfig);
+ }
+ return retVal;
}
int
@@ -5497,16 +5501,25 @@ config_get_encryptionalias(void) {
return retVal;
}
-int
+long
config_get_threadnumber(void) {
- slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- int retVal;
-
- CFG_LOCK_READ(slapdFrontendConfig);
- retVal = slapdFrontendConfig->threadnumber;
- CFG_UNLOCK_READ(slapdFrontendConfig);
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+ long retVal;
- return retVal;
+ CFG_LOCK_READ(slapdFrontendConfig);
+ retVal = slapdFrontendConfig->threadnumber;
+ CFG_UNLOCK_READ(slapdFrontendConfig);
+
+ if (retVal == -1) {
+ retVal = util_get_hardware_threads();
+ }
+
+ /* We *still* can't detect hardware threads. Okay, return 30 :( */
+ if (retVal == -1) {
+ retVal = 30;
+ }
+
+ return retVal;
}
int
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index 73010c2..af728a6 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -472,7 +472,7 @@ char *config_get_rootpwstoragescheme(void);
char *config_get_localuser(void);
char *config_get_workingdir(void);
char *config_get_encryptionalias(void);
-int config_get_threadnumber(void);
+long config_get_threadnumber(void);
int config_get_maxthreadsperconn(void);
int config_get_maxdescriptors(void);
int config_get_reservedescriptors(void);
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 1066f3e..403bfd9 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -257,8 +257,8 @@ typedef void (*VFPV)(); /* takes undefined arguments */
#define SLAPD_DEFAULT_OUTBOUND_LDAP_IO_TIMEOUT_STR "300000"
#define SLAPD_DEFAULT_RESERVE_FDS 64
#define SLAPD_DEFAULT_RESERVE_FDS_STR "64"
-#define SLAPD_DEFAULT_MAX_THREADS 30 /* connection pool threads */
-#define SLAPD_DEFAULT_MAX_THREADS_STR "30"
+#define SLAPD_DEFAULT_MAX_THREADS -1 /* connection pool threads */
+#define SLAPD_DEFAULT_MAX_THREADS_STR "-1"
#define SLAPD_DEFAULT_MAX_THREADS_PER_CONN 5 /* allowed per connection */
#define SLAPD_DEFAULT_MAX_THREADS_PER_CONN_STR "5"
#define SLAPD_DEFAULT_MAX_BERSIZE_STR "0"
@@ -2405,7 +2405,7 @@ typedef struct _slapdFrontendConfig {
char *SNMPorganization;
char *SNMPlocation;
char *SNMPcontact;
- int threadnumber;
+ long threadnumber;
int timelimit;
char *accesslog;
struct berval **defaultreferral;
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index fc05b42..406b014 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -1383,6 +1383,13 @@ int util_info_sys_pages(size_t *pagesize, size_t *pages, size_t *procpages, size
int util_is_cachesize_sane(size_t *cachesize);
/**
+ * Retrieve the number of threads the server should run with based on this hardware.
+ *
+ * \return -1 if the hardware detection failed. Any positive value is threads to use.
+ */
+long util_get_hardware_threads(void);
+
+/**
* Write an error message to the given error buffer.
*
* \param errorbuf. The buffer that the error message is written into. If NULL, nothing happens. It could be a static array or allocated memory. If it is allocated memory, the next param len should be given.
diff --git a/ldap/servers/slapd/util.c b/ldap/servers/slapd/util.c
index 9a8f61d..3a77522 100644
--- a/ldap/servers/slapd/util.c
+++ b/ldap/servers/slapd/util.c
@@ -1829,6 +1829,54 @@ out:
return issane;
}
+long
+util_get_hardware_threads(void) {
+#ifdef LINUX
+ long hw_threads = sysconf(_SC_NPROCESSORS_ONLN);
+ long threads = 0;
+ slapi_log_err(SLAPI_LOG_TRACE, "util_get_hardware_threads", "Detected %lu hardware threads\n", threads);
+ /*
+ * Now we determine the number to run with based on threads. Initially, for
+ * low processor counts we ramp up quickly, we plateau a little, then, we
+ * at high numbers start to plateau and increase slowly.
+ * Should be
+ * 1 -> 16
+ * 2 -> 16
+ * 4 -> 24
+ * 8 -> 32
+ * 16 -> 48
+ * 32 -> 64
+ * 64 -> 96
+ * 128 -> 192
+ * 256 -> 384
+ * 512 -> 512
+ * 1024 -> 512
+ * 2048 -> 512
+ */
+
+ if (hw_threads >= 0 && hw_threads < 4) {
+ threads = 16;
+ } else if (hw_threads >= 4 && hw_threads < 32) {
+ threads = 16 + (hw_threads * 2);
+ } else if (hw_threads >= 32 && hw_threads < 64) {
+ threads = (hw_threads * 2);
+ } else if (hw_threads >= 64 && hw_threads < 512) {
+ /* Same as *1.5 */
+ threads = (hw_threads * 2) - (hw_threads / 2);
+ } else {
+ /* Cap at 512 for now ... */
+ threads = 512;
+ }
+ slapi_log_err(SLAPI_LOG_INFO, "util_get_hardware_threads", "Automatically configuring %lu threads\n", threads);
+
+ return threads;
+#else
+ slapi_log_err(SLAPI_LOG_ERR, "util_get_hardware_threads", "ERROR: Cannot detect hardware threads on this platform. This is probably a bug!\n");
+ /* Can't detect threads on this platform! */
+ return -1;
+#endif
+}
+
void
slapi_create_errormsg(
char *errorbuf,
commit d92394c23f5c12116530834f2519ddfc410e6b8a
Author: William Brown <firstyear(a)redhat.com>
Date: Fri Nov 25 10:27:46 2016 +1000
Ticket 48894 - Issues with delete of entrywsi with large entries.
Bug Description: During deletes of entries with a high number of attributes,
a high amount of latency was observed. This was because during the valueset
purge for older csn's the vs->sorted was disposed of. This triggered a rebuild
of the valueset.
Fix Description: During the delete, don't purge the vs->sorted array. We need
to preserve it if possible which prevents the need to resort the array after
the delete.
https://fedorahosted.org/389/ticket/48894
Author: Mohammad N., Burk, J.
Review by: wibrown, mreynolds (Thanks)
diff --git a/ldap/servers/slapd/valueset.c b/ldap/servers/slapd/valueset.c
index 729a924..ac2f1be 100644
--- a/ldap/servers/slapd/valueset.c
+++ b/ldap/servers/slapd/valueset.c
@@ -422,61 +422,6 @@ valuearray_remove_value(const Slapi_Attr *a, Slapi_Value **va, const Slapi_Value
return r;
}
-/*
- * Remove any values older than the CSN.
- */
-int
-valuearray_purge(Slapi_Value ***va, const CSN *csn)
-{
- int numValues=0;
- int i=0;
- int nextValue=0;
-
- PR_ASSERT(va!=NULL && *va!=NULL);
-
- /* Loop over all the values freeing the old ones. */
- for(i=0; (*va)[i]; i++)
- {
- csnset_purge(&((*va)[i]->v_csnset),csn);
- if ((*va)[i]->v_csnset == NULL)
- {
- slapi_value_free(&(*va)[i]);
- (*va)[i] = NULL;
- }
- }
- /* Now compact the value list. */
- numValues=i;
- nextValue = 0;
- i = 0;
- for(i=0;i<numValues;i++)
- {
- while((nextValue < numValues) && (NULL == (*va)[nextValue]))
- {
- nextValue++;
- }
- if(nextValue < numValues)
- {
- (*va)[i] = (*va)[nextValue];
- nextValue++;
- }
- else
- {
- break;
- }
- }
- (*va)[i] = NULL;
-
- /* All the values were deleted, we can discard the whole array. */
- if(NULL == (*va)[0])
- {
- slapi_ch_free((void**)va);
- *va= NULL;
- }
-
- /* return the number of remaining values */
- return(i);
-}
-
size_t
valuearray_size(Slapi_Value **va)
{
@@ -785,6 +730,80 @@ valueset_remove_value(const Slapi_Attr *a, Slapi_ValueSet *vs, const Slapi_Value
return r;
}
+/*
+ * Remove any values older than the CSN from valueset.
+ */
+int
+valueset_array_purge(Slapi_ValueSet *vs, const CSN *csn)
+{
+ size_t i = 0;
+ size_t j = 0;
+ int nextValue = 0;
+ int numValues = 0;
+
+ /* Loop over all the values freeing the old ones. */
+ for(i = 0; i < vs->num; i++)
+ {
+ if (vs->sorted) {
+ j = vs->sorted[i];
+ } else {
+ j = i;
+ }
+ csnset_purge(&(vs->va[j]->v_csnset),csn);
+ if (vs->va[j]->v_csnset == NULL) {
+ slapi_value_free(&vs->va[j]);
+ vs->va[j] = NULL;
+ }
+ }
+ /* Now compact the value/sorted list. */
+ numValues = i;
+ nextValue = 0;
+ for(i = 0; i<numValues; i++) {
+ if (vs->sorted) {
+ j = vs->sorted[nextValue];
+ } else {
+ j = nextValue;
+ }
+ while((nextValue < numValues) && (NULL == vs->va[j])) {
+ if (vs->sorted) {
+ j = vs->sorted[nextValue++];
+ } else {
+ nextValue++;
+ }
+ }
+ if(nextValue < numValues) {
+ if(vs->sorted) {
+ vs->va[vs->sorted[i]] = vs->va[j];
+ vs->sorted[i] = j;
+ } else {
+ vs->va[i] = vs->va[j];
+ }
+ nextValue++;
+ } else {
+ break;
+ }
+ }
+
+ if(vs->sorted) {
+ vs->va[vs->sorted[i]] = NULL;
+ vs->sorted[i] = 0;
+ } else {
+ vs->va[i] = NULL;
+ }
+
+ /* All the values were deleted, we can discard the whole array. */
+ if(NULL == vs->va[0]) {
+ if(vs->sorted) {
+ slapi_ch_free ((void **)&vs->sorted);
+ }
+ slapi_ch_free ((void **)&vs->va);
+ vs->va= NULL;
+ }
+
+ /* return the number of remaining values */
+ return i;
+}
+
/*
* Remove any values older than the CSN.
*/
@@ -793,22 +812,12 @@ valueset_purge(Slapi_ValueSet *vs, const CSN *csn)
{
int r= 0;
if(!valuearray_isempty(vs->va)) {
- /* valuearray_purge is not valueset and sorting aware,
- * maybe need to rewrite, at least keep the valueset
- * consistent
- */
- r= valuearray_purge(&vs->va, csn);
+ r= valueset_array_purge(vs, csn);
vs->num = r;
if (vs->va == NULL) {
/* va was freed */
vs->max = 0;
}
- /* we can no longer rely on the sorting */
- if (vs->sorted != NULL)
- {
- slapi_ch_free ((void **)&vs->sorted);
- vs->sorted = NULL;
- }
PR_ASSERT((vs->sorted == NULL) || (vs->num < VALUESET_ARRAY_SORT_THRESHOLD) || ((vs->num >= VALUESET_ARRAY_SORT_THRESHOLD) && (vs->sorted[0] < vs->num)));
}
return 0;
commit 77f5376a533c01545ddb3fefacd35665b1530600
Author: William Brown <firstyear(a)redhat.com>
Date: Fri Nov 25 10:50:14 2016 +1000
Ticket 49054 - Fix sasl_map unused paramater compiler warnings.
Bug Description: sasl_map reports unused parameter warnings, but because it's
a callback the parameters are needed.
Fix Description: Add the gcc attributes to mask the warnings.
https://fedorahosted.org/389/ticket/49054
Author: wibrown
Review by: mreynolds (Thanks)
diff --git a/ldap/servers/slapd/sasl_map.c b/ldap/servers/slapd/sasl_map.c
index 1b28e4c..84f845d 100644
--- a/ldap/servers/slapd/sasl_map.c
+++ b/ldap/servers/slapd/sasl_map.c
@@ -434,7 +434,7 @@ sasl_map_read_config_startup(sasl_map_private *priv)
}
int
-sasl_map_config_add(Slapi_PBlock *pb, Slapi_Entry* entryBefore, Slapi_Entry* e, int *returncode, char *returntext, void *arg)
+sasl_map_config_add(Slapi_PBlock *pb, Slapi_Entry* entryBefore, Slapi_Entry* e, int *returncode, char *returntext __attribute__((unused)) , void *arg)
{
int ret = 0;
sasl_map_data *dp = NULL;
@@ -485,7 +485,7 @@ sasl_map_config_modify(Slapi_PBlock *pb, Slapi_Entry* entryBefore, Slapi_Entry*
}
int
-sasl_map_config_delete(Slapi_PBlock *pb, Slapi_Entry* entryBefore, Slapi_Entry* e, int *returncode, char *returntext, void *arg)
+sasl_map_config_delete(Slapi_PBlock *pb, Slapi_Entry* entryBefore, Slapi_Entry* e, int *returncode, char *returntext __attribute__((unused)), void *arg)
{
int ret = 0;
sasl_map_private *priv = sasl_map_get_global_priv();
6 years, 10 months
dirsrvtests/tests
by Simon Pichugin
dirsrvtests/tests/suites/acctpolicy_plugin/accpol_test.py | 196 ++++++++++++++
1 file changed, 196 insertions(+)
New commits:
commit 86bffc865080b45090a9b0913e4f70cc23d9d2b3
Author: Sankar Ramalingam <sramling(a)redhat.com>
Date: Thu Nov 24 19:34:25 2016 +0530
Ticket 48050 - Add test suite to acctpolicy_plugin
Description: Verify if user account is inactivated
when accountInactivityLimit is exceeded.
https://fedorahosted.org/389/ticket/48050
Reviewed by: spichugi
Signed-off-by: Simon Pichugin <spichugi(a)redhat.com>
diff --git a/dirsrvtests/tests/suites/acctpolicy_plugin/accpol_test.py b/dirsrvtests/tests/suites/acctpolicy_plugin/accpol_test.py
new file mode 100644
index 0000000..22acc39
--- /dev/null
+++ b/dirsrvtests/tests/suites/acctpolicy_plugin/accpol_test.py
@@ -0,0 +1,196 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import pytest
+import ldif
+import ldap.modlist as modlist
+from ldif import LDIFParser,LDIFWriter
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from lib389.utils import *
+
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+LOCAL_CONFIG = 'cn=AccountPolicy1,ou=people,dc=example,dc=com'
+TEMPLT_COS = 'cn=TempltCoS,ou=people,dc=example,dc=com'
+DEFN_COS = 'cn=DefnCoS,ou=people,dc=example,dc=com'
+ACCPOL_DN = "cn={},{}".format(PLUGIN_ACCT_POLICY, DN_PLUGIN)
+CONFIG_DN = "cn=config,{}".format(ACCPOL_DN)
+SUBTREE = 'ou=people'
+SUFFIX = DEFAULT_SUFFIX
+USR_NAME = 'testusr'
+NOF_USERS = 5
+INACT_VAL = 15
+USR_RDN = '{}'.format(USR_NAME)
+USR_DN = 'uid={},{},{}'.format(USR_RDN, SUBTREE, SUFFIX)
+USER_PW = 'Secret1234'
+
+
+class TopologyStandalone(object):
+ def __init__(self, standalone):
+ standalone.open()
+ self.standalone = standalone
+
+
+(a)pytest.fixture(scope="module")
+def topology(request):
+ # Creating standalone instance ...
+ standalone = DirSrv(verbose=False)
+ args_instance[SER_HOST] = HOST_STANDALONE
+ args_instance[SER_PORT] = PORT_STANDALONE
+ args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_standalone = args_instance.copy()
+ standalone.allocate(args_standalone)
+ instance_standalone = standalone.exists()
+ if instance_standalone:
+ log.info("Instance detected")
+ standalone.delete()
+ standalone.create()
+ standalone.open()
+
+ # Delete each instance in the end
+ def fin():
+ standalone.delete()
+ request.addfinalizer(fin)
+
+ # Clear out the tmp dir
+ standalone.clearTmpDir(__file__)
+
+ return TopologyStandalone(standalone)
+
+
+(a)pytest.fixture(scope="module")
+def accpolicy_local(topology):
+ """Configure account policy plugin based
+ on LDIF file and restart the server.
+ """
+
+ log.info('Enabling account policy plugin and restarting the server')
+ try:
+ topology.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY)
+ topology.standalone.modify_s(ACCPOL_DN, [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', CONFIG_DN)])
+ topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes')])
+ topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'stateattrname', 'lastLoginTime')])
+ topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'altstateattrname', 'createTimestamp')])
+ topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'specattrname', 'acctPolicySubentry')])
+ topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'limitattrname', 'accountInactivityLimit')])
+ except ldap.LDAPError as e:
+ log.error("Failed to modify account policy plugin attrs attrs")
+ raise
+
+ log.info("Adding Local account policy plugin configuration entries")
+ try:
+ topology.standalone.add_s(Entry((LOCAL_CONFIG, {
+ 'objectclass': ['top', 'ldapsubentry', 'extensibleObject', 'accountpolicy'],
+ 'accountInactivityLimit': '15'})))
+ topology.standalone.add_s(Entry((TEMPLT_COS, {
+ 'objectclass': ['top', 'ldapsubentry', 'extensibleObject', 'cosTemplate'],
+ 'acctPolicySubentry': LOCAL_CONFIG})))
+ topology.standalone.add_s(Entry((DEFN_COS, {
+ 'objectclass': ['top', 'ldapsubentry', 'cosSuperDefinition', 'cosPointerDefinition'],
+ 'cosTemplateDn': TEMPLT_COS,
+ 'cosAttribute': 'acctPolicySubentry default operational-default'})))
+ except ldap.LDAPError as e:
+ log.error('Failed to add entry ({}, {}, {}):'.format(LOCAL_CONFIG, TEMPLT_COS, DEFN_COS))
+ raise
+ topology.standalone.restart(timeout=10)
+
+
+(a)pytest.fixture(scope="module")
+def users(topology, request):
+ """Add users to the given SUFFIX and SUBTREE."""
+
+ log.info('Adding {} {} users to {} SUBTREE {} SUFFIX'.format(NOF_USERS, USR_NAME, SUBTREE, SUFFIX))
+ for NUM in range(1, NOF_USERS):
+ USR_RDN = '{}{}'.format(USR_NAME, NUM)
+ USR_DN = 'uid={},{},{}'.format(USR_RDN, SUBTREE, SUFFIX)
+ try:
+ topology.standalone.add_s(Entry((USR_DN, {
+ 'objectclass': 'top person'.split(),
+ 'objectclass': 'inetorgperson',
+ 'cn': USR_RDN,
+ 'sn': USR_RDN,
+ 'userpassword': 'Secret1234',
+ 'mail': '{}(a)redhat.com'.format(USR_RDN)})))
+ except ldap.LDAPError as e:
+ log.error('Failed to add {} user: error {}'.format(USR_DN, e.message['desc']))
+ raise
+
+ def fin():
+ log.info('Deleting {} {} users from {} {}'.format(NOF_USERS, USR_NAME, SUBTREE, SUFFIX))
+ topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ for NUM in range(1, NOF_USERS):
+ USR_RDN = '{}{}'.format(USR_NAME, NUM)
+ USR_DN = 'uid={},{},{}'.format(USR_RDN, SUBTREE, SUFFIX)
+ try:
+ topology.standalone.delete_s(USR_DN)
+ except ldap.LDAPError as e:
+ log.error('Failed to delete {} :error- {}'.format(USR_DN, e.message['desc']))
+ raise
+ request.addfinalizer(fin)
+
+
+def test_inact_plugin(topology, accpolicy_local, users):
+ """Verify if user account is inactivated when accountInactivityLimit is exceeded. User is created in the default SUFFIX.
+
+ :Feature: Account Policy Plugin
+
+ :Setup: Standalone instance, Local account policy plugin configuration,
+ accountInactivityLimit set to 15, Inactivate account by Account policy plugin
+
+ :Steps: 1. Configure account policy plugin with accpol_local for ou=people SUBTREE
+ 2. Set accountInactivityLimit to 15
+ 3. Add few users to ou=people SUBTREE in the default SUFFIX
+ 4. Wait for 12 secs and run ldapsearch as normal user to check if its not inactivated, expected 0.
+ 5. Wait for 3 secs or till accountInactivityLimit is exceeded
+ 6. Run ldapsearch as normal user and check if its inactivated, expected error 19.
+
+ :Assert: Should return error code 19
+ """
+
+ log.info("AccountInactivityLimit set to 15. Account will be inactivated if not accessed in 15 secs")
+ log.info("Sleeping for 12 secs to check if account is not inactivated, expected value 0")
+ time.sleep(12)
+ for NUM in range(2, NOF_USERS):
+ USR_RDN = '{}{}'.format(USR_NAME, NUM)
+ USR_DN = 'uid={},{},{}'.format(USR_RDN, SUBTREE, SUFFIX)
+ try:
+ topology.standalone.simple_bind_s(USR_DN, USER_PW)
+ except ldap.LDAPError as e:
+ log.error('Checking if {} is inactivated: error {}'.format(USR_DN, e.message['desc']))
+ raise
+
+ USR_DN = 'uid={}1,{},{}'.format(USR_NAME, SUBTREE, SUFFIX)
+ log.info("Sleeping for 4 more secs to check if {} is inactivated, expected error 19".format(USR_DN))
+ time.sleep(4)
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION) as e:
+ topology.standalone.simple_bind_s(USR_DN, USER_PW)
+
+ USR_DN = 'uid={}2,{},{}'.format(USR_NAME, SUBTREE, SUFFIX)
+ log.info("Checking if {} is not inactivated, expected value 0".format(USR_DN))
+ try:
+ topology.standalone.simple_bind_s(USR_DN, USER_PW)
+ except ldap.LDAPError as e:
+ log.error('Checking if {} is inactivated : error {}'.format(USR_DN, e.message['desc']))
+ raise
+ time.sleep(12)
+ for NUM in range(3, NOF_USERS):
+ USR_RDN = '{}{}'.format(USR_NAME, NUM)
+ USR_DN = 'uid={},{},{}'.format(USR_RDN, SUBTREE, SUFFIX)
+ log.info("Checking if {} is inactivated, expected error 19".format(USR_DN))
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION) as e:
+ topology.standalone.simple_bind_s(USR_DN, USER_PW)
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s {}".format(CURRENT_FILE))
6 years, 10 months
rpm/389-ds-base.spec.in
by vashirov
rpm/389-ds-base.spec.in | 1 +
1 file changed, 1 insertion(+)
New commits:
commit 6b41a3423a0cdbbb49b1b168ec724f383fba80b0
Author: Viktor Ashirov <vashirov(a)redhat.com>
Date: Tue Nov 22 08:04:29 2016 +0100
Ticket 49048 - Fix rpm build failure
Bug Description:
md_docs_intro.3.gz is installed but unpackaged.
Fix Description:
Exclude file from being installed.
https://fedorahosted.org/389/ticket/49048
Reviewed by: mreynolds (Thanks!)
diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
index f954a68..4ba47b3 100644
--- a/rpm/389-ds-base.spec.in
+++ b/rpm/389-ds-base.spec.in
@@ -474,6 +474,7 @@ fi
%exclude %{_libdir}/%{pkgname}/pkgconfig/nunc-stans.pc
%exclude %{_includedir}/nunc-stans/nunc-stans.h
%exclude %{_mandir}/man3/md_docs_job-safety.3.gz
+%exclude %{_mandir}/man3/md_docs_intro.3.gz
%exclude %{_mandir}/man3/ns_job_t.3.gz
%exclude %{_mandir}/man3/ns_thrpool_config.3.gz
%exclude %{_mandir}/man3/nunc-stans.h.3.gz
6 years, 10 months
dirsrvtests/tests
by William Brown
dirsrvtests/tests/tickets/ticket48906_test.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
New commits:
commit d4b1c659c3dd2e6e5d293546db3040d668c1f64a
Author: William Brown <firstyear(a)redhat.com>
Date: Tue Nov 22 10:11:44 2016 +1000
Ticket 49042 - Test failure that expects old default
Bug Description: Test 48906 expected the old 10MB default. Because we just
changed this to 32MB, this broke the test.
Fix Description: Update the default.
https://fedorahosted.org/389/ticket/49042
Author: wibrown
Review by: mreynolds (Thanks!)
diff --git a/dirsrvtests/tests/tickets/ticket48906_test.py b/dirsrvtests/tests/tickets/ticket48906_test.py
index b532500..b462395 100644
--- a/dirsrvtests/tests/tickets/ticket48906_test.py
+++ b/dirsrvtests/tests/tickets/ticket48906_test.py
@@ -41,7 +41,7 @@ DBLOCK_ATTR_CONFIG="nsslapd-db-locks"
DBLOCK_ATTR_MONITOR="nsslapd-db-configured-locks"
DBLOCK_ATTR_GUARDIAN="locks"
-DBCACHE_DEFAULT="10000000"
+DBCACHE_DEFAULT="33554432"
DBCACHE_LDAP_UPDATE="20000000"
DBCACHE_EDIT_UPDATE="40000000"
DBCACHE_ATTR_CONFIG="nsslapd-dbcachesize"
6 years, 10 months
ldap/servers
by William Brown
ldap/servers/slapd/back-ldbm/back-ldbm.h | 9 ++++++---
ldap/servers/slapd/back-ldbm/dblayer.c | 2 +-
ldap/servers/slapd/back-ldbm/ldbm_config.c | 2 +-
ldap/servers/slapd/back-ldbm/ldbm_instance_config.c | 4 ++--
4 files changed, 10 insertions(+), 7 deletions(-)
New commits:
commit ed2875b4c8895c85f4f21068fb6a0c146536ae04
Author: William Brown <firstyear(a)redhat.com>
Date: Fri Nov 18 10:11:50 2016 +1000
Ticket 49042 - Increase cache defaults slightly
Bug Description: We have very small defaults for most sites right now. If we
increase these, we will see a benefit to performance and reduction in cache
eviction. It will help freeipa with its out-of-box performance issue before
we move to the full automatic tuning.
Fix Description: Change the defaults:
dbcachesize 10MB -> 32MB
entrycachesize 10MB -> 32MB
dncachesize 10MB -> 16MB
https://fedorahosted.org/389/ticket/49042
Author: wibrown
Review by: ???
diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h
index 8b63e2c..ecef956 100644
--- a/ldap/servers/slapd/back-ldbm/back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h
@@ -160,11 +160,14 @@ typedef unsigned short u_int16_t;
#define LDBM_VERSION_31 "Netscape-ldbm/3.1"
#define LDBM_FILENAME_SUFFIX LDBM_SUFFIX
#define DBVERSION_FILENAME "DBVERSION"
-#define DEFAULT_CACHE_SIZE (size_t)10485760
+#define DEFAULT_CACHE_SIZE (size_t)33554432
+#define DEFAULT_CACHE_SIZE_STR "33554432"
#define DEFAULT_CACHE_ENTRIES -1 /* no limit */
-#define DEFAULT_DNCACHE_SIZE (size_t)10485760
+#define DEFAULT_DNCACHE_SIZE (size_t)16777216
+#define DEFAULT_DNCACHE_SIZE_STR "16777216"
#define DEFAULT_DNCACHE_MAXCOUNT -1 /* no limit */
-#define DEFAULT_DBCACHE_SIZE 1000000
+#define DEFAULT_DBCACHE_SIZE 33554432
+#define DEFAULT_DBCACHE_SIZE_STR "33554432"
#define DEFAULT_MODE 0600
#define DEFAULT_ALLIDSTHRESHOLD 4000
#define DEFAULT_IDL_TUNE 1
diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c
index 56792c3..04d31b1 100644
--- a/ldap/servers/slapd/back-ldbm/dblayer.c
+++ b/ldap/servers/slapd/back-ldbm/dblayer.c
@@ -2348,7 +2348,7 @@ dblayer_get_aux_id2entry_ext(backend *be, DB **ppDB, DB_ENV **ppEnv,
}
envflags = DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE;
- cachesize = 10485760; /* 10M */
+ cachesize = DEFAULT_DBCACHE_SIZE;
if (!*ppEnv) {
mypEnv->dblayer_DB_ENV->set_cachesize(mypEnv->dblayer_DB_ENV,
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c
index 0aac833..4b612db 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_config.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c
@@ -1501,7 +1501,7 @@ static config_info ldbm_config[] = {
{CONFIG_MODE, CONFIG_TYPE_INT_OCTAL, "0600", &ldbm_config_mode_get, &ldbm_config_mode_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
{CONFIG_IDLISTSCANLIMIT, CONFIG_TYPE_INT, "4000", &ldbm_config_allidsthreshold_get, &ldbm_config_allidsthreshold_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
{CONFIG_DIRECTORY, CONFIG_TYPE_STRING, "", &ldbm_config_directory_get, &ldbm_config_directory_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE|CONFIG_FLAG_SKIP_DEFAULT_SETTING},
- {CONFIG_DBCACHESIZE, CONFIG_TYPE_SIZE_T, "10000000", &ldbm_config_dbcachesize_get, &ldbm_config_dbcachesize_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+ {CONFIG_DBCACHESIZE, CONFIG_TYPE_SIZE_T, DEFAULT_DBCACHE_SIZE_STR, &ldbm_config_dbcachesize_get, &ldbm_config_dbcachesize_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
{CONFIG_DBNCACHE, CONFIG_TYPE_INT, "0", &ldbm_config_dbncache_get, &ldbm_config_dbncache_set, CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
{CONFIG_MAXPASSBEFOREMERGE, CONFIG_TYPE_INT, "100", &ldbm_config_maxpassbeforemerge_get, &ldbm_config_maxpassbeforemerge_set, 0},
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c b/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c
index 2d23ea5..e03954d 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c
@@ -297,11 +297,11 @@ ldbm_instance_config_require_index_set(void *arg, void *value, char *errorbuf, i
*----------------------------------------------------------------------*/
static config_info ldbm_instance_config[] = {
{CONFIG_INSTANCE_CACHESIZE, CONFIG_TYPE_LONG, "-1", &ldbm_instance_config_cachesize_get, &ldbm_instance_config_cachesize_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
- {CONFIG_INSTANCE_CACHEMEMSIZE, CONFIG_TYPE_SIZE_T, "10485760", &ldbm_instance_config_cachememsize_get, &ldbm_instance_config_cachememsize_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+ {CONFIG_INSTANCE_CACHEMEMSIZE, CONFIG_TYPE_SIZE_T, DEFAULT_CACHE_SIZE_STR, &ldbm_instance_config_cachememsize_get, &ldbm_instance_config_cachememsize_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
{CONFIG_INSTANCE_READONLY, CONFIG_TYPE_ONOFF, "off", &ldbm_instance_config_readonly_get, &ldbm_instance_config_readonly_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
{CONFIG_INSTANCE_REQUIRE_INDEX, CONFIG_TYPE_ONOFF, "off", &ldbm_instance_config_require_index_get, &ldbm_instance_config_require_index_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
{CONFIG_INSTANCE_DIR, CONFIG_TYPE_STRING, NULL, &ldbm_instance_config_instance_dir_get, &ldbm_instance_config_instance_dir_set, CONFIG_FLAG_ALWAYS_SHOW},
- {CONFIG_INSTANCE_DNCACHEMEMSIZE, CONFIG_TYPE_SIZE_T, "10485760", &ldbm_instance_config_dncachememsize_get, &ldbm_instance_config_dncachememsize_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+ {CONFIG_INSTANCE_DNCACHEMEMSIZE, CONFIG_TYPE_SIZE_T, DEFAULT_DNCACHE_SIZE_STR, &ldbm_instance_config_dncachememsize_get, &ldbm_instance_config_dncachememsize_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
{NULL, 0, NULL, NULL, NULL, 0}
};
6 years, 10 months
ldap/servers
by William Brown
ldap/servers/slapd/slap.h | 6
ldap/servers/slapd/slapi-private.h | 6
ldap/servers/slapd/valueset.c | 255 +++++++++++++++++++++----------------
3 files changed, 157 insertions(+), 110 deletions(-)
New commits:
commit 88b9b9eb0469beb788ad90c2e289855e54381a5c
Author: William Brown <firstyear(a)redhat.com>
Date: Tue Nov 15 15:49:11 2016 +1000
Ticket 48894 - Issue with high number of entry state objects.
Bug Description: With high numbers of entry state objects, we saw performance
degradation with our sorting algorithm in valueset.
Fix Description: This contribution, originally by Mohammad N., converted the
valueset to quicksort. I have changed this to the Hoare quicksort, which uses
slightly fewer operations than the Lomuto partition scheme that was provided.
Additionally, this changes the valueset indices to size_t, which is correct for
arrays; it also fixes some of our asserts, cleans up braces in the code, and
fixes a misleading indent.
https://fedorahosted.org/389/ticket/48894
Author: Mohammad N., wibrown
Review by: lkrispen (Thanks!)
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index f98c7b5..1066f3e 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -564,9 +564,9 @@ struct slapi_value
#define VALUE_SORT_THRESHOLD 10
struct slapi_value_set
{
- int num; /* The number of values in the array */
- int max; /* The number of slots in the array */
- int *sorted; /* sorted array of indices, if NULL va is not sorted */
+ size_t num; /* The number of values in the array */
+ size_t max; /* The number of slots in the array */
+ size_t *sorted; /* sorted array of indices, if NULL va is not sorted */
struct slapi_value **va;
};
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index a152d76..fc05b42 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -839,9 +839,13 @@ int charray_normdn_add(char ***chararray, char *dn, char *errstr);
void valuearray_add_value(Slapi_Value ***vals, const Slapi_Value *addval);
void valuearray_add_valuearray( Slapi_Value ***vals, Slapi_Value **addvals, PRUint32 flags );
void valuearray_add_valuearray_fast( Slapi_Value ***vals, Slapi_Value **addvals, int nvals, int naddvals, int *maxvals, int exact, int passin );
-Slapi_Value * valueset_find_sorted (const Slapi_Attr *a, const Slapi_ValueSet *vs, const Slapi_Value *v, int *index);
+Slapi_Value * valueset_find_sorted (const Slapi_Attr *a, const Slapi_ValueSet *vs, const Slapi_Value *v, size_t *index);
int valueset_insert_value_to_sorted(const Slapi_Attr *a, Slapi_ValueSet *vs, Slapi_Value *vi, int dupcheck);
void valueset_array_to_sorted (const Slapi_Attr *a, Slapi_ValueSet *vs);
+void valueset_big_array_to_sorted (const Slapi_Attr *a, Slapi_ValueSet *vs);
+void valueset_array_to_sorted_quick (const Slapi_Attr *a, Slapi_ValueSet *vs, size_t s, size_t e);
+void valueset_swap_values(size_t *a, size_t *b);
+
/* NOTE: if the flags include SLAPI_VALUE_FLAG_PASSIN and SLAPI_VALUE_FLAG_DUPCHECK
* THE CALLER MUST PROVIDE THE dup_index PARAMETER in order to know where in addval
* the un-copied values start e.g. to free them for cleanup
diff --git a/ldap/servers/slapd/valueset.c b/ldap/servers/slapd/valueset.c
index 7f630aa..729a924 100644
--- a/ldap/servers/slapd/valueset.c
+++ b/ldap/servers/slapd/valueset.c
@@ -577,7 +577,7 @@ slapi_valueset_done(Slapi_ValueSet *vs)
{
if(vs!=NULL)
{
- PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num)));
+ PR_ASSERT((vs->sorted == NULL) || (vs->num < VALUESET_ARRAY_SORT_THRESHOLD) || ((vs->num >= VALUESET_ARRAY_SORT_THRESHOLD) && (vs->sorted[0] < vs->num)));
if(vs->va!=NULL)
{
valuearray_free(&vs->va);
@@ -609,7 +609,7 @@ slapi_valueset_set_from_smod(Slapi_ValueSet *vs, Slapi_Mod *smod)
Slapi_Value **va= NULL;
valuearray_init_bervalarray(slapi_mod_get_ldapmod_byref(smod)->mod_bvalues, &va);
valueset_set_valuearray_passin(vs, va);
- PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num)));
+ PR_ASSERT((vs->sorted == NULL) || (vs->num < VALUESET_ARRAY_SORT_THRESHOLD) || ((vs->num >= VALUESET_ARRAY_SORT_THRESHOLD) && (vs->sorted[0] < vs->num)));
}
void
@@ -630,7 +630,7 @@ valueset_set_valuearray_byval(Slapi_ValueSet *vs, Slapi_Value **addvals)
}
}
vs->va[j] = NULL;
- PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num)));
+ PR_ASSERT((vs->sorted == NULL) || (vs->num < VALUESET_ARRAY_SORT_THRESHOLD) || ((vs->num >= VALUESET_ARRAY_SORT_THRESHOLD) && (vs->sorted[0] < vs->num)));
}
/* WARNING: you must call this function with a new vs - if it points to existing data, it
@@ -643,7 +643,7 @@ valueset_set_valuearray_passin(Slapi_ValueSet *vs, Slapi_Value **addvals)
vs->va= addvals;
vs->num = valuearray_count(addvals);
vs->max = vs->num + 1;
- PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num)));
+ PR_ASSERT((vs->sorted == NULL) || (vs->num < VALUESET_ARRAY_SORT_THRESHOLD) || ((vs->num >= VALUESET_ARRAY_SORT_THRESHOLD) && (vs->sorted[0] < vs->num)));
}
/* WARNING: you must call this function with a new vs1 - if it points to existing data, it
@@ -747,12 +747,13 @@ Slapi_Value *
valueset_remove_value_sorted(const Slapi_Attr *a, Slapi_ValueSet *vs, const Slapi_Value *v)
{
Slapi_Value *r= NULL;
- int i, position = 0;
+ size_t i = 0;
+ size_t position = 0;
r = valueset_find_sorted(a,vs,v,&position);
if (r) {
/* the value was found, remove from valuearray */
- int index = vs->sorted[position];
- memmove(&vs->sorted[position],&vs->sorted[position+1],(vs->num - position)*sizeof(int));
+ size_t index = vs->sorted[position];
+ memmove(&vs->sorted[position],&vs->sorted[position+1],(vs->num - position)*sizeof(size_t));
memmove(&vs->va[index],&vs->va[index+1],(vs->num - index)*sizeof(Slapi_Value *));
vs->num--;
/* unfortunately the references in the sorted array
@@ -761,7 +762,7 @@ valueset_remove_value_sorted(const Slapi_Attr *a, Slapi_ValueSet *vs, const Slap
for (i=0; i < vs->num; i++) {
if (vs->sorted[i] > index) vs->sorted[i]--;
}
- PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num)));
+ PR_ASSERT((vs->sorted == NULL) || (vs->num < VALUESET_ARRAY_SORT_THRESHOLD) || ((vs->num >= VALUESET_ARRAY_SORT_THRESHOLD) && (vs->sorted[0] < vs->num)));
}
return r;
}
@@ -780,7 +781,7 @@ valueset_remove_value(const Slapi_Attr *a, Slapi_ValueSet *vs, const Slapi_Value
vs->num--;
}
}
- PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num)));
+ PR_ASSERT((vs->sorted == NULL) || (vs->num < VALUESET_ARRAY_SORT_THRESHOLD) || ((vs->num >= VALUESET_ARRAY_SORT_THRESHOLD) && (vs->sorted[0] < vs->num)));
return r;
}
@@ -808,7 +809,7 @@ valueset_purge(Slapi_ValueSet *vs, const CSN *csn)
slapi_ch_free ((void **)&vs->sorted);
vs->sorted = NULL;
}
- PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num)));
+ PR_ASSERT((vs->sorted == NULL) || (vs->num < VALUESET_ARRAY_SORT_THRESHOLD) || ((vs->num >= VALUESET_ARRAY_SORT_THRESHOLD) && (vs->sorted[0] < vs->num)));
}
return 0;
}
@@ -933,7 +934,7 @@ valueset_value_cmp( const Slapi_Attr *a, const Slapi_Value *v1, const Slapi_Valu
* If the value is not found, index will contain the place where the value would be inserted
*/
Slapi_Value *
-valueset_find_sorted (const Slapi_Attr *a, const Slapi_ValueSet *vs, const Slapi_Value *v, int *index)
+valueset_find_sorted (const Slapi_Attr *a, const Slapi_ValueSet *vs, const Slapi_Value *v, size_t *index)
{
int cmp = -1;
int bot = -1;
@@ -948,10 +949,11 @@ valueset_find_sorted (const Slapi_Attr *a, const Slapi_ValueSet *vs, const Slapi
}
while (top - bot > 1) {
int mid = (top + bot)/2;
- if ( (cmp = valueset_value_cmp(a, v, vs->va[vs->sorted[mid]])) > 0)
+ if ( (cmp = valueset_value_cmp(a, v, vs->va[vs->sorted[mid]])) > 0) {
bot = mid;
- else
+ } else {
top = mid;
+ }
}
if (index) *index = top;
/* check if the value is found */
@@ -961,62 +963,104 @@ valueset_find_sorted (const Slapi_Attr *a, const Slapi_ValueSet *vs, const Slapi
return (NULL);
}
+
void
valueset_array_to_sorted (const Slapi_Attr *a, Slapi_ValueSet *vs)
{
- int i, j, swap;
+ size_t i;
- /* initialize sort array */
- for (i = 0; i < vs->num; i++)
- vs->sorted[i] = i;
+ /* initialize sort array with indcies */
+ for (i = 0; i < vs->max; i++) {
+ vs->sorted[i] = i;
+ }
- /* now sort it, use a simple insertion sort as the array will always
- * be very small when initially sorted
- */
- for (i = 1; i < vs->num; i++) {
- swap = vs->sorted[i];
- j = i -1;
+ /* This is the index boundaries of the array.
+ * We only need to sort if we have 2 or more elements.
+ */
+ if (vs->num >= 2) {
+ valueset_array_to_sorted_quick(a, vs, 0, vs->num - 1);
+ }
+
+ PR_ASSERT((vs->sorted == NULL) || (vs->num < VALUESET_ARRAY_SORT_THRESHOLD) || ((vs->num >= VALUESET_ARRAY_SORT_THRESHOLD) && (vs->sorted[0] < vs->num)));
+}
+
+void
+valueset_array_to_sorted_quick (const Slapi_Attr *a, Slapi_ValueSet *vs, size_t low, size_t high)
+{
+ if (low >= high) {
+ return;
+ }
+
+ /* Hoare quicksort */
+
+ size_t pivot = vs->sorted[low];
+ size_t i = low - 1;
+ size_t j = high + 1;
+
+ /* This is the partition step */
+ while (1) {
+ do {
+ i++;
+ } while ( valueset_value_cmp(a, vs->va[vs->sorted[i]], vs->va[pivot]) < 0);
+
+ do {
+ j--;
+ } while ( valueset_value_cmp(a, vs->va[vs->sorted[j]], vs->va[pivot]) > 0);
+
+ if (i >= j) {
+ break;
+ }
+
+ valueset_swap_values(&(vs->sorted[i]), &(vs->sorted[j]));
+
+ }
+
+ valueset_array_to_sorted_quick(a, vs, low, j);
+ valueset_array_to_sorted_quick(a, vs, j + 1, high);
- while ( j >= 0 && valueset_value_cmp (a, vs->va[vs->sorted[j]], vs->va[swap]) > 0 ) {
- vs->sorted[j+1] = vs->sorted[j];
- j--;
- }
- vs->sorted[j+1] = swap;
- }
- PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num)));
}
+
+void
+valueset_swap_values(size_t *a, size_t *b)
+{
+ size_t t = *a;
+ *a = *b;
+ *b = t;
+}
+
/* insert a value into a sorted array, if dupcheck is set no duplicate values will be accepted
* (is there a reason to allow duplicates ? LK
- * if the value is inserted the the function returns the index where it was inserted
+ * (OLD) if the value is inserted the the function returns the index where it was inserted
+ * (NEW) If the value is inserted, we return 0. No one checks the return, so don't bother.
* if the value already exists -index is returned to indicate anerror an the index of the existing value
*/
int
valueset_insert_value_to_sorted(const Slapi_Attr *a, Slapi_ValueSet *vs, Slapi_Value *vi, int dupcheck)
{
- int index = -1;
+ size_t index = 0;
Slapi_Value *v;
/* test for pre sorted array and to avoid boundary condition */
if (vs->num == 0) {
vs->sorted[0] = 0;
vs->num++;
- PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num)));
+ PR_ASSERT((vs->sorted == NULL) || (vs->num < VALUESET_ARRAY_SORT_THRESHOLD) || ((vs->num >= VALUESET_ARRAY_SORT_THRESHOLD) && (vs->sorted[0] < vs->num)));
return(0);
} else if (valueset_value_cmp (a, vi, vs->va[vs->sorted[vs->num-1]]) > 0 ) {
vs->sorted[vs->num] = vs->num;
- vs->num++;
- PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num)));
- return (vs->num);
+ vs->num++;
+ PR_ASSERT((vs->sorted == NULL) || (vs->num < VALUESET_ARRAY_SORT_THRESHOLD) || ((vs->num >= VALUESET_ARRAY_SORT_THRESHOLD) && (vs->sorted[0] < vs->num)));
+ return (0);
}
v = valueset_find_sorted (a, vs, vi, &index);
if (v && dupcheck) {
/* value already exists, do not insert duplicates */
return (-1);
} else {
- memmove(&vs->sorted[index+1],&vs->sorted[index],(vs->num - index)* sizeof(int));
+ memmove(&vs->sorted[index+1],&vs->sorted[index],(vs->num - index)* sizeof(size_t));
vs->sorted[index] = vs->num;
- vs->num++;
- PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num)));
- return(index);
+ vs->num++;
+ PR_ASSERT((vs->sorted == NULL) || (vs->num < VALUESET_ARRAY_SORT_THRESHOLD) || ((vs->num >= VALUESET_ARRAY_SORT_THRESHOLD) && (vs->sorted[0] < vs->num)));
+ return(0);
}
}
@@ -1035,32 +1079,34 @@ slapi_valueset_add_attr_valuearray_ext(const Slapi_Attr *a, Slapi_ValueSet *vs,
Slapi_Value **addvals, int naddvals, unsigned long flags, int *dup_index)
{
int rc = LDAP_SUCCESS;
- int i, dup;
- int allocate = 0;
- int need;
+ int dup;
+ size_t allocate = 0;
+ size_t need = 0;
int passin = flags & SLAPI_VALUE_FLAG_PASSIN;
int dupcheck = flags & SLAPI_VALUE_FLAG_DUPCHECK;
- if (naddvals == 0)
+ if (naddvals == 0) {
return (rc);
+ }
need = vs->num + naddvals + 1;
if (need > vs->max) {
/* Expand the array */
- allocate= vs->max;
- if ( allocate == 0 ) /* initial allocation */
+ allocate = vs->max;
+ if ( allocate == 0 ) { /* initial allocation */
allocate = VALUESET_ARRAY_MINSIZE;
+ }
while ( allocate < need )
{
- if (allocate > VALUESET_ARRAY_MAXINCREMENT )
+ if (allocate > VALUESET_ARRAY_MAXINCREMENT ) {
/* do not grow exponentially */
allocate += VALUESET_ARRAY_MAXINCREMENT;
- else
+ } else {
allocate *= 2;
-
+ }
}
}
- if(allocate>0)
+ if(allocate > 0)
{
if(vs->va==NULL)
{
@@ -1070,20 +1116,19 @@ slapi_valueset_add_attr_valuearray_ext(const Slapi_Attr *a, Slapi_ValueSet *vs,
{
vs->va = (Slapi_Value **) slapi_ch_realloc( (char *) vs->va, allocate * sizeof(Slapi_Value *));
if (vs->sorted) {
- vs->sorted = (int *) slapi_ch_realloc( (char *) vs->sorted, allocate * sizeof(int));
+ vs->sorted = (size_t *) slapi_ch_realloc( (char *) vs->sorted, allocate * sizeof(size_t));
}
}
- vs->max= allocate;
+ vs->max = allocate;
}
- if ( (vs->num + naddvals > VALUESET_ARRAY_SORT_THRESHOLD || dupcheck ) &&
- !vs->sorted ) {
+ if ((vs->num + naddvals > VALUESET_ARRAY_SORT_THRESHOLD || dupcheck ) && !vs->sorted && vs->max > 0) {
/* initialize sort array and do initial sort */
- vs->sorted = (int *) slapi_ch_malloc( vs->max* sizeof(int));
+ vs->sorted = (size_t *) slapi_ch_malloc( vs->max * sizeof(size_t));
valueset_array_to_sorted (a, vs);
}
- for ( i = 0; i < naddvals; i++)
+ for (size_t i = 0; i < naddvals; i++)
{
if ( addvals[i]!=NULL )
{
@@ -1118,7 +1163,7 @@ slapi_valueset_add_attr_valuearray_ext(const Slapi_Attr *a, Slapi_ValueSet *vs,
}
(vs->va)[vs->num] = NULL;
- PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num)));
+ PR_ASSERT((vs->sorted == NULL) || (vs->num < VALUESET_ARRAY_SORT_THRESHOLD) || ((vs->num >= VALUESET_ARRAY_SORT_THRESHOLD) && (vs->sorted[0] < vs->num)));
return (rc);
}
@@ -1182,14 +1227,14 @@ valueset_set_valueset(Slapi_ValueSet *vs1, const Slapi_ValueSet *vs2)
}
if (vs2->sorted) {
if ((NULL == vs1->sorted) || (oldmax < vs1->max)) {
- vs1->sorted = (int *)slapi_ch_realloc((char *)vs1->sorted, vs1->max * sizeof(int));
+ vs1->sorted = (size_t *)slapi_ch_realloc((char *)vs1->sorted, vs1->max * sizeof(size_t));
}
- memcpy(&vs1->sorted[0], &vs2->sorted[0], vs1->num * sizeof(int));
+ memcpy(&vs1->sorted[0], &vs2->sorted[0], vs1->num * sizeof(size_t));
} else {
slapi_ch_free((void **)&vs1->sorted);
}
/* post-condition */
- PR_ASSERT((vs1->sorted == NULL) || (vs1->num == 0) || ((vs1->sorted[0] >= 0) && (vs1->sorted[0] < vs1->num)));
+ PR_ASSERT((vs1->sorted == NULL) || (vs1->num < VALUESET_ARRAY_SORT_THRESHOLD) || ((vs1->num >= VALUESET_ARRAY_SORT_THRESHOLD) && (vs1->sorted[0] < vs1->num)));
}
}
@@ -1330,6 +1375,7 @@ valueset_replace_valuearray(Slapi_Attr *a, Slapi_ValueSet *vs, Slapi_Value **val
{
return (valueset_replace_valuearray_ext(a, vs,valstoreplace, 1));
}
+
int
valueset_replace_valuearray_ext(Slapi_Attr *a, Slapi_ValueSet *vs, Slapi_Value **valstoreplace, int dupcheck)
{
@@ -1337,55 +1383,52 @@ valueset_replace_valuearray_ext(Slapi_Attr *a, Slapi_ValueSet *vs, Slapi_Value *
int vals_count = valuearray_count(valstoreplace);
if (vals_count == 0) {
- /* no new values, just clear the valueset */
- slapi_valueset_done(vs);
+ /* no new values, just clear the valueset */
+ slapi_valueset_done(vs);
} else if (vals_count == 1 || !dupcheck) {
- /* just repelace the valuearray and adjus num, max */
- slapi_valueset_done(vs);
- vs->va = valstoreplace;
- vs->num = vals_count;
- vs->max = vals_count + 1;
- PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num)));
+ /* just repelace the valuearray and adjus num, max */
+ slapi_valueset_done(vs);
+ vs->va = valstoreplace;
+ vs->num = vals_count;
+ vs->max = vals_count + 1;
+ PR_ASSERT((vs->sorted == NULL) || (vs->num < VALUESET_ARRAY_SORT_THRESHOLD) || ((vs->num >= VALUESET_ARRAY_SORT_THRESHOLD) && (vs->sorted[0] < vs->num)));
} else {
- /* verify the given values are not duplicated. */
- unsigned long flags = SLAPI_VALUE_FLAG_PASSIN|SLAPI_VALUE_FLAG_DUPCHECK;
- int dupindex = 0;
- Slapi_ValueSet *vs_new = slapi_valueset_new();
- rc = slapi_valueset_add_attr_valuearray_ext (a, vs_new, valstoreplace, vals_count, flags, &dupindex);
+ /* verify the given values are not duplicated. */
+ unsigned long flags = SLAPI_VALUE_FLAG_PASSIN|SLAPI_VALUE_FLAG_DUPCHECK;
+ int dupindex = 0;
+ Slapi_ValueSet *vs_new = slapi_valueset_new();
+ rc = slapi_valueset_add_attr_valuearray_ext (a, vs_new, valstoreplace, vals_count, flags, &dupindex);
- if ( rc == LDAP_SUCCESS )
- {
- /* used passin, so vs_new owns all of the Slapi_Value* in valstoreplace
- * so tell valuearray_free_ext to start at index vals_count, which is
- * NULL, then just free valstoreplace
- */
- valuearray_free_ext(&valstoreplace, vals_count);
- /* values look good - replace the values in the attribute */
- if(!valuearray_isempty(vs->va))
- {
- /* remove old values */
- slapi_valueset_done(vs);
- }
- vs->va = vs_new->va;
- vs_new->va = NULL;
- vs->sorted = vs_new->sorted;
- vs_new->sorted = NULL;
- vs->num = vs_new->num;
- vs->max = vs_new->max;
- slapi_valueset_free (vs_new);
- PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num)));
- }
- else
- {
- /* caller expects us to own valstoreplace - since we cannot
- use them, just delete them */
- /* using PASSIN, some of the Slapi_Value* are in vs_new, and the rest
- * after dupindex are in valstoreplace
- */
- slapi_valueset_free(vs_new);
- valuearray_free_ext(&valstoreplace, dupindex);
- PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num)));
- }
+ if ( rc == LDAP_SUCCESS )
+ {
+ /* used passin, so vs_new owns all of the Slapi_Value* in valstoreplace
+ * so tell valuearray_free_ext to start at index vals_count, which is
+ * NULL, then just free valstoreplace
+ */
+ valuearray_free_ext(&valstoreplace, vals_count);
+ /* values look good - replace the values in the attribute */
+ if(!valuearray_isempty(vs->va)) {
+ /* remove old values */
+ slapi_valueset_done(vs);
+ }
+ vs->va = vs_new->va;
+ vs_new->va = NULL;
+ vs->sorted = vs_new->sorted;
+ vs_new->sorted = NULL;
+ vs->num = vs_new->num;
+ vs->max = vs_new->max;
+ slapi_valueset_free (vs_new);
+ PR_ASSERT((vs->sorted == NULL) || (vs->num < VALUESET_ARRAY_SORT_THRESHOLD) || ((vs->num >= VALUESET_ARRAY_SORT_THRESHOLD) && (vs->sorted[0] < vs->num)));
+ } else {
+ /* caller expects us to own valstoreplace - since we cannot
+ use them, just delete them */
+ /* using PASSIN, some of the Slapi_Value* are in vs_new, and the rest
+ * after dupindex are in valstoreplace
+ */
+ slapi_valueset_free(vs_new);
+ valuearray_free_ext(&valstoreplace, dupindex);
+ PR_ASSERT((vs->sorted == NULL) || (vs->num < VALUESET_ARRAY_SORT_THRESHOLD) || ((vs->num >= VALUESET_ARRAY_SORT_THRESHOLD) && (vs->sorted[0] < vs->num)));
+ }
}
return rc;
}
6 years, 10 months
ldap/servers
by Mark Reynolds
ldap/servers/plugins/pwdstorage/pwd_init.c | 4 -
ldap/servers/slapd/back-ldbm/vlv.c | 4 -
ldap/servers/slapd/pw.c | 18 ++---
ldap/servers/slapd/resourcelimit.c | 96 +++++++++++++----------------
4 files changed, 59 insertions(+), 63 deletions(-)
New commits:
commit 3c537b018d12b60d40e525dab3f584ddc4bdf8c9
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Mon Nov 14 11:46:58 2016 -0500
Ticket 48978 - Fix more log refactoring issues
Description: This patch fixes more log refactoring mistakes/misses.
https://fedorahosted.org/389/ticket/48978
Reviewed by: nhosoi(Thanks!)
diff --git a/ldap/servers/plugins/pwdstorage/pwd_init.c b/ldap/servers/plugins/pwdstorage/pwd_init.c
index d66bb98..779c7fb 100644
--- a/ldap/servers/plugins/pwdstorage/pwd_init.c
+++ b/ldap/servers/plugins/pwdstorage/pwd_init.c
@@ -344,7 +344,7 @@ pbkdf2_sha256_pwd_storage_scheme_init(Slapi_PBlock *pb)
{
int rc;
- slapi_log_error(SLAPI_LOG_PLUGIN, plugin_name, "=> pbkdf2_sha256_pwd_storage_scheme_init\n");
+ slapi_log_err(SLAPI_LOG_PLUGIN, plugin_name, "=> pbkdf2_sha256_pwd_storage_scheme_init\n");
rc = slapi_pblock_set(pb, SLAPI_PLUGIN_VERSION, (void *) SLAPI_PLUGIN_VERSION_01);
rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_DESCRIPTION, (void *)&pbkdf2_sha256_pdesc);
@@ -352,7 +352,7 @@ pbkdf2_sha256_pwd_storage_scheme_init(Slapi_PBlock *pb)
rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_CMP_FN, (void *)pbkdf2_sha256_pw_cmp);
rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_NAME, PBKDF2_SHA256_SCHEME_NAME);
- slapi_log_error(SLAPI_LOG_PLUGIN, plugin_name, "<= pbkdf2_sha256_pwd_storage_scheme_init %d\n", rc);
+ slapi_log_err(SLAPI_LOG_PLUGIN, plugin_name, "<= pbkdf2_sha256_pwd_storage_scheme_init %d\n", rc);
return rc;
}
diff --git a/ldap/servers/slapd/back-ldbm/vlv.c b/ldap/servers/slapd/back-ldbm/vlv.c
index 0007d01..235ed44 100644
--- a/ldap/servers/slapd/back-ldbm/vlv.c
+++ b/ldap/servers/slapd/back-ldbm/vlv.c
@@ -741,8 +741,8 @@ do_vlv_update_index(back_txn *txn, struct ldbminfo *li, Slapi_PBlock *pb, struct
if (rc != 0) {
if(rc != DB_LOCK_DEADLOCK)
slapi_log_err(SLAPI_LOG_ERR, "do_vlv_update_index", "Can't get index file '%s' (err %d)\n",
- pIndex->vlv_attrinfo->ai_type, rc);
- return rc;
+ pIndex->vlv_attrinfo->ai_type, rc);
+ return rc;
}
key = vlv_create_key(pIndex,entry);
diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c
index 5af04d2..5f95cca 100644
--- a/ldap/servers/slapd/pw.c
+++ b/ldap/servers/slapd/pw.c
@@ -112,7 +112,7 @@ slapi_pw_find_sv(
char *valpwd;
int i;
- slapi_log_err(SLAPI_LOG_TRACE, "slapi_pw_find value", "=> \"%s\"\n", slapi_value_get_string(v));
+ slapi_log_err(SLAPI_LOG_TRACE, "slapi_pw_find_sv", "=> \"%s\"\n", slapi_value_get_string(v));
for ( i = 0; vals && vals[i]; i++ )
{
@@ -120,16 +120,16 @@ slapi_pw_find_sv(
if ( pwsp != NULL &&
(*(pwsp->pws_cmp))( (char*)slapi_value_get_string(v), valpwd ) == 0 )
{
- slapi_log_err(SLAPI_LOG_TRACE,
- "<= slapi_pw_find matched \"%s\" using scheme \"%s\"\n",
- valpwd, pwsp->pws_name, 0 );
+ slapi_log_err(SLAPI_LOG_TRACE, "slapi_pw_find_sv",
+ "<= Matched \"%s\" using scheme \"%s\"\n",
+ valpwd, pwsp->pws_name);
free_pw_scheme( pwsp );
return( 0 ); /* found it */
}
free_pw_scheme( pwsp );
}
- slapi_log_err(SLAPI_LOG_TRACE, "slapi_pw_find no matching password", "<=\n");
+ slapi_log_err(SLAPI_LOG_TRACE, "slapi_pw_find_sv", "No matching password <=\n");
return( 1 ); /* no match */
}
@@ -610,7 +610,7 @@ update_pw_info ( Slapi_PBlock *pb , char *old_pw)
slapi_pblock_get( pb, SLAPI_ENTRY_PRE_OP, &e);
if ((NULL == operation) || (NULL == sdn) || (NULL == e)){
slapi_log_err(SLAPI_LOG_ERR, "update_pw_info",
- "Param error - no password entry/target dn/operation\n");
+ "Param error - no password entry/target dn/operation\n");
return -1;
}
internal_op = slapi_operation_is_flag_set(operation, SLAPI_OP_FLAG_INTERNAL);
@@ -818,7 +818,7 @@ check_pw_syntax_ext ( Slapi_PBlock *pb, const Slapi_DN *sdn, Slapi_Value **vals,
}
if (NULL == vals) {
slapi_log_err(SLAPI_LOG_ERR, "check_pw_syntax_ext",
- "No passwords to check\n" );
+ "No passwords to check\n" );
return -1;
}
@@ -1236,8 +1236,8 @@ update_pw_history( Slapi_PBlock *pb, const Slapi_DN *sdn, char *old_pw )
slapi_modify_internal_pb(&mod_pb);
slapi_pblock_get(&mod_pb, SLAPI_PLUGIN_INTOP_RESULT, &res);
if (res != LDAP_SUCCESS){
- slapi_log_err(SLAPI_LOG_ERR,
- "update_pw_history", "Modify error %d on entry '%s'\n", res, dn);
+ slapi_log_err(SLAPI_LOG_ERR, "update_pw_history",
+ "Modify error %d on entry '%s'\n", res, dn);
}
pblock_done(&mod_pb);
slapi_ch_free_string(&str);
diff --git a/ldap/servers/slapd/resourcelimit.c b/ldap/servers/slapd/resourcelimit.c
index 9e3382c..ea22d34 100644
--- a/ldap/servers/slapd/resourcelimit.c
+++ b/ldap/servers/slapd/resourcelimit.c
@@ -171,15 +171,14 @@ reslimit_init( void )
reslimit_connext_destructor,
&reslimit_connext_objtype, &reslimit_connext_handle )
!= 0 ) {
- slapi_log_err(SLAPI_LOG_ERR, SLAPI_RESLIMIT_MODULE,
- "reslimit_init - slapi_register_object_extension()"
- " failed\n" );
+ slapi_log_err(SLAPI_LOG_ERR, "reslimit_init",
+ "slapi_register_object_extension() failed\n" );
return( -1 );
}
if (( reslimit_map_rwlock = slapi_new_rwlock()) == NULL ) {
- slapi_log_err(SLAPI_LOG_ERR, SLAPI_RESLIMIT_MODULE,
- "reslimit_init - slapi_new_rwlock() failed\n" );
+ slapi_log_err(SLAPI_LOG_ERR, "reslimit_init",
+ "slapi_new_rwlock() failed\n" );
return( -1 );
}
@@ -232,8 +231,8 @@ reslimit_connext_constructor( void *object, void *parent )
Slapi_RWLock *rwlock;
if (( rwlock = slapi_new_rwlock()) == NULL ) {
- slapi_log_err(SLAPI_LOG_ERR, SLAPI_RESLIMIT_MODULE,
- "reslimit_connext_constructor - slapi_new_rwlock() failed\n" );
+ slapi_log_err(SLAPI_LOG_ERR, "reslimit_connext_constructor",
+ "slapi_new_rwlock() failed\n" );
return( NULL );
}
@@ -275,7 +274,7 @@ reslimit_get_ext( Slapi_Connection *conn, const char *logname,
{
if ( !reslimit_inited && reslimit_init() != 0 ) {
if ( NULL != logname ) {
- slapi_log_err(SLAPI_LOG_ERR, SLAPI_RESLIMIT_MODULE,
+ slapi_log_err(SLAPI_LOG_ERR, "reslimit_get_ext",
"%s: reslimit_init() failed\n", logname );
}
return( SLAPI_RESLIMIT_STATUS_INIT_FAILURE );
@@ -285,7 +284,7 @@ reslimit_get_ext( Slapi_Connection *conn, const char *logname,
reslimit_connext_objtype, conn,
reslimit_connext_handle )) == NULL ) {
if ( NULL != logname ) {
- slapi_log_err(SLAPI_LOG_ERR, SLAPI_RESLIMIT_MODULE,
+ slapi_log_err(SLAPI_LOG_ERR, "reslimit_get_ext",
"%s: slapi_get_object_extension() returned NULL\n", logname );
}
return( SLAPI_RESLIMIT_STATUS_INTERNAL_ERROR );
@@ -344,15 +343,14 @@ reslimit_update_from_entry( Slapi_Connection *conn, Slapi_Entry *e )
{
SLAPIResLimitConnData *rlcdp = NULL;
Slapi_ValueSet *vs = NULL;
- char *fnname = "reslimit_update_from_entry()";
char *actual_type_name = NULL;
char *get_ext_logname = NULL;
int type_name_disposition = 0;
int free_flags = 0;
int rc, i;
- slapi_log_err(SLAPI_RESLIMIT_TRACELEVEL, "=> %s conn=0x%x, entry=0x%x\n",
- fnname, conn, e );
+ slapi_log_err(SLAPI_RESLIMIT_TRACELEVEL, "reslimit_update_from_entry",
+ "=> conn=0x%p, entry=0x%p\n", conn, e );
rc = SLAPI_RESLIMIT_STATUS_SUCCESS; /* optimistic */
@@ -365,7 +363,7 @@ reslimit_update_from_entry( Slapi_Connection *conn, Slapi_Entry *e )
if ( NULL == e ) {
get_ext_logname = NULL; /* do not log errors if resetting limits */
} else {
- get_ext_logname = fnname;
+ get_ext_logname = "reslimit_update_from_entry";
}
if (( rc = reslimit_get_ext( conn, get_ext_logname, &rlcdp )) !=
SLAPI_RESLIMIT_STATUS_SUCCESS ) {
@@ -392,9 +390,9 @@ reslimit_update_from_entry( Slapi_Connection *conn, Slapi_Entry *e )
continue;
}
- slapi_log_err(SLAPI_RESLIMIT_TRACELEVEL,
- "%s: setting limit for handle %d (based on %s)\n",
- fnname, i, reslimit_map[ i ].rlmap_at );
+ slapi_log_err(SLAPI_RESLIMIT_TRACELEVEL, "reslimit_update_from_entry",
+ "Setting limit for handle %d (based on %s)\n",
+ i, reslimit_map[ i ].rlmap_at );
rlcdp->rlcd_integer_available[ i ] = PR_FALSE;
@@ -409,15 +407,15 @@ reslimit_update_from_entry( Slapi_Connection *conn, Slapi_Entry *e )
rlcdp->rlcd_integer_value[ i ] = slapi_value_get_int( v );
rlcdp->rlcd_integer_available[ i ] = PR_TRUE;
- slapi_log_err(SLAPI_RESLIMIT_TRACELEVEL,
- "%s: set limit based on %s to %d\n",
- fnname, reslimit_map[ i ].rlmap_at,
+ slapi_log_err(SLAPI_RESLIMIT_TRACELEVEL, "reslimit_update_from_entry",
+ "Set limit based on %s to %d\n",
+ reslimit_map[ i ].rlmap_at,
rlcdp->rlcd_integer_value[ i ] );
if ( slapi_valueset_next_value( vs, index, &v ) != -1 ) {
- slapi_log_err(SLAPI_LOG_WARNING, SLAPI_RESLIMIT_MODULE,
- "%s: ignoring multiple values for %s in entry %s\n",
- fnname, reslimit_map[ i ].rlmap_at,
+ slapi_log_err(SLAPI_LOG_WARNING, "reslimit_update_from_entry",
+ "Ignoring multiple values for %s in entry %s\n",
+ reslimit_map[ i ].rlmap_at,
slapi_entry_get_dn_const( e ));
}
}
@@ -432,8 +430,8 @@ reslimit_update_from_entry( Slapi_Connection *conn, Slapi_Entry *e )
/* UNLOCKED -- map lock */
log_and_return:
- slapi_log_err(SLAPI_RESLIMIT_TRACELEVEL, "<= %s returning status %d\n",
- fnname, rc, 0 );
+ slapi_log_err(SLAPI_RESLIMIT_TRACELEVEL, "reslimit_update_from_entry",
+ "<= returning status %d\n", rc);
return( rc );
}
@@ -479,18 +477,17 @@ static char ** reslimit_get_registered_attributes(void)
int
slapi_reslimit_register( int type, const char *attrname, int *handlep )
{
- char *fnname = "slapi_reslimit_register()";
int i, rc;
- slapi_log_err(SLAPI_RESLIMIT_TRACELEVEL, "=> %s attrname=%s\n",
- fnname, attrname, 0 );
+ slapi_log_err(SLAPI_RESLIMIT_TRACELEVEL, "slapi_reslimit_register",
+ "=> attrname=%s\n", attrname);
rc = SLAPI_RESLIMIT_STATUS_SUCCESS; /* optimistic */
/* initialize if necessary */
if ( !reslimit_inited && reslimit_init() != 0 ) {
- slapi_log_err(SLAPI_LOG_ERR, SLAPI_RESLIMIT_MODULE,
- "%s: reslimit_init() failed\n", fnname );
+ slapi_log_err(SLAPI_LOG_ERR, "slapi_reslimit_register",
+ "reslimit_init() failed\n");
rc = SLAPI_RESLIMIT_STATUS_INIT_FAILURE;
goto log_and_return;
}
@@ -498,8 +495,8 @@ slapi_reslimit_register( int type, const char *attrname, int *handlep )
/* sanity check parameters */
if ( type != SLAPI_RESLIMIT_TYPE_INT || attrname == NULL
|| handlep == NULL ) {
- slapi_log_err(SLAPI_LOG_ERR, SLAPI_RESLIMIT_MODULE,
- "%s: parameter error\n", fnname );
+ slapi_log_err(SLAPI_LOG_ERR, "slapi_reslimit_register",
+ "Parameter error\n");
rc = SLAPI_RESLIMIT_STATUS_PARAM_ERROR;
goto log_and_return;
}
@@ -513,9 +510,9 @@ slapi_reslimit_register( int type, const char *attrname, int *handlep )
for ( i = 0; i < reslimit_map_count; ++i ) {
if ( 0 == slapi_attr_type_cmp( reslimit_map[ i ].rlmap_at,
attrname, SLAPI_TYPE_CMP_EXACT )) {
- slapi_log_err(SLAPI_LOG_ERR, SLAPI_RESLIMIT_MODULE,
- "%s: parameter error (%s already registered)\n",
- attrname, fnname );
+ slapi_log_err(SLAPI_LOG_ERR, "slapi_reslimit_register",
+ "Parameter error (%s already registered)\n",
+ attrname);
rc = SLAPI_RESLIMIT_STATUS_PARAM_ERROR;
goto unlock_and_return;
}
@@ -538,8 +535,8 @@ unlock_and_return:
/* UNLOCKED -- map lock */
log_and_return:
- slapi_log_err(SLAPI_RESLIMIT_TRACELEVEL,
- "<= %s returning status=%d, handle=%d\n", fnname, rc,
+ slapi_log_err(SLAPI_RESLIMIT_TRACELEVEL, "slapi_reslimit_register",
+ "<= returning status=%d, handle=%d\n", rc,
(handlep == NULL) ? -1 : *handlep );
return( rc );
@@ -562,19 +559,18 @@ int
slapi_reslimit_get_integer_limit( Slapi_Connection *conn, int handle,
int *limitp )
{
- char *fnname = "slapi_reslimit_get_integer_limit()";
int rc;
SLAPIResLimitConnData *rlcdp;
- slapi_log_err(SLAPI_RESLIMIT_TRACELEVEL, "=> %s conn=0x%x, handle=%d\n",
- fnname, conn, handle );
+ slapi_log_err(SLAPI_RESLIMIT_TRACELEVEL, "slapi_reslimit_get_integer_limit",
+ "=> conn=0x%p, handle=%d\n", conn, handle );
rc = SLAPI_RESLIMIT_STATUS_SUCCESS; /* optimistic */
/* sanity check parameters */
if ( limitp == NULL ) {
- slapi_log_err(SLAPI_LOG_ERR, SLAPI_RESLIMIT_MODULE,
- "slapi_reslimit_get_integer_limit - %s: parameter error\n", fnname );
+ slapi_log_err(SLAPI_LOG_ERR, "slapi_reslimit_get_integer_limit",
+ "Parameter error\n");
rc = SLAPI_RESLIMIT_STATUS_PARAM_ERROR;
goto log_and_return;
}
@@ -584,7 +580,7 @@ slapi_reslimit_get_integer_limit( Slapi_Connection *conn, int handle,
goto log_and_return;
}
- if (( rc = reslimit_get_ext( conn, fnname, &rlcdp )) !=
+ if (( rc = reslimit_get_ext( conn, "slapi_reslimit_get_integer_limit", &rlcdp )) !=
SLAPI_RESLIMIT_STATUS_SUCCESS ) {
goto log_and_return;
}
@@ -595,8 +591,8 @@ slapi_reslimit_get_integer_limit( Slapi_Connection *conn, int handle,
if(rlcdp->rlcd_integer_count==0) {
rc = SLAPI_RESLIMIT_STATUS_NOVALUE;
} else if ( handle < 0 || handle >= rlcdp->rlcd_integer_count ) {
- slapi_log_err(SLAPI_LOG_ERR, SLAPI_RESLIMIT_MODULE,
- "slapi_reslimit_get_integer_limit - %s: unknown handle %d\n", fnname, handle );
+ slapi_log_err(SLAPI_LOG_ERR, "slapi_reslimit_get_integer_limit",
+ "Uunknown handle %d\n", handle );
rc = SLAPI_RESLIMIT_STATUS_UNKNOWN_HANDLE;
} else if ( rlcdp->rlcd_integer_available[ handle ] ) {
*limitp = rlcdp->rlcd_integer_value[ handle ];
@@ -610,14 +606,14 @@ slapi_reslimit_get_integer_limit( Slapi_Connection *conn, int handle,
log_and_return:
if ( loglevel_is_set( LDAP_DEBUG_TRACE )) {
if ( rc == SLAPI_RESLIMIT_STATUS_SUCCESS ) {
- slapi_log_err(SLAPI_RESLIMIT_TRACELEVEL,
- "<= %s returning SUCCESS, value=%d\n", fnname, *limitp, 0 );
+ slapi_log_err(SLAPI_RESLIMIT_TRACELEVEL, "slapi_reslimit_get_integer_limit",
+ "<= returning SUCCESS, value=%d\n", *limitp);
} else if ( rc == SLAPI_RESLIMIT_STATUS_NOVALUE ) {
- slapi_log_err(SLAPI_RESLIMIT_TRACELEVEL, "<= %s returning NO VALUE\n",
- fnname, 0, 0 );
+ slapi_log_err(SLAPI_RESLIMIT_TRACELEVEL, "slapi_reslimit_get_integer_limit",
+ "<= returning NO VALUE\n");
} else {
- slapi_log_err(SLAPI_RESLIMIT_TRACELEVEL, "<= %s returning ERROR %d\n",
- fnname, rc, 0 );
+ slapi_log_err(SLAPI_RESLIMIT_TRACELEVEL, "slapi_reslimit_get_integer_limit",
+ "<= returning ERROR %d\n", rc);
}
}
6 years, 10 months
rfcs/examples rfcs/Makefile rfcs/src
by William Brown
rfcs/Makefile | 13
rfcs/examples/template-bare-06.txt | 426 ++++++++++++++++++++++++++++
rfcs/src/draft-wibrown-ldapssotoken-00.xml | 441 +++++++++++++++++++++++++++++
3 files changed, 880 insertions(+)
New commits:
commit 6ea27bfcd0f3c305b4feabc4f5d90df5c355a893
Author: William Brown <firstyear(a)redhat.com>
Date: Tue Feb 16 15:28:17 2016 +1000
Ticket 48707 - Draft Ldap SSO Token proposal
Description: This is the first revision of the Draft of LDAP SSO Sasl mech
we would like to design and add to DS.
Additionally, this provides the structure and make file to allow easier drafting
and implementation of further rfc topics.
https://fedorahosted.org/389/ticket/48707
Author: wibrown
Review by: mreynolds (Thanks!)
diff --git a/rfcs/Makefile b/rfcs/Makefile
new file mode 100644
index 0000000..e868d38
--- /dev/null
+++ b/rfcs/Makefile
@@ -0,0 +1,13 @@
+
+allrfcs: folders examplerfcs draft-wibrown-ldapssotoken-00
+
+folders:
+ mkdir -p txt
+
+examplerfcs:
+ xml2rfc examples/template-bare-06.txt -o txt/template-bare-06.txt --text
+
+draft-wibrown-ldapssotoken-00:
+ xml2rfc src/draft-wibrown-ldapssotoken-00.xml -o txt/draft-wibrown-ldapssotoken-00.txt --text
+ xml2rfc src/draft-wibrown-ldapssotoken-00.xml -o txt/draft-wibrown-ldapssotoken-00.raw --raw
+
diff --git a/rfcs/examples/template-bare-06.txt b/rfcs/examples/template-bare-06.txt
new file mode 100644
index 0000000..49aa691
--- /dev/null
+++ b/rfcs/examples/template-bare-06.txt
@@ -0,0 +1,426 @@
+<?xml version="1.0" encoding="US-ASCII"?>
+<!-- This template is for creating an Internet Draft using xml2rfc,
+ which is available here: http://xml.resource.org. -->
+<!DOCTYPE rfc SYSTEM "rfc2629.dtd" [
+<!-- One method to get references from the online citation libraries.
+ There has to be one entity for each item to be referenced.
+ An alternate method (rfc include) is described in the references. -->
+
+<!ENTITY RFC2119 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.2119.xml">
+<!ENTITY RFC2629 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.2629.xml">
+<!ENTITY RFC3552 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.3552.xml">
+<!ENTITY RFC5226 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.5226.xml">
+]>
+<?xml-stylesheet type='text/xsl' href='rfc2629.xslt' ?>
+<!-- used by XSLT processors -->
+<!-- For a complete list and description of processing instructions (PIs),
+ please see http://xml.resource.org/authoring/README.html. -->
+<!-- Below are generally applicable Processing Instructions (PIs) that most I-Ds might want to use.
+ (Here they are set differently than their defaults in xml2rfc v1.32) -->
+<?rfc strict="yes" ?>
+<!-- give errors regarding ID-nits and DTD validation -->
+<!-- control the table of contents (ToC) -->
+<?rfc toc="yes"?>
+<!-- generate a ToC -->
+<?rfc tocdepth="4"?>
+<!-- the number of levels of subsections in ToC. default: 3 -->
+<!-- control references -->
+<?rfc symrefs="yes"?>
+<!-- use symbolic references tags, i.e, [RFC2119] instead of [1] -->
+<?rfc sortrefs="yes" ?>
+<!-- sort the reference entries alphabetically -->
+<!-- control vertical white space
+ (using these PIs as follows is recommended by the RFC Editor) -->
+<?rfc compact="yes" ?>
+<!-- do not start each main section on a new page -->
+<?rfc subcompact="no" ?>
+<!-- keep one blank line between list items -->
+<!-- end of list of popular I-D processing instructions -->
+<rfc category="info" docName="draft-ietf-xml2rfc-template-06" ipr="trust200902">
+ <!-- category values: std, bcp, info, exp, and historic
+ ipr values: trust200902, noModificationTrust200902, noDerivativesTrust200902,
+ or pre5378Trust200902
+ you can add the attributes updates="NNNN" and obsoletes="NNNN"
+ they will automatically be output with "(if approved)" -->
+
+ <!-- ***** FRONT MATTER ***** -->
+
+ <front>
+ <!-- The abbreviated title is used in the page header - it is only necessary if the
+ full title is longer than 39 characters -->
+
+ <title abbrev="Abbreviated Title">Put Your Internet Draft Title
+ Here</title>
+
+ <!-- add 'role="editor"' below for the editors if appropriate -->
+
+ <!-- Another author who claims to be an editor -->
+
+ <author fullname="Elwyn Davies" initials="E.B." role="editor"
+ surname="Davies">
+ <organization>Folly Consulting</organization>
+
+ <address>
+ <postal>
+ <street></street>
+
+ <!-- Reorder these if your country does things differently -->
+
+ <city>Soham</city>
+
+ <region></region>
+
+ <code></code>
+
+ <country>UK</country>
+ </postal>
+
+ <phone>+44 7889 488 335</phone>
+
+ <email>elwynd(a)dial.pipex.com</email>
+
+ <!-- uri and facsimile elements may also be added -->
+ </address>
+ </author>
+
+ <date year="2010" />
+
+ <!-- If the month and year are both specified and are the current ones, xml2rfc will fill
+ in the current day for you. If only the current year is specified, xml2rfc will fill
+ in the current day and month for you. If the year is not the current one, it is
+ necessary to specify at least a month (xml2rfc assumes day="1" if not specified for the
+ purpose of calculating the expiry date). With drafts it is normally sufficient to
+ specify just the year. -->
+
+ <!-- Meta-data Declarations -->
+
+ <area>General</area>
+
+ <workgroup>Internet Engineering Task Force</workgroup>
+
+ <!-- WG name at the upperleft corner of the doc,
+ IETF is fine for individual submissions.
+ If this element is not present, the default is "Network Working Group",
+ which is used by the RFC Editor as a nod to the history of the IETF. -->
+
+ <keyword>template</keyword>
+
+ <!-- Keywords will be incorporated into HTML output
+ files in a meta tag but they have no effect on text or nroff
+ output. If you submit your draft to the RFC Editor, the
+ keywords will be used for the search engine. -->
+
+ <abstract>
+ <t>Insert an abstract: MANDATORY. This template is for creating an
+ Internet Draft.</t>
+ </abstract>
+ </front>
+
+ <middle>
+ <section title="Introduction">
+ <t>The original specification of xml2rfc format is in <xref
+ target="RFC2629">RFC 2629</xref>.</t>
+
+ <section title="Requirements Language">
+ <t>The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
+ "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this
+ document are to be interpreted as described in <xref
+ target="RFC2119">RFC 2119</xref>.</t>
+ </section>
+ </section>
+
+ <section anchor="simple_list" title="Simple List">
+ <t>List styles: 'empty', 'symbols', 'letters', 'numbers', 'hanging',
+ 'format'.</t>
+
+ <t><list style="symbols">
+ <t>First bullet</t>
+
+ <t>Second bullet</t>
+ </list> You can write text here as well.</t>
+ </section>
+
+ <section title="Figures">
+ <t>Figures should not exceed 69 characters wide to allow for the indent
+ of sections.</t>
+
+ <figure align="center" anchor="xml_happy">
+ <preamble>Preamble text - can be omitted or empty.</preamble>
+
+ <artwork align="left"><![CDATA[
++-----------------------+
+| Use XML, be Happy :-) |
+|_______________________|
+ ]]></artwork>
+
+ <postamble>Cross-references allowed in pre- and postamble. <xref
+ target="min_ref" />.</postamble>
+ </figure>
+
+ <t>The CDATA means you don't need to escape meta-characters (especially
+ < (&lt;) and & (&amp;)) but is not essential.
+ Figures may also have a title attribute but it won't be displayed unless
+ there is also an anchor. White space, both horizontal and vertical, is
+ significant in figures even if you don't use CDATA.</t>
+ </section>
+
+ <!-- This PI places the pagebreak correctly (before the section title) in the text output. -->
+
+ <?rfc needLines="8" ?>
+
+ <section title="Subsections and Tables">
+ <section title="A Subsection">
+ <t>By default 3 levels of nesting show in table of contents but that
+ can be adjusted with the value of the "tocdepth" processing
+ instruction.</t>
+ </section>
+
+ <section title="Tables">
+ <t>.. are very similar to figures:</t>
+
+ <texttable anchor="table_example" title="A Very Simple Table">
+ <preamble>Tables use ttcol to define column headers and widths.
+ Every cell then has a "c" element for its content.</preamble>
+
+ <ttcol align="center">ttcol #1</ttcol>
+
+ <ttcol align="center">ttcol #2</ttcol>
+
+ <c>c #1</c>
+
+ <c>c #2</c>
+
+ <c>c #3</c>
+
+ <c>c #4</c>
+
+ <c>c #5</c>
+
+ <c>c #6</c>
+
+ <postamble>which is a very simple example.</postamble>
+ </texttable>
+ </section>
+ </section>
+
+ <section anchor="nested_lists" title="More about Lists">
+ <t>Lists with 'hanging labels': the list item is indented the amount of
+ the hangIndent: <list hangIndent="8" style="hanging">
+ <t hangText="short">With a label shorter than the hangIndent.</t>
+
+ <t hangText="fantastically long label">With a label longer than the
+ hangIndent.</t>
+
+ <t hangText="vspace_trick"><vspace blankLines="0" />Forces the new
+ item to start on a new line.</t>
+ </list></t>
+
+ <!-- It would be nice to see the next piece (12 lines) all on one page. -->
+
+ <?rfc needLines="12" ?>
+
+ <t>Simulating more than one paragraph in a list item using
+ <vspace>: <list style="letters">
+ <t>First, a short item.</t>
+
+ <t>Second, a longer list item.<vspace blankLines="1" /> And
+ something that looks like a separate pararaph..</t>
+ </list></t>
+
+ <t>Simple indented paragraph using the "empty" style: <list
+ hangIndent="10" style="empty">
+ <t>The quick, brown fox jumped over the lazy dog and lived to fool
+ many another hunter in the great wood in the west.</t>
+ </list></t>
+
+ <section title="Numbering Lists across Lists and Sections">
+ <t>Numbering items continuously although they are in separate
+ <list> elements, maybe in separate sections using the "format"
+ style and a "counter" variable.</t>
+
+ <t>First list: <list counter="reqs" hangIndent="4" style="format R%d">
+ <t>#1</t>
+
+ <t>#2</t>
+
+ <t>#3</t>
+ </list> Specify the indent explicitly so that all the items line up
+ nicely.</t>
+
+ <t>Second list: <list counter="reqs" hangIndent="4" style="format R%d">
+ <t>#4</t>
+
+ <t>#5</t>
+
+ <t>#6</t>
+ </list></t>
+ </section>
+
+ <section title="Where the List Numbering Continues">
+ <t>List continues here.</t>
+
+ <t>Third list: <list counter="reqs" hangIndent="4" style="format R%d">
+ <t>#7</t>
+
+ <t>#8</t>
+
+ <t>#9</t>
+
+ <t>#10</t>
+ </list> The end of the list.</t>
+ </section>
+ </section>
+
+ <section anchor="codeExample"
+ title="Example of Code or MIB Module To Be Extracted">
+ <figure>
+ <preamble>The <artwork> element has a number of extra attributes
+ that can be used to substitute a more aesthetically pleasing rendition
+ into HTML output while continuing to use the ASCII art version in the
+ text and nroff outputs (see the xml2rfc README for details). It also
+ has a "type" attribute. This is currently ignored except in the case
+ 'type="abnf"'. In this case the "artwork" is expected to contain a
+ piece of valid Augmented Backus-Naur Format (ABNF) grammar. This will
+ be syntax checked by xml2rfc and any errors will cause a fatal error
+ if the "strict" processing instruction is set to "yes". The ABNF will
+ also be colorized in HTML output to highlight the syntactic
+ components. Checking of additional "types" may be provided in future
+ versions of xml2rfc.</preamble>
+
+ <artwork><![CDATA[
+
+/**** an example C program */
+
+#include <stdio.h>
+
+void
+main(int argc, char *argv[])
+{
+ int i;
+
+ printf("program arguments are:\n");
+ for (i = 0; i < argc; i++) {
+ printf("%d: \"%s\"\n", i, argv[i]);
+ }
+
+ exit(0);
+} /* main */
+
+/* end of file */
+
+ ]]></artwork>
+ </figure>
+ </section>
+
+ <section anchor="Acknowledgements" title="Acknowledgements">
+ <t>This template was derived from an initial version written by Pekka
+ Savola and contributed by him to the xml2rfc project.</t>
+
+ <t>This document is part of a plan to make xml2rfc indispensable <xref
+ target="DOMINATION"></xref>.</t>
+ </section>
+
+ <!-- Possibly a 'Contributors' section ... -->
+
+ <section anchor="IANA" title="IANA Considerations">
+ <t>This memo includes no request to IANA.</t>
+
+ <t>All drafts are required to have an IANA considerations section (see
+ <xref target="RFC5226">Guidelines for Writing an IANA Considerations Section in RFCs</xref> for a guide). If the draft does not require IANA to do
+ anything, the section contains an explicit statement that this is the
+ case (as above). If there are no requirements for IANA, the section will
+ be removed during conversion into an RFC by the RFC Editor.</t>
+ </section>
+
+ <section anchor="Security" title="Security Considerations">
+ <t>All drafts are required to have a security considerations section.
+ See <xref target="RFC3552">RFC 3552</xref> for a guide.</t>
+ </section>
+ </middle>
+
+ <!-- *****BACK MATTER ***** -->
+
+ <back>
+ <!-- References split into informative and normative -->
+
+ <!-- There are 2 ways to insert reference entries from the citation libraries:
+ 1. define an ENTITY at the top, and use "ampersand character"RFC2629; here (as shown)
+ 2. simply use a PI "less than character"?rfc include="reference.RFC.2119.xml"?> here
+ (for I-Ds: include="reference.I-D.narten-iana-considerations-rfc2434bis.xml")
+
+ Both are cited textually in the same manner: by using xref elements.
+ If you use the PI option, xml2rfc will, by default, try to find included files in the same
+ directory as the including file. You can also define the XML_LIBRARY environment variable
+ with a value containing a set of directories to search. These can be either in the local
+ filing system or remote ones accessed by http (http://domain/dir/... ).-->
+
+ <references title="Normative References">
+ <!--?rfc include="http://xml.resource.org/public/rfc/bibxml/reference.RFC.2119.xml"?-->
+ &RFC2119;
+
+ <reference anchor="min_ref">
+ <!-- the following is the minimum to make xml2rfc happy -->
+
+ <front>
+ <title>Minimal Reference</title>
+
+ <author initials="authInitials" surname="authSurName">
+ <organization></organization>
+ </author>
+
+ <date year="2006" />
+ </front>
+ </reference>
+ </references>
+
+ <references title="Informative References">
+ <!-- Here we use entities that we defined at the beginning. -->
+
+ &RFC2629;
+
+ &RFC3552;
+
+ &RFC5226;
+
+ <!-- A reference written by by an organization not a person. -->
+
+ <reference anchor="DOMINATION"
+ target="http://www.example.com/dominator.html">
+ <front>
+ <title>Ultimate Plan for Taking Over the World</title>
+
+ <author>
+ <organization>Mad Dominators, Inc.</organization>
+ </author>
+
+ <date year="1984" />
+ </front>
+ </reference>
+ </references>
+
+ <section anchor="app-additional" title="Additional Stuff">
+ <t>This becomes an Appendix.</t>
+ </section>
+
+ <!-- Change Log
+
+v00 2006-03-15 EBD Initial version
+
+v01 2006-04-03 EBD Moved PI location back to position 1 -
+ v3.1 of XMLmind is better with them at this location.
+v02 2007-03-07 AH removed extraneous nested_list attribute,
+ other minor corrections
+v03 2007-03-09 EBD Added comments on null IANA sections and fixed heading capitalization.
+ Modified comments around figure to reflect non-implementation of
+ figure indent control. Put in reference using anchor="DOMINATION".
+ Fixed up the date specification comments to reflect current truth.
+v04 2007-03-09 AH Major changes: shortened discussion of PIs,
+ added discussion of rfc include.
+v05 2007-03-10 EBD Added preamble to C program example to tell about ABNF and alternative
+ images. Removed meta-characters from comments (causes problems).
+
+v06 2010-04-01 TT Changed ipr attribute values to latest ones. Changed date to
+ year only, to be consistent with the comments. Updated the
+ IANA guidelines reference from the I-D to the finished RFC. -->
+ </back>
+</rfc>
diff --git a/rfcs/src/draft-wibrown-ldapssotoken-00.xml b/rfcs/src/draft-wibrown-ldapssotoken-00.xml
new file mode 100644
index 0000000..c503744
--- /dev/null
+++ b/rfcs/src/draft-wibrown-ldapssotoken-00.xml
@@ -0,0 +1,441 @@
+<?xml version="1.0" encoding="US-ASCII"?>
+<!DOCTYPE rfc SYSTEM "rfc2629.dtd" [
+
+<!ENTITY RFC2119 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.2119.xml">
+<!ENTITY RFC2222 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.2222.xml">
+<!ENTITY RFC4511 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.4511.xml">
+]>
+
+<?xml-stylesheet type='text/xsl' href='rfc2629.xslt' ?>
+
+<?rfc strict="yes" ?>
+<?rfc toc="yes"?>
+<?rfc tocdepth="4"?>
+<?rfc symrefs="yes"?>
+<?rfc sortrefs="yes" ?>
+<?rfc compact="yes" ?>
+<?rfc subcompact="no" ?>
+<rfc category="std" docName="draft-wibrown-ldapssotoken-01" ipr="trust200902">
+
+<front>
+
+ <title abbrev="LDAP SSO Token">Draft LDAP Single Sign On Token Processing</title>
+
+
+ <author fullname="William Brown" initials="W.B." surname="Brown">
+ <organization>Red Hat Asia-Pacific Pty Ltd</organization>
+
+ <address>
+ <postal>
+ <street>Level 1, 193 North Quay</street>
+ <city>Brisbane</city>
+ <code>4000</code>
+ <region>Queensland</region>
+ <country>AU</country>
+ </postal>
+
+ <phone></phone>
+
+ <email>wibrown(a)redhat.com</email>
+
+ <!-- uri and facsimile elements may also be added -->
+ </address>
+ </author>
+
+ <author fullname="Simo Sorce" initials="S.S." surname="Sorce" role="editor">
+ <organization>Red Hat, Inc.</organization>
+
+ <address>
+ <postal>
+ <street></street>
+
+ <city></city>
+
+ <region></region>
+
+ <code></code>
+
+ <country></country>
+ </postal>
+
+ <phone></phone>
+
+ <email>simo(a)redhat.com</email>
+
+ <!-- uri and facsimile elements may also be added -->
+ </address>
+ </author>
+
+ <author fullname="Kieran Andrews" initials="K.A." surname="Andrews" role="editor">
+ <organization>The University of Adelaide</organization>
+
+ <address>
+ <postal>
+ <street></street>
+
+ <city>Adelaide</city>
+
+ <region>South Australia</region>
+
+ <code>5005</code>
+
+ <country>AU</country>
+ </postal>
+
+ <phone></phone>
+
+ <email>kieran.andrews(a)adelaide.edu.au</email>
+
+ <!-- uri and facsimile elements may also be added -->
+ </address>
+ </author>
+
+ <date year="2016"></date>
+
+
+ <area>General</area>
+
+ <workgroup>Internet Engineering Task Force</workgroup>
+
+ <!-- I am not sure of the appropriate keywords here -->
+ <keyword>draft-wibrown-ldapssotoken</keyword>
+
+ <abstract>
+ <t>LDAP Single Sign On Token is a SASL (Simple Authentication and Security Layer
+ <xref target="RFC2222">RFC 2222</xref>) mechanism to allow single sign-on to an LDAP
+ Directory Server environment. Tokens generated by the LDAP server can be transmitted through other
+ protocols and channels, allowing a broad range of clients and middleware to take advantage
+ of single sign-on in environments where Kerberos v5 or other Single Sign On mechanisms may not be available.</t>
+ </abstract>
+
+</front>
+
+
+<middle>
+
+ <section title="Introduction">
+ <t>The need for new, simple single sign-on capable systems has arisen
+ with the development of new technologies and systems. For these systems
+ we should be able to provide a simple, localised and complete single
+ sign-on service. This does not aim to replace Kerberos V5. It is designed for when Kerberos
+ is too invasive for installation in an environment.
+ </t>
+
+
+ <t>Tokens generated by this system should be able to be transmitted over
+ different protocols allowing middleware to relay tokens to clients.
+ Clients can then contact the middleware natively and the middleware can
+ negotiate the client authentication with the LDAP server.</t>
+
+ <!-- Use terms to describe instead -->
+ <t>This implementation will provide an LDAP extended operation to create
+ tokens which a client may cache, or relay to a further client. The token
+ can then be sent in a SASL bind request to the LDAP server. The token
+ remains valid over many binds. Finally, tokens
+ for a client are always able to be revoked at the LDAP Server using an
+ LDAP extended operation, allowing global
+ logout by the user or administrator.</t>
+
+
+ </section>
+
+ <section title="Requirements Language">
+ <t>The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
+ "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this
+ document are to be interpreted as described in <xref
+ target="RFC2119">RFC 2119</xref>.</t>
+ </section>
+
+ <section title="Format">
+ <t>This document has two components. A SASL Mechanism, and LDAP extended operations.</t>
+ <t>There is no strict requirement for the two to coexist: The LDAP Operation
+ is an implementation of the service providing tokens, and the SASL Mechanism to authenticate them.</t>
+ <t>In theory, an alternate protocol and database could generate and authenticate these tokens.</t>
+ </section>
+
+ <section title="SASL Component">
+ <section title="Token formats">
+ <t>Token formats are server implementation specific: As they
+ are the only entity that will decrypt and consume them, they have
+ the option to provide these in any format they wish. </t>
+ <t>This means the client will only see an opaque data structure, and will only
+ need to transmit this opaque structure as part of the authentication request.</t>
+
+ <t>For the token system to operate correctly the server MUST
+ generate tokens that contain at least these three values:</t>
+ <t>
+ <list style="symbols">
+ <t>Date Time Issued</t>
+ <t>Date Time Until</t>
+ <t>User Unique Id</t>
+ </list>
+ </t>
+ <t>As the client does not ever see the contents the User Unique Id can be
+ anything within the database that uniquely identifies the user
+ that is the holder of the token.</t>
+ <t>The User Unique Id MUST be a UTF8 String.</t>
+ <t>The token format MUST be encrypted. The token format can be
+ decrypted with either an asymmetric or symmetric keying system. </t>
+ <t>The token format MUST have a form of data authentication.
+ This can be through authenticated encryption, or validation of a hash.</t>
+ <t>The Date Time Issued MUST be a complete timestamp in UTC, to
+ prevent issues with changing timezones.</t>
+ <t>Without these guarantees, the token system is not secure,
+ and is vulnerable to credential forgery attacks.</t>
+
+ <t>Here is an EXAMPLE ASN.1 format that would be encrypted and
+ sent to the client:</t>
+ <figure align="left" anchor="asn_token_example">
+ <artwork align="left"><![CDATA[
+LDAPSSOToken ::= SEQUENCE {
+ DateTimeIssued GeneralizedTime,
+ DateTimeUntil GeneralizedTime,
+ UserUniqueId UTF8String }
+ ]]></artwork>
+ </figure>
+ <t>This would be encrypted with AES-GCM and transmitted to the
+ client.</t>
+
+ <!-- make this an xref -->
+ <t>Another example would be to use a fernet token
+ <xref target="FERNETSPEC">Fernet Specification</xref>.</t>
+
+ <figure align="left" anchor="fernet_token_example">
+ <artwork align="left"><![CDATA[
+Version || Timestamp || IV || Ciphertext || HMAC
+ ]]></artwork>
+ </figure>
+
+ <t>Timestamp can be considered to be the DateTimeIssued as:</t>
+
+ <t>"This field is a 64-bit unsigned big-endian integer. It records
+ the number of seconds elapsed between January 1, 1970 UTC and the
+ time the token was created."</t>
+
+ <t>We can then create a Cipher text containing:</t>
+
+ <figure align="left" anchor="fernet_tokendata_example">
+ <artwork align="left"><![CDATA[
+Date Time Until || User Unique Id
+ ]]></artwork>
+ </figure>
+
+ <t>The Date Time Until is a 64-bit unsigned big-endian integer. It is,
+ like Date Time Issued, the number of seconds since January 1, 1970
+ UTC, and the token creation time added to the number of seconds of
+ the requested life time. </t>
+
+ <t>This example format satisfies all of our data requirements for the SSO token
+ system.</t>
+
+ </section>
+
+ <section title="SASL Client">
+ <t>The client will request a token from the authentication server.
+ The acquisition method for the token is discussed in section XXX.</t>
+ <t>For authentication, the client MUST send the token as it was received.
+ IE changes to formatting are not permitted.</t>
+ <t>The client MAY transform the token if acting in a proxy fashion.
+ However this transformation must be deterministic and able to be
+ reversed to satisfy the previous requirement.</t>
+ <figure align="left" anchor="server_transform_example">
+ <artwork align="left"><![CDATA[
++-------+ +-------------+ +--------+
+| LDAP | | HTTP server | | Client |
+| | | | <- Login -- | |
+| | <-- Bind -- | | | |
+| | - Success -> | | | |
+| | <- Req Token | | | |
+| | -- Token --> | | | |
+| | <- Unbind - | | | |
+| | - Success -> | | | |
+| | | Html Escape | | |
+| | | | -- Safe --> | |
+| | | | Token | |
+| | | | | Store |
+| | | | < Request +- | |
+| | | Reverse esc | Token | |
+| | < Token Bind | | | |
+| | - Success -> | | | |
+| | <- Operation | | | |
+| | <- Unbind - | | | |
+| | - Success -> | | | |
+| | | | - Response > | |
++-------+ +-------------+ +--------+
+ ]]></artwork>
+ </figure>
+ <t>This example shows how a client is issued with a token when
+ communicating with a web server via the HTTP intermediate. The Client
+ does not need to be aware of the SASL/LDAP system in the background,
+ or the token's formatting rules. Provided the HTTP server in
+ proxy, if required to transform the token, is able to undo the
+ transformations, this is a valid scenario. For example, HTML escaping
+ a base64 token.</t>
+ </section>
+
+ <section title="SASL Authentication">
+ <t>The client issues a SASL bind request with the mechanism name
+ LDAPSSOTOKEN.</t>
+ <t>The client provides the encrypted token that was provided in
+ the LDAPSSOTokenResponse Token Field.</t>
+ <t>The token is decrypted and authenticated based on the token
+ format selected by the server. The server MAY attempt multiple
+ token keys and or formats to find the correct issuing format and
+ key.</t>
+ <t>If the token decryption fails, the attempt with this key and
+ format MUST be considered to fail.</t>
+ <t>If the values have been tampered with, IE hash authentication fails, the attempt with the key
+ and format MUST be considered to fail. </t>
+ <t>The token decryption MUST return a valid DateTimeUntil,
+ DateTimeIssued and User Unique Id. If this is not returned, the decryption
+ MUST be considered to fail.</t>
+ <t>If all token formats and keys fail to decrypt, this MUST cause an
+ invalidCredentials error.</t>
+ <t>The DateTimeUntil field is checked against the server's current
+ time. If the current time exceeds or is equal to DateTimeUntil,
+ invalidCredentials MUST be returned.</t>
+ <t>The User Unique Id is validated to exist on the server. If the User Unique Id
+ does not exist, invalidCredentials MUST be returned.</t>
+ <t>The DateTimeIssued field is validated against the User Unique Id object's
+ attribute or related attribute that contains "Valid Not Before". If the value of
+ "Valid Not Before" exceeds or is equal to DateTimeIssued,
+ invalidCredentials MUST be returned.</t>
+ <t>Only if all of these steps have succeeded, then the authentication is considered successful. </t>
+ </section>
+
+ <section title="Valid Not Before Attribute">
+ <t>The management and details of the "Valid Not Before" attribute
+ are left to the implementation to decide how to implement and
+ manage. The implementation should consider how an administrator
+ or responsible party could revoke tokens for users other than their
+ own. The Valid Not Before SHOULD be replicated between LDAP servers
+ to allow correct revocation across many LDAP servers. For example,
+ Valid Not Before MAY be an attribute on the User Unique Id object, or MAY be on another
+ object with a unique relation to the User Unique Id.</t>
+ </section>
+ </section>
+
+ <section title="LDAP Component">
+
+ <section title="Token Generation">
+ <t>An ldap extended operation is issued as per Section 4.12 of
+ <xref target="RFC4511">RFC 4511</xref>.</t>
+ <t>The LDAP OID to be used for the LDAPSSOTokenRequest is 2.16.840.1.113730.3.5.14.</t>
+ <t>The LDAP OID to be used for the LDAPSSOTokenResponse is 2.16.840.1.113730.3.5.15.</t>
+ <t>A User Unique Id is selected. This may be the Bind DN, UUID or other
+ utf8 identifier that uniquely determines an object.</t>
+ <t>The extended operation must fail if the LDAP connection security strength factor is 0.</t>
+ <t>Tokens must not be generated for Anonymous binds. This means,
+ tokens may only be generated for connections with a valid bind dn set.</t>
+ <t>Token requests MUST contain a requested lifetime in seconds.
+ The server MAY choose to ignore this lifetime and set its own
+ value.</t>
+ <t>A token request of a negative or zero value SHOULD default to
+ a server defined minimum lifetime.</t>
+ <t>The token is created as per an example token format in 4.1. This value
+ is then encrypted with an encryption algorithm of the server's
+ choosing. The client does not need to be aware of the encryption
+ algorithm.</t>
+ <t>The DateTimeIssued, DateTimeUntil and User Unique Id are collected in
+ the format required by the token format we are choosing to use in
+ the server. The token is then generated by the chosen
+ algorithm.</t>
+ <t>The encrypted token is sent to the client in the
+ LDAPSSOTokenResponse structure, along with the server's chosen valid
+ life time as a guide for the client to approximate the expiry of the
+ token. This valid life time value is in seconds.</t>
+ <t>If the token cannot be generated due to a server error, LDAP_OPERATION_ERROR MUST be returned.</t>
+
+ <section title="Token Generation Extended Operation">
+ <figure align="left" anchor="token_generation_ext_op">
+ <artwork align="left"><![CDATA[
+LDAPSSOTokenRequest ::= SEQUENCE {
+ ValidLifeTime INTEGER }
+
+LDAPSSOTokenResponse ::= SEQUENCE {
+ ValidLifeTime INTEGER,
+ EncryptedToken OCTET STRING
+}
+ ]]></artwork>
+ </figure>
+ </section>
+ </section>
+
+ <section title="Token Revocation">
+ <t>An ldap extended operation is issued as per Section 4.12
+ <xref target="RFC4511">RFC 4511</xref>. </t>
+ <t>The LDAP OID to be used for LDAPSSOTokenRevokeRequest is 2.16.840.1.113730.3.5.16.</t>
+ <t>The extended operation MUST fail if the connection is
+ anonymous.</t>
+ <t>The extended operation MUST fail if the LDAP connection security strength factor is 0.</t>
+ <t>The extended operation MUST only act upon the "Valid Not Before"
+ attribute related to the bind DN of the connection.</t>
+ <t>Upon receiving the extended operation to revoke tokens, the
+ directory server MUST set the current BindDN's related "Valid Not Before" attribute timestamp to the current datetime. This will
+ have the effect, that all previously issued tokens are invalidated.</t>
+ <t>This revocation option must work regardless of directory server
+ access controls on the attribute containing "Valid Not Before".</t>
+ <section title="Token Revocation Extended Operation">
+ <t>The extended operation requestValue MUST NOT be set for
+ LDAP SSO Token revocation.</t>
+ <t>The extended operation does not provide a response OID. The result is set in the LDAPResult.</t>
+ </section>
+ </section>
+
+ <section title="Binding">
+ <t>The SASL bind attempt MUST fail if the LDAP connection security strength factor is 0.</t>
+ <t>The SASL Authentication is attempted as per Section 4.3. If this does not succeed, the bind attempt MUST fail.</t>
+ <t>The LDAP Object is retrieved from the User Unique Id, and a Bind DN is determined. If no Bind DN can be determined, the bind attempt MUST fail.</t>
+ <t>The current Bind DN MUST be set to the Bind DN of the LDAP object that is determined, and the result ldap success is returned to the LDAP client.</t>
+ </section>
+
+ </section>
+
+ <section title="Security Considerations">
+ <t>Due to the design of this token, it is possible to use it in a replay
+ attack. Notable threats are storage on the client and man in the middle attacks.
+ To minimise the man-in-the-middle attack threat, an LDAP security strength factor of greater than 0 is a requirement.
+ Client security is not covered by this document.</t>
+ </section>
+
+ <section title="Requirements">
+ <t>The SASL mechanism, LDAPSSOTOKEN, MUST be registered to IANA as per
+ <xref target="RFC2222">RFC 2222</xref> Section 6.4</t>
+ </section>
+
+
+</middle>
+
+<back>
+
+ <references title="Normative References">
+ &RFC2119;
+
+ </references>
+
+ <references title="Informative References">
+ &RFC2222;
+
+ &RFC4511;
+ <!-- Add the fernet reference -->
+
+ <reference anchor="FERNETSPEC"
+ target="https://github.com/fernet/spec/blob/master/Spec.md">
+ <front>
+ <title>Fernet Specification</title>
+
+ <author fullname="Tom Maher" initials="T.M." surname="Maher">
+ </author>
+
+ <author fullname="Keith Rarick" initials="K.R." surname="Rarick">
+ </author>
+
+ <date year="2013" />
+ </front>
+ </reference>
+
+ </references>
+
+</back>
+
+</rfc>
6 years, 10 months