dirsrvtests/create_test.py
by Simon Pichugin
dirsrvtests/create_test.py | 21 ++++++---------------
1 file changed, 6 insertions(+), 15 deletions(-)
New commits:
commit 8a9955215e5a4a626fc861e33ff67f6a30bbadaf
Author: Simon Pichugin <spichugi(a)redhat.com>
Date: Tue Jan 10 11:47:16 2017 +0100
Ticket 49055 - Fix create_test.py issues
Bug description: It is impossible to create a test case with a default
set of instances. Also, creating a replication setup fails.
Fix description: Fix the default set condition to check for default values.
Make one common docstring for replication and standalone deployments.
https://fedorahosted.org/389/ticket/49055
Reviewed by: vashirov (Thanks!)
diff --git a/dirsrvtests/create_test.py b/dirsrvtests/create_test.py
index 887580e..4c2876e 100755
--- a/dirsrvtests/create_test.py
+++ b/dirsrvtests/create_test.py
@@ -185,7 +185,8 @@ if len(sys.argv) > 0:
# Extract usable values
ticket = args.ticket
suite = args.suite
- if not args.inst and not args.masters and not args.hubs and not args.consumers:
+ if args.inst == '0' and args.masters == '0' and args.hubs == '0' \
+ and args.consumers == '0':
instances = 1
my_topology = [True, 'topology_st']
else:
@@ -656,20 +657,10 @@ if len(sys.argv) > 0:
# Write the test function
if ticket:
TEST.write('def test_ticket{}({}):\n'.format(ticket, my_topology[1]))
- if repl_deployment:
- TEST.write(' """Write your replication test here.\n\n')
- TEST.write(' To access each DirSrv instance use: ' +
- 'topology.master1, topology.master2,\n' +
- ' ..., topology.hub1, ..., topology.consumer1' +
- ',...\n\n')
- TEST.write(' Also, if you need any testcase initialization,\n')
- TEST.write(' please, write additional fixture for that' +
- '(including finalizer).\n')
- else:
- TEST.write(' """Write your testcase here...\n\n')
- TEST.write(' Also, if you need any testcase initialization,\n')
- TEST.write(' please, write additional fixture for that' +
- '(include finalizer).\n')
+ TEST.write(' """Write your testcase here...\n\n')
+ TEST.write(' Also, if you need any testcase initialization,\n')
+ TEST.write(' please, write additional fixture for that' +
+ '(include finalizer).\n')
TEST.write(' """\n\n')
else:
TEST.write('def test_something({}):\n'.format(my_topology[1]))
7 years, 3 months
ldap/servers
by William Brown
ldap/servers/plugins/pwdstorage/crypt_pwd.c | 2 -
ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 7 +++++-
ldap/servers/slapd/daemon.c | 30 ++++++++++++++++++++-------
ldap/servers/slapd/getsocketpeer.c | 7 ++++++
ldap/servers/slapd/ldaputil.c | 26 +++++++++++++++++++++++
ldap/servers/slapd/localhost.c | 2 -
ldap/servers/slapd/main.c | 11 ++++++++-
ldap/servers/slapd/tools/pwenc.c | 2 -
ldap/servers/slapd/uniqueidgen.c | 5 ++++
ldap/servers/slapd/uuid.c | 4 +++
10 files changed, 83 insertions(+), 13 deletions(-)
New commits:
commit 4ce95a75b83db054aff205aeae22794bc309a426
Author: William Brown <wibrown(a)redhat.com>
Date: Thu May 12 16:50:46 2016 +1000
Ticket 48797 - Add freebsd support to ns-slapd: main
Fix Description: This patch adds the final component of support to allow
operation on FreeBSD. This includes the disk monitoring code, Heimdal Kerberos
changes and correction of an ldbm error code.
https://fedorahosted.org/389/ticket/48797
Author: wibrown
Review by: nhosoi, lslebodn (thanks!)
diff --git a/ldap/servers/plugins/pwdstorage/crypt_pwd.c b/ldap/servers/plugins/pwdstorage/crypt_pwd.c
index 0fd3b85..dfd5af9 100644
--- a/ldap/servers/plugins/pwdstorage/crypt_pwd.c
+++ b/ldap/servers/plugins/pwdstorage/crypt_pwd.c
@@ -20,7 +20,7 @@
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
-#if defined( hpux ) || defined (LINUX)
+#if defined( hpux ) || defined (LINUX) || defined (__FreeBSD__)
#ifndef __USE_XOPEN
#define __USE_XOPEN /* linux */
#endif /* __USE_XOPEN */
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
index ad33efa..e6e0af3 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
@@ -1601,7 +1601,12 @@ _entryrdn_open_index(backend *be, struct attrinfo **ai, DB **dbp)
/* Open the entryrdn index */
ainfo_get(be, LDBM_ENTRYRDN_STR, ai);
if (NULL == *ai) {
- rc = ENODATA;
+ /*
+ * ENODATA exists on linux, but not other platforms. Change to -1, as
+ * all callers to this function only ever check != 0.
+ */
+ slapi_log_err(SLAPI_LOG_ERR, "_entryrdn_open_index", "EntryRDN str for attrinfo is null, unable to proceed.\n");
+ rc = -1;
goto bail;
}
inst = (ldbm_instance *)be->be_instance_info;
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 4e1dc5e..a37c8c6 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -39,10 +39,14 @@
#endif /* NEED_FILIO */
/* for some reason, linux tty stuff defines CTIME */
#include <stdio.h>
+#if defined(LINUX) || defined(__FreeBSD__)
#ifdef LINUX
#undef CTIME
#include <sys/statfs.h>
-#else
+#endif /* linux*/
+#include <sys/param.h>
+#include <sys/mount.h>
+#else /* Linux or fbsd */
#include <sys/statvfs.h>
#include <sys/mnttab.h>
#endif
@@ -346,7 +350,7 @@ disk_mon_get_mount_point(char *dir)
return NULL;
}
-#else /* Linux */
+#elif LINUX /* Linux */
char *
disk_mon_get_mount_point(char *dir)
{
@@ -376,6 +380,17 @@ disk_mon_get_mount_point(char *dir)
return NULL;
}
+#elif __FreeBSD__
+char *
+disk_mon_get_mount_point(char *dir)
+{
+ struct statfs sb;
+ if (statfs(dir, &sb) != 0) {
+ return NULL;
+ }
+
+ return slapi_ch_strdup(sb.f_mntonname);
+}
#endif
/*
@@ -387,8 +402,9 @@ disk_mon_add_dir(char ***list, char *directory)
{
char *dir = disk_mon_get_mount_point(directory);
- if(dir == NULL)
+ if(dir == NULL) {
return;
+ }
if(!charray_inlist(*list,dir)){
slapi_ch_array_add(list, dir);
@@ -444,7 +460,7 @@ disk_mon_get_dirs(char ***list, int logs_critical){
char *
disk_mon_check_diskspace(char **dirs, PRUint64 threshold, PRUint64 *disk_space)
{
-#ifdef LINUX
+#if defined(LINUX) || defined(__FreeBSD__)
struct statfs buf;
#else
struct statvfs buf;
@@ -457,10 +473,10 @@ disk_mon_check_diskspace(char **dirs, PRUint64 threshold, PRUint64 *disk_space)
int i = 0;
for(i = 0; dirs && dirs[i]; i++){
-#ifndef LINUX
- if (statvfs(dirs[i], &buf) != -1)
-#else
+#if defined(LINUX) || defined(__FreeBSD__)
if (statfs(dirs[i], &buf) != -1)
+#else
+ if (statvfs(dirs[i], &buf) != -1)
#endif
{
LL_UI2L(freeBytes, buf.f_bavail);
diff --git a/ldap/servers/slapd/getsocketpeer.c b/ldap/servers/slapd/getsocketpeer.c
index 2a738a1..ce43b40 100644
--- a/ldap/servers/slapd/getsocketpeer.c
+++ b/ldap/servers/slapd/getsocketpeer.c
@@ -94,10 +94,13 @@ int slapd_get_socket_peer(PRFileDesc *nspr_fd, uid_t *uid, gid_t *gid)
iov.iov_len = sizeof(dummy);
msg.msg_iov = &iov;
msg.msg_iovlen = 1;
+#ifndef __FreeBSD__
msg.msg_accrights = (caddr_t)&pass_sd;
msg.msg_accrightslen = sizeof(pass_sd); /* Initialize it with 8 bytes.
If recvmsg is successful,
4 is supposed to be returned. */
+
+#endif
/*
Since PR_SockOpt_Nonblocking is set to the socket,
recvmsg returns immediately if no data is waiting to be received.
@@ -107,7 +110,11 @@ int slapd_get_socket_peer(PRFileDesc *nspr_fd, uid_t *uid, gid_t *gid)
while ((rc = recvmsg(fd, &msg, MSG_PEEK)) < 0 && (EAGAIN == (myerrno = errno)) && retrycnt-- >= 0)
;
+#ifdef __FreeBSD__
+ if (rc >= 0)
+#else
if (rc >= 0 && msg.msg_accrightslen == sizeof(int))
+#endif
{
struct stat st;
diff --git a/ldap/servers/slapd/ldaputil.c b/ldap/servers/slapd/ldaputil.c
index e5b7c56..ca16588 100644
--- a/ldap/servers/slapd/ldaputil.c
+++ b/ldap/servers/slapd/ldaputil.c
@@ -66,6 +66,15 @@
#include <ldap_ssl.h>
#include <ldappr.h>
#else
+
+#ifdef HAVE_HEIMDAL_KERBEROS
+#include <com_err.h>
+#endif
+
+#ifndef MAX_KEYTAB_NAME_LEN
+#define MAX_KEYTAB_NAME_LEN 1100
+#endif
+
/* need mutex around ldap_initialize - see https://fedorahosted.org/389/ticket/348 */
static PRCallOnceType ol_init_callOnce = {0,0,0};
static PRLock *ol_init_lock = NULL;
@@ -1667,7 +1676,11 @@ show_one_credential(int authtracelevel,
char *logname = "show_one_credential";
krb5_error_code rc;
char *name = NULL, *sname = NULL;
+#ifdef HAVE_HEIMDAL_KERBEROS
+ krb5_timestamp startts, endts, renewts;
+#else
char startts[BUFSIZ], endts[BUFSIZ], renewts[BUFSIZ];
+#endif
if ((rc = krb5_unparse_name(ctx, cred->client, &name))) {
slapi_log_err(SLAPI_LOG_ERR, logname,
@@ -1684,6 +1697,13 @@ show_one_credential(int authtracelevel,
if (!cred->times.starttime) {
cred->times.starttime = cred->times.authtime;
}
+#ifdef HAVE_HEIMDAL_KERBEROS
+ slapi_log_error(authtracelevel, logname,
+ "\tKerberos credential: client [%s] server [%s] "
+ "start time [%s] end time [%s] renew time [%s] "
+ "flags [0x%x]\n", name, sname, ctime(&startts), ctime(&endts),
+ ctime(&renewts), (uint32_t)cred->flags.i);
+#else
krb5_timestamp_to_sfstring((krb5_timestamp)cred->times.starttime,
startts, sizeof(startts), NULL);
krb5_timestamp_to_sfstring((krb5_timestamp)cred->times.endtime,
@@ -1696,6 +1716,7 @@ show_one_credential(int authtracelevel,
"start time [%s] end time [%s] renew time [%s] "
"flags [0x%x]\n", name, sname, startts, endts,
renewts, (uint32_t)cred->ticket_flags);
+#endif
cleanup:
krb5_free_unparsed_name(ctx, name);
@@ -1796,8 +1817,13 @@ credentials_are_valid(
order to set mcreds.server required in order
to use krb5_cc_retrieve_creds() */
/* get default realm first */
+#ifdef HAVE_HEIMDAL_KERBEROS
+ realm_str = krb5_principal_get_realm(ctx, princ);
+ realm_len = krb5_realm_length(realm_str);
+#else
realm_len = krb5_princ_realm(ctx, princ)->length;
realm_str = krb5_princ_realm(ctx, princ)->data;
+#endif
tgs_princ_name = slapi_ch_smprintf("%s/%*s@%*s", KRB5_TGS_NAME,
realm_len, realm_str,
realm_len, realm_str);
diff --git a/ldap/servers/slapd/localhost.c b/ldap/servers/slapd/localhost.c
index f48fa49..e702c70 100644
--- a/ldap/servers/slapd/localhost.c
+++ b/ldap/servers/slapd/localhost.c
@@ -24,7 +24,7 @@
#include <resolv.h>
#include <errno.h>
#include "slap.h"
-#if defined(USE_SYSCONF) || defined(LINUX)
+#if defined(USE_SYSCONF) || defined(LINUX) || defined( __FreeBSD__ )
#include <unistd.h>
#endif /* USE_SYSCONF */
diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
index a59b7d5..2f12b2f 100644
--- a/ldap/servers/slapd/main.c
+++ b/ldap/servers/slapd/main.c
@@ -11,7 +11,9 @@
# include <config.h>
#endif
-#include <malloc.h>
+/* This was malloc.h - but it's moved to stdlib.h on most platforms, and FBSD is strict */
+/* Make it stdlib.h, and revert to malloc.h with ifdefs if we have issues here. WB 2016 */
+#include <stdlib.h>
#include <ldap.h>
#undef OFF
#undef LITTLE_ENDIAN
@@ -31,7 +33,7 @@
#include <arpa/inet.h>
#include <netdb.h>
#include <pwd.h> /* getpwnam */
-#if !defined(LINUX)
+#if !defined(LINUX) && !defined(__FreeBSD__)
union semun {
int val;
struct semid_ds *buf;
@@ -60,6 +62,11 @@ union semun {
#include "smrtheap.h"
#endif
+#ifdef LINUX
+/* For mallopt. Should be removed soon. */
+#include <malloc.h>
+#endif
+
/* Forward Declarations */
static void register_objects(void);
static void process_command_line(int argc, char **argv, char *myname, char **extraname);
diff --git a/ldap/servers/slapd/tools/pwenc.c b/ldap/servers/slapd/tools/pwenc.c
index caea653..58a38cc 100644
--- a/ldap/servers/slapd/tools/pwenc.c
+++ b/ldap/servers/slapd/tools/pwenc.c
@@ -16,7 +16,7 @@
#include <sys/errno.h>
#include <sys/param.h>
#include <sys/types.h>
-#if defined(LINUX) /* I bet other Unix would like
+#if defined(LINUX) || defined(__FreeBSD__) /* I bet other Unix would like
* this flag. But don't want to
* break other builds so far */
#include <unistd.h>
diff --git a/ldap/servers/slapd/uniqueidgen.c b/ldap/servers/slapd/uniqueidgen.c
index 6ac0799..372ab69 100644
--- a/ldap/servers/slapd/uniqueidgen.c
+++ b/ldap/servers/slapd/uniqueidgen.c
@@ -16,7 +16,12 @@
#include <string.h>
#include <sys/types.h>
#include <sys/time.h>
+
+/* What platforms actually need this? */
+#ifdef HAVE_SYS_SYSINFO_H
#include <sys/sysinfo.h>
+#endif
+
#include <sys/utsname.h>
#include "nspr.h"
#include "slap.h"
diff --git a/ldap/servers/slapd/uuid.c b/ldap/servers/slapd/uuid.c
index 9b1b561..96b4c26 100644
--- a/ldap/servers/slapd/uuid.c
+++ b/ldap/servers/slapd/uuid.c
@@ -40,7 +40,11 @@
#include <pk11func.h>
#include <sys/types.h>
#include <sys/time.h>
+
+#ifdef HAVE_SYS_SYSINFO_H
#include <sys/sysinfo.h>
+#endif
+
#include <sys/utsname.h>
#include <unistd.h> /* gethostname() */
#include "slap.h"
7 years, 3 months
dirsrvtests/cmd dirsrvtests/create_test.py
by Simon Pichugin
dirsrvtests/cmd/dsadm/dsadm.py | 543 -----------------------------------------
dirsrvtests/create_test.py | 121 +++++----
2 files changed, 75 insertions(+), 589 deletions(-)
New commits:
commit 98c88d0520ec7ce9324917d388dceda3737ae4d8
Author: Simon Pichugin <spichugi(a)redhat.com>
Date: Thu Jan 5 16:01:54 2017 +0100
Ticket 49055 - Refactor create_test.py
Description: create_test.py now takes into account the new
changes to tickets, suites and topology fixtures.
- If you choose a topology that already exists, create_test.py will just
import the suitable fixture.
- And if you choose a non existing topology, create_test.py will make it
for you. So you can move it to lib389/topologies.py later.
Also, remove dirsrvtests/cmd dir, because it conflicts with another
Python module and lib389 already has this functionality.
https://fedorahosted.org/389/ticket/49055
Reviewed by: wibrown (Thanks!)
diff --git a/dirsrvtests/cmd/__init__.py b/dirsrvtests/cmd/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/dirsrvtests/cmd/dsadm/__init__.py b/dirsrvtests/cmd/dsadm/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/dirsrvtests/cmd/dsadm/dsadm.py b/dirsrvtests/cmd/dsadm/dsadm.py
deleted file mode 100755
index 247a295..0000000
--- a/dirsrvtests/cmd/dsadm/dsadm.py
+++ /dev/null
@@ -1,543 +0,0 @@
-#! /usr/bin/python2
-#
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-
-# Authors:
-# Thierry Bordaz <tbordaz(a)redhat.com>
-
-import sys
-import os
-import argparse
-import pdb
-import tempfile
-import time
-import pwd
-import grp
-import platform
-import socket
-import shutil
-from subprocess import Popen, PIPE, STDOUT
-import string
-
-SETUP_DS = "/sbin/setup-ds.pl"
-REMOVE_DS = "/sbin/remove-ds.pl"
-INITCONFIGDIR = ".dirsrv"
-SCRIPT_START = "start-slapd"
-SCRIPT_STOP = "stop-slapd"
-SCRIPT_RESTART = "restart-slapd"
-ENVIRON_SERVERID = '389-SERVER-ID'
-ENVIRON_USER = '389-USER'
-ENVIRON_GROUP = '389-GROUP'
-ENVIRON_DIRECTORY = '389-DIRECTORY'
-ENVIRON_PORT = '389-PORT'
-ENVIRON_SECURE_PORT = '389-SECURE-PORT'
-DEFAULT_PORT_ROOT = str(389)
-DEFAULT_PORT_NON_ROOT = str(1389)
-DEFAULT_SECURE_PORT_ROOT = str(636)
-DEFAULT_SECURE_PORT_NON_ROOT = str(1636)
-DEFAULT_USER = 'nobody'
-DEFAULT_GROUP = 'nobody'
-DEFAULT_ROOT_DN = 'cn=Directory Manager'
-DEFAULT_HOSTNAME = socket.gethostname()
-
-
-
-def validate_user(user):
- '''
- If a user is provided it returns its username
- else it returns the current username.
- It checks that the userId or userName exists
-
- :param: user (optional) can be a userName or userId
- :return: userName of the provided user, if none is provided, it returns current user name
- '''
- assert(user)
- if user.isdigit():
- try:
- username = pwd.getpwuid(int(user)).pw_name
- except KeyError:
- raise KeyError('Unknown userId %d' % user)
- return username
- else:
- try:
- pwd.getpwnam(user).pw_uid
- except KeyError:
- raise KeyError('Unknown userName %s' % user)
- return user
-
-def get_default_user():
- user = os.environ.get(ENVIRON_USER, None)
- if not user:
- user = os.getuid()
- return str(user)
-
-def get_default_group():
- '''
- If a group is provided it returns its groupname
- else it returns the current groupname.
- It checks that the groupId or groupName exists
-
- :param: group (optional) can be a groupName or groupId
- :return: groupName of the provided group, if none is provided, it returns current group name
- '''
- group = os.environ.get(ENVIRON_GROUP, None)
- if not group:
- return pwd.getpwuid(os.getuid()).pw_name
- return group
-
-def validate_group(group):
- assert(group)
- if str(group).isdigit():
- try:
- groupname = grp.getgrgid(group).gr_name
- return groupname
- except:
- raise KeyError('Unknown groupId %d' % group)
- else:
- try:
- groupname = grp.getgrnam(group).gr_name
- return groupname
- except:
- raise KeyError('Unknown groupName %s' % group)
-
-def test_get_group():
- try:
- grpname = get_default_group()
- print('get_group: %s' % grpname)
- except:
- raise
- print("Can not find user group")
- pass
- try:
- grpname = get_default_group(group='tbordaz')
- print('get_group: %s' % grpname)
- except:
- raise
- print("Can not find user group")
- pass
- try:
- grpname = get_default_group(group='coucou')
- print('get_group: %s' % grpname)
- except:
- print("Can not find user group coucou")
- pass
- try:
- grpname = get_default_group('thierry')
- print('get_group: %s' % grpname)
- except:
- raise
- print("Can not find user group thierry")
- pass
- try:
- grpname = get_default_group(1000)
- print('get_group: %s' % grpname)
- except:
- raise
- print("Can not find user group 1000")
- pass
- try:
- grpname = get_default_group(20532)
- print('get_group: %s' % grpname)
- except:
- raise
- print("Can not find user group 20532")
- pass
- try:
- grpname = get_default_group(123)
- print('get_group: %s' % grpname)
- except:
- print("Can not find user group 123")
- pass
-
-def get_default_port():
- port = os.environ.get(ENVIRON_PORT, None)
- if port:
- return port
-
- if os.getuid() == 0:
- return DEFAULT_PORT_ROOT
- else:
- return DEFAULT_PORT_NON_ROOT
-
-def validate_port(port):
- assert port
- if not port.isdigit() or int(port) <= 0 :
- raise Exception("port number is invalid: %s" % port)
-
-def get_default_directory():
- directory = os.environ.get(ENVIRON_DIRECTORY, None)
- if not directory:
- directory = os.getcwd()
- return directory
-
-def validate_directory(directory):
- assert directory
- if not os.path.isdir(directory):
- raise Exception("Supplied directory path is not a directory")
-
- if not os.access(directory, os.W_OK):
- raise Exception("Supplied directory is not writable")
-
-def get_default_serverid():
- serverid = os.environ.get(ENVIRON_SERVERID, None)
- if not serverid:
- serverid = socket.gethostname().split('.')[0]
- return serverid
-
-def validate_serverid(serverid):
- if not serverid:
- raise Exception("Server id is not defined")
- return serverid
-
-
-def get_inst_dir(serverid):
- assert serverid
- home = os.getenv("HOME")
- inst_initconfig_file = "%s/%s/dirsrv-%s" % (home, INITCONFIGDIR, serverid)
- if not os.path.isfile(inst_initconfig_file):
- raise Exception("%s config file not found" % inst_initconfig_file)
- f = open(inst_initconfig_file, "r")
- for line in f:
- if line.startswith("INST_DIR"):
- inst_dir = line.split("=")[1]
- inst_dir = inst_dir.replace("\r", "")
- inst_dir = inst_dir.replace("\n", "")
- return inst_dir
-
-def sanity_check():
- if os.getuid() == 0:
- raise Exception("Not tested for root user.. sorry")
-
- home = os.getenv("HOME")
- inst_initconfig_dir = "%s/%s" % (home, INITCONFIGDIR)
- if not os.path.isdir(inst_initconfig_dir):
- raise Exception("Please create the directory \'%s\' and retry." % inst_initconfig_dir )
-
-class DSadmCmd(object):
- def __init__(self):
- self.version = '0.1'
-
- def _start_subparser(self, subparsers):
- start_parser = subparsers.add_parser(
- 'start',
- help='Start a Directory Server Instance')
- start_parser.add_argument('-I', '--server-id', dest='server_id', type=str, nargs='?',
- metavar='SERVER-ID',
- help='Server Identifier (Default: %s) ' % get_default_serverid())
- start_parser.set_defaults(func=self.start_action)
-
- def _stop_subparser(self, subparsers):
- start_parser = subparsers.add_parser(
- 'stop',
- help='Stop a Directory Server Instance')
- start_parser.add_argument('-I', '--server-id', dest='server_id', type=str, nargs='?',
- metavar='SERVER-ID',
- help='Server Identifier (Default: %s) ' % get_default_serverid())
- start_parser.set_defaults(func=self.stop_action)
-
- def _restart_subparser(self, subparsers):
- start_parser = subparsers.add_parser(
- 'restart',
- help='Retart a Directory Server Instance')
- start_parser.add_argument('-I', '--server-id', dest='server_id', type=str, nargs='?',
- metavar='SERVER-ID',
- help='Server Identifier (Default: %s) ' % get_default_serverid())
- start_parser.set_defaults(func=self.restart_action)
-
- def _delete_subparser(self, subparsers):
- delete_parser = subparsers.add_parser(
- 'delete',
- help='Delete a Directory Server Instance')
- delete_parser.add_argument('-I', '--server-id', dest='server_id', type=str, nargs='?',
- metavar='SERVER-ID',
- help='Server Identifier (Default: %s) ' % get_default_serverid())
- delete_parser.add_argument('-debug', '--debug', dest='debug_level', type=int, nargs='?',
- metavar='DEBUG_LEVEL',
- help='Debug level (Default: 0)')
- delete_parser.set_defaults(func=self.delete_action)
-
- def _create_subparser(self, subparsers):
- create_parser = subparsers.add_parser(
- 'create',
- help='Create a Directory Server Instance')
- create_parser.add_argument('-I', '--server-id', dest='server_id', type=str, nargs='?',
- metavar='SERVER-ID',
- help='Server Identifier (Default: %s) ' % get_default_serverid())
- create_parser.add_argument('-s', '--suffix', dest='suffix', type=str, nargs='?',
- metavar='SUFFIX-DN',
- help='Suffix (Default: create no suffix)')
- create_parser.add_argument('-p', '--port', dest='port', type=int, nargs='?',
- metavar='NON-SECURE-PORT',
- help='Normal Port to listen (Default: %s(root)/%s(non-root)) ' % (DEFAULT_PORT_ROOT, DEFAULT_PORT_NON_ROOT))
-
- create_parser.add_argument('-P', '--secure-port', dest='secure_port', type=int, nargs='?',
- metavar='SECURE-PORT',
- help='Secure Port to listen (Default: %s(root)/%s(non-root))' % (DEFAULT_SECURE_PORT_ROOT, DEFAULT_SECURE_PORT_NON_ROOT))
-
- create_parser.add_argument('-D', '--rootDN', dest='root_dn', type=str, nargs='?',
- metavar='ROOT-DN',
- help='Uses DN as Directory Manager DN (Default: \'%s\')' % (DEFAULT_ROOT_DN))
-
- create_parser.add_argument('-u', '--user-name', dest='user_name', type=str, nargs='?',
- metavar='USER-NAME',
- help='User name of the instance owner (Default: %s)' % DEFAULT_USER)
-
- create_parser.add_argument('-g', '--group-name', dest='group_name', type=str, nargs='?',
- metavar='GROUP-NAME',
- help='Group name of the instance owner (Default: %s)' % DEFAULT_GROUP)
-
- create_parser.add_argument('-d', '--directory-path', dest='directory_path', type=str, nargs='?',
- metavar='DIRECTORY-PATH',
- help='Installation directory path (Default: %s)' % get_default_directory())
- create_parser.add_argument('-debug', '--debug', dest='debug_level', type=int, nargs='?',
- metavar='DEBUG_LEVEL',
- help='Debug level (Default: 0)')
- create_parser.add_argument('-k', '--keep_template', dest='keep_template', type=str, nargs='?',
- help='Keep template file')
-
- create_parser.set_defaults(func=self.create_action)
-
- #
- # common function for start/stop/restart actions
- #
- def script_action(self, args, script, action_str):
- args = vars(args)
- serverid = args.get('server_id', None)
- if not serverid:
- serverid = get_default_serverid()
-
- script_file = "%s/%s" % (get_inst_dir(serverid), script)
- if not os.path.isfile(script_file):
- raise Exception("%s not found" % script_file)
-
- if not os.access(script_file, os.X_OK):
- raise Exception("%s not executable" % script_file)
-
- env = os.environ.copy()
- prog = [ script_file ]
- pipe = Popen(prog, cwd=os.getcwd(), env=env,
- stdin=PIPE, stdout=PIPE, stderr=STDOUT)
- child_stdin = pipe.stdin
- child_stdout = pipe.stdout
- for line in child_stdout:
- sys.stdout.write(line)
- child_stdout.close()
- child_stdin.close()
-
- rc = pipe.wait()
- if rc == 0:
- print("Directory %s %s" % (serverid, action_str))
- else:
- print("Failure: directory %s not %s (%s)" % (serverid, action_str, rc))
- return
-
- def start_action(self, args):
- self.script_action(args, SCRIPT_START, "started")
-
-
- def stop_action(self, args):
- self.script_action(args, SCRIPT_STOP, "stopped")
-
-
- def restart_action(self, args):
-
- self.script_action(args, SCRIPT_RESTART, "restarted")
-
- def delete_action(self, args):
- args = vars(args)
- serverid = args.get('server_id', None)
- if not serverid:
- serverid = get_default_serverid()
-
- #prepare the remove-ds options
- debug_level = args.get('debug_level', None)
- if debug_level:
- debug_str = ['-d']
- for i in range(1, int(debug_level)):
- debug_str.append('d')
- debug_str = ''.join(debug_str)
-
- env = os.environ.copy()
- prog = [REMOVE_DS]
- if debug_level:
- prog.append(debug_str)
- prog.append("-i")
- prog.append("slapd-%s" % serverid)
-
- # run the REMOVE_DS command and print the possible output
- pipe = Popen(prog, cwd=os.getcwd(), env=env,
- stdin=PIPE, stdout=PIPE, stderr=STDOUT)
- child_stdin = pipe.stdin
- child_stdout = pipe.stdout
- for line in child_stdout:
- if debug_level:
- sys.stdout.write(line)
- child_stdout.close()
- child_stdin.close()
-
- rc = pipe.wait()
- if rc == 0:
- print("Directory server \'%s\' successfully deleted" % serverid)
- else:
- print("Fail to delete directory \'%s\': %d" % (serverid, rc))
- return
-
- #
- # used by create subcommand to build the template file
- #
- def _create_setup_ds_file(self, args, user=None, group=None):
- # Get/checks the argument with the following order
- # - parameter
- # - Environment
- # - default
- serverid = args.get('server_id', None)
- if not serverid:
- serverid = get_default_serverid()
- serverid = validate_serverid(serverid)
-
- username = args.get('user_name', None)
- if not username:
- username = get_default_user()
- username = validate_user(username)
-
- groupname = args.get('group_name', None)
- if not groupname:
- groupname = get_default_group()
- groupname = validate_group(groupname)
-
- directoryname = args.get('directory_path', None)
- if not directoryname:
- directoryname = get_default_directory()
- validate_directory(directoryname)
-
- portnumber = args.get('port', None)
- if not portnumber:
- portnumber = get_default_port()
- validate_port(portnumber)
-
- suffix = args.get('suffix', None)
-
- tempf = tempfile.NamedTemporaryFile(delete=False)
-
- tempf.write('[General]\n')
- tempf.write('FullMachineName=%s\n' % DEFAULT_HOSTNAME)
- tempf.write('SuiteSpotUserID=%s\n' % username)
- tempf.write('SuiteSpotGroup=%s\n' % groupname)
- tempf.write('ServerRoot=%s\n' % directoryname)
- tempf.write('\n')
- tempf.write('[slapd]\n')
- tempf.write('ServerPort=1389\n')
- tempf.write('ServerIdentifier=%s\n' % serverid)
- if suffix:
- tempf.write('Suffix=%s\n' % suffix)
- tempf.write('RootDN=cn=Directory Manager\n')
- tempf.write('RootDNPwd=Secret12\n')
- tempf.write('sysconfdir=%s/etc\n' % directoryname)
- tempf.write('localstatedir=%s/var\n' % directoryname)
- tempf.write('inst_dir=%s/lib/dirsrv/slapd-%s\n'% (directoryname, serverid))
- tempf.write('config_dir=%s/etc/dirsrv/slapd-%s\n' % (directoryname, serverid))
- tempf.close()
-
- keep_template = args.get('keep_template', None)
- if keep_template:
- shutil.copy(tempf.name, keep_template)
-
-
- return tempf
-
- #
- # It silently creates an instance.
- # After creation the instance is started
- #
- def create_action(self, args):
- args = vars(args)
-
- # retrieve the serverid here just to log the final status
- serverid = args.get('server_id', None)
- if not serverid:
- serverid = get_default_serverid()
-
- # prepare the template file
- tempf = self._create_setup_ds_file(args)
-
- #prepare the setup-ds options
- debug_level = args.get('debug_level', None)
- if debug_level:
- debug_str = ['-d']
- for i in range(1, int(debug_level)):
- debug_str.append('d')
- debug_str = ''.join(debug_str)
-
- #
- # run the SETUP_DS command and print the possible output
- #
- env = os.environ.copy()
- prog = [SETUP_DS]
- if debug_level:
- prog.append(debug_str)
- prog.append("--silent")
- prog.append("--file=%s" % tempf.name)
- tempf.close()
-
- pipe = Popen(prog, cwd=os.getcwd(), env=env,
- stdin=PIPE, stdout=PIPE, stderr=STDOUT)
- child_stdin = pipe.stdin
- child_stdout = pipe.stdout
- for line in child_stdout:
- if debug_level:
- sys.stdout.write(line)
- child_stdout.close()
- child_stdin.close()
-
- os.unlink(tempf.name)
- rc = pipe.wait()
- if rc == 0:
- print("Directory server \'%s\' successfully created" % serverid)
- else:
- print("Fail to create directory \'%s\': %d" % (serverid, rc))
- return
-
- #
- # parser of the main command. It contains subcommands
- #
- def get_parser(self, argv):
-
-
- parser = argparse.ArgumentParser(
- description='Managing a local directory server instance')
-
- subparsers = parser.add_subparsers(
- metavar='SUBCOMMAND',
- help='The action to perform')
-
- #pdb.set_trace()
- # subcommands
- self._create_subparser(subparsers)
- self._delete_subparser(subparsers)
- self._start_subparser(subparsers)
- self._stop_subparser(subparsers)
- self._restart_subparser(subparsers)
-
- # Sanity check that the debug level is valid
- args = vars(parser.parse_args(argv))
- debug_level = args.get('debug_level', None)
- if debug_level and (int(debug_level) < 1 or int(debug_level > 5)):
- raise Exception("invalid debug level: range 1..5")
-
- return parser
-
- def main(self, argv):
- sanity_check()
- parser = self.get_parser(argv)
- args = parser.parse_args(argv)
- args.func(args)
- return
-
-if __name__ == '__main__':
- DSadmCmd().main(sys.argv[1:])
diff --git a/dirsrvtests/create_test.py b/dirsrvtests/create_test.py
index 423d9d2..887580e 100755
--- a/dirsrvtests/create_test.py
+++ b/dirsrvtests/create_test.py
@@ -8,13 +8,13 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
-import sys
import optparse
+import sys
+from lib389 import topologies
"""This script generates a template test script that handles the
non-interesting parts of a test script:
-- topology fixture (only for tickets),
- for suites we have predefined fixtures in lib389/topologies.py
+- topology fixture that doesn't exist in in lib389/topologies.py
- test function (to be completed by the user),
- run-isolated function
"""
@@ -81,6 +81,38 @@ def writeFinalizer():
TEST.write('\n\n')
+def get_existing_topologies(inst, masters, hubs, consumers):
+ """Check if the requested topology exists"""
+
+ if inst:
+ if inst == 1:
+ i = 'st'
+ else:
+ i = 'i{}'.format(inst)
+ else:
+ i = ''
+ if masters:
+ ms = 'm{}'.format(masters)
+ else:
+ ms = ''
+ if hubs:
+ hs = 'h{}'.format(hubs)
+ else:
+ hs = ''
+ if consumers:
+ cs = 'c{}'.format(consumers)
+ else:
+ cs = ''
+
+ my_topology = 'topology_{}{}{}{}'.format(i, ms, hs, cs)
+
+ # Returns True in the first element of a list, if topology was found
+ if my_topology in dir(topologies):
+ return [True, my_topology]
+ else:
+ return [False, my_topology]
+
+
desc = 'Script to generate an initial lib389 test script. ' + \
'This generates the topology, test, final, and run-isolated functions.'
@@ -90,7 +122,7 @@ if len(sys.argv) > 0:
# Script options
parser.add_option('-t', '--ticket', dest='ticket', default=None)
parser.add_option('-s', '--suite', dest='suite', default=None)
- parser.add_option('-i', '--instances', dest='inst', default=None)
+ parser.add_option('-i', '--instances', dest='inst', default='0')
parser.add_option('-m', '--masters', dest='masters', default='0')
parser.add_option('-h', '--hubs', dest='hubs', default='0')
parser.add_option('-c', '--consumers', dest='consumers', default='0')
@@ -139,27 +171,29 @@ if len(sys.argv) > 0:
if args.inst:
if not args.inst.isdigit() or \
int(args.inst) > 99 or \
- int(args.inst) < 1:
+ int(args.inst) < 0:
print('Invalid value for "--instances", it must be a number ' +
'greater than 0 and not greater than 99')
displayUsage()
if int(args.inst) > 0:
if int(args.masters) > 0 or \
- int(args.hubs) > 0 or \
- int(args.consumers) > 0:
+ int(args.hubs) > 0 or \
+ int(args.consumers) > 0:
print('You can not mix "--instances" with replication.')
displayUsage()
# Extract usable values
- masters = int(args.masters)
- hubs = int(args.hubs)
- consumers = int(args.consumers)
ticket = args.ticket
suite = args.suite
- if not args.inst:
+ if not args.inst and not args.masters and not args.hubs and not args.consumers:
instances = 1
+ my_topology = [True, 'topology_st']
else:
instances = int(args.inst)
+ masters = int(args.masters)
+ hubs = int(args.hubs)
+ consumers = int(args.consumers)
+ my_topology = get_existing_topologies(instances, masters, hubs, consumers)
filename = args.filename
# Create/open the new test script file
@@ -176,23 +210,28 @@ if len(sys.argv) > 0:
exit(1)
# Write the imports
- TEST.write('import os\nimport sys\nimport time\nimport ldap\n' +
+ if my_topology[0]:
+ topology_import = 'from lib389.topologies import {}\n'.format(my_topology[1])
+ else:
+ topology_import = ''
+
+ TEST.write('import time\nimport ldap\n' +
'import logging\nimport pytest\n')
TEST.write('from lib389 import DirSrv, Entry, tools, tasks\nfrom ' +
'lib389.tools import DirSrvTools\nfrom lib389._constants ' +
'import *\nfrom lib389.properties import *\n' +
- 'from lib389.tasks import *\nfrom lib389.utils import *\n\n')
-
- # Add topology function for a ticket only.
- # Suites have presetuped fixtures in lib389/topologies.py
- if ticket:
- TEST.write('DEBUGGING = False\n\n')
- TEST.write('if DEBUGGING:\n')
- TEST.write(' logging.getLogger(__name__).setLevel(logging.DEBUG)\n')
- TEST.write('else:\n')
- TEST.write(' logging.getLogger(__name__).setLevel(logging.INFO)\n')
- TEST.write('log = logging.getLogger(__name__)\n\n\n')
-
+ 'from lib389.tasks import *\nfrom lib389.utils import *\n' +
+ '{}\n'.format(topology_import))
+
+ TEST.write('DEBUGGING = os.getenv("DEBUGGING", default=False)\n')
+ TEST.write('if DEBUGGING:\n')
+ TEST.write(' logging.getLogger(__name__).setLevel(logging.DEBUG)\n')
+ TEST.write('else:\n')
+ TEST.write(' logging.getLogger(__name__).setLevel(logging.INFO)\n')
+ TEST.write('log = logging.getLogger(__name__)\n\n\n')
+
+ # Add topology function for non existing (in lib389/topologies.py) topologies only
+ if not my_topology[0]:
# Write the replication or standalone classes
repl_deployment = False
@@ -247,7 +286,7 @@ if len(sys.argv) > 0:
# Write the 'topology function'
TEST.write('@pytest.fixture(scope="module")\n')
- TEST.write('def topology(request):\n')
+ TEST.write('def {}(request):\n'.format(my_topology[1]))
if repl_deployment:
TEST.write(' """Create Replication Deployment"""\n')
@@ -363,9 +402,9 @@ if len(sys.argv) > 0:
"defaultProperties[REPLICATION_TRANSPORT]}\n")
TEST.write(' m' + str(master_idx) + '_m' + str(idx) +
'_agmt = master' + str(master_idx) +
- '.agreement.create(suffix=SUFFIX, host=master' +
- str(idx) + '.host, port=master' + str(idx) +
- '.port, properties=properties)\n')
+ '.agreement.create(suffix=SUFFIX, host=master' +
+ str(idx) + '.host, port=master' + str(idx) +
+ '.port, properties=properties)\n')
TEST.write(' if not m' + str(master_idx) + '_m' + str(idx) +
'_agmt:\n')
TEST.write(' log.fatal("Fail to create a master -> ' +
@@ -504,7 +543,7 @@ if len(sys.argv) > 0:
for idx in range(hubs):
idx += 1
TEST.write(' master1.agreement.init(SUFFIX, HOST_HUB_' +
- str(idx) + ', PORT_HUB_' + str(idx) + ')\n')
+ str(idx) + ', PORT_HUB_' + str(idx) + ')\n')
TEST.write(' master1.waitForReplInit(m1_h' + str(idx) +
'_agmt)\n')
for idx in range(consumers):
@@ -561,7 +600,7 @@ if len(sys.argv) > 0:
TEST.write(', hub' + str(idx + 1))
for idx in range(consumers):
TEST.write(', consumer' + str(idx + 1))
- TEST.write(')\n\n')
+ TEST.write(')\n\n\n')
# Standalone servers
else:
@@ -612,12 +651,11 @@ if len(sys.argv) > 0:
if idx == 1:
continue
TEST.write(', standalone' + str(idx))
- TEST.write(')\n\n')
- TEST.write('\n')
+ TEST.write(')\n\n\n')
# Write the test function
if ticket:
- TEST.write('def test_ticket' + ticket + '(topology):\n')
+ TEST.write('def test_ticket{}({}):\n'.format(ticket, my_topology[1]))
if repl_deployment:
TEST.write(' """Write your replication test here.\n\n')
TEST.write(' To access each DirSrv instance use: ' +
@@ -626,7 +664,7 @@ if len(sys.argv) > 0:
',...\n\n')
TEST.write(' Also, if you need any testcase initialization,\n')
TEST.write(' please, write additional fixture for that' +
- '(include ' + 'finalizer).\n')
+ '(including finalizer).\n')
else:
TEST.write(' """Write your testcase here...\n\n')
TEST.write(' Also, if you need any testcase initialization,\n')
@@ -634,20 +672,11 @@ if len(sys.argv) > 0:
'(include finalizer).\n')
TEST.write(' """\n\n')
else:
- TEST.write('def test_something(topology_XX):\n')
+ TEST.write('def test_something({}):\n'.format(my_topology[1]))
TEST.write(' """Write a single test here...\n\n')
TEST.write(' Also, if you need any test suite initialization,\n')
- TEST.write(' please, write additional fixture for that(include finalizer).\n' +
- ' Topology for suites are predefined in lib389/topologies.py.\n\n'
- ' Choose one of the options:\n'
- ' 1) topology_st for standalone\n'
- ' topology.standalone\n'
- ' 2) topology_m2 for two masters\n'
- ' topology.ms["master{1,2}"]\n'
- ' each master has agreements\n'
- ' topology.ms["master{1,2}_agmts"][m{1,2}_m{2,1}]\n'
- ' 3) topology_m4 for four masters\n'
- ' the same as topology_m2 but has more masters and agreements\n'
+ TEST.write(' please, write additional fixture for that (including finalizer).\n'
+ ' Topology for suites are predefined in lib389/topologies.py.\n'
' """\n\n')
TEST.write(' if DEBUGGING:\n')
7 years, 3 months
Branch '389-ds-base-1.2.11' - ldap/admin
by Noriko Hosoi
ldap/admin/src/scripts/repl-monitor.pl.in | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
New commits:
commit 436616e7f4dbd81598d98be8c51c0720922d49d0
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Thu May 12 16:10:02 2016 -0400
Ticket 48220 - The "repl-monitor" web page does not display "year" in date.
Bug Description: The year is not displayed in the header when the day
is less than 10. Appears to be an issue with localtime().
Fix Description: Use strftime instead of localtime for displaying the date.
https://fedorahosted.org/389/ticket/48220
Reviewed by: nhosoi(Thanks!)
(cherry picked from commit 77e6044ee5e44fa86e44280d46f36d63a30458b0)
diff --git a/ldap/admin/src/scripts/repl-monitor.pl.in b/ldap/admin/src/scripts/repl-monitor.pl.in
index de0efa5..3f0a429 100755
--- a/ldap/admin/src/scripts/repl-monitor.pl.in
+++ b/ldap/admin/src/scripts/repl-monitor.pl.in
@@ -192,6 +192,7 @@ use Mozilla::LDAP::Conn; # LDAP module for Perl
use Mozilla::LDAP::Utils qw(normalizeDN); # LULU, utilities.
use Mozilla::LDAP::API qw(:api :ssl :apiv3 :constant); # Direct access to C API
use Time::Local; # to convert GMT Z strings to localtime
+use POSIX;
#
# Global variables
@@ -228,7 +229,7 @@ my %ld; # ldap connection hash
#
my ($opt_f, $opt_h, $opt_p, $opt_u, $opt_t, $opt_r, $opt_s);
my (@conns, @alias, @color);
-my ($section, $interval, $nowraw, $now, $mm, $dd, $tt, $yy, $wday);
+my ($section, $interval, $now, $mm, $dd, $tt, $yy, $wday);
my ($fn, $rc, $prompt, $last_sidx);
my %passwords = ();
my $passwd = "";
@@ -262,9 +263,7 @@ $prompt = "";
$interval = 300 if ( !$interval || $interval <= 0 );
# Get current date/time
- $nowraw = localtime();
- ($wday, $mm, $dd, $tt, $yy) = split(/ /, $nowraw);
- $now = "$wday $mm $dd $yy $tt";
+ $now = strftime "%a %b %e %Y %H:%M:%S", localtime;
# if no -r (Reenter and skip html header), print html header
if (!$opt_r) {
7 years, 3 months
dirsrvtests/create_test.py
by vashirov
dirsrvtests/create_test.py | 28 ++++++++++++++--------------
1 file changed, 14 insertions(+), 14 deletions(-)
New commits:
commit e3618371566cd58338250a9bf7a7d4d1d36c8bb0
Author: Viktor Ashirov <vashirov(a)redhat.com>
Date: Fri Dec 2 12:25:41 2016 +0100
Ticket 49060 - Increase number of masters, hubs and consumers in topology
Bug Description:
Currently only 10 masters, hubs and consumers can be created for
testing using lib389. We should increase that number to at least
60 (number of supported replicas in RHEL7.3).
Fix Description:
Allow create_test.py to create up to 99 instances (standalone, masters,
hubs and consumers).
https://fedorahosted.org/389/ticket/49060
Reviewed by: spichugi (Thanks!)
diff --git a/dirsrvtests/create_test.py b/dirsrvtests/create_test.py
index 0eb3b01..423d9d2 100755
--- a/dirsrvtests/create_test.py
+++ b/dirsrvtests/create_test.py
@@ -29,14 +29,14 @@ def displayUsage():
'[ -m|--masters <number of masters> -h|--hubs <number of hubs> ' +
'-c|--consumers <number of consumers> ] -o|--outputfile ]\n')
print ('If only "-t" is provided then a single standalone instance is ' +
- 'created. Or you can create a test suite script using ' +
- '"-s|--suite" instead of using "-t|--ticket". The "-i" option ' +
- 'can add mulitple standalone instances(maximum 10). However, you' +
- ' can not mix "-i" with the replication options(-m, -h , -c). ' +
- 'There is a maximum of 10 masters, 10 hubs, and 10 consumers.')
+ 'created. Or you can create a test suite script using ' +
+ '"-s|--suite" instead of using "-t|--ticket". The "-i" option ' +
+ 'can add mulitple standalone instances (maximum 99). However, you' +
+ ' can not mix "-i" with the replication options (-m, -h , -c). ' +
+ 'There is a maximum of 99 masters, 99 hubs, and 99 consumers.')
print('If "-s|--suite" option was chosen, then no topology would be added ' +
'to the test script. You can find predefined fixtures in the lib389/topologies.py ' +
- 'and usem them or write new one if you have special case.')
+ 'and use them or write a new one if you have a special case.')
exit(1)
@@ -118,30 +118,30 @@ if len(sys.argv) > 0:
displayUsage()
if not args.masters.isdigit() or \
- int(args.masters) > 10 or \
+ int(args.masters) > 99 or \
int(args.masters) < 0:
print('Invalid value for "--masters", it must be a number and it can' +
- ' not be greater than 10')
+ ' not be greater than 99')
displayUsage()
- if not args.hubs.isdigit() or int(args.hubs) > 10 or int(args.hubs) < 0:
+ if not args.hubs.isdigit() or int(args.hubs) > 99 or int(args.hubs) < 0:
print('Invalid value for "--hubs", it must be a number and it can ' +
- 'not be greater than 10')
+ 'not be greater than 99')
displayUsage()
if not args.consumers.isdigit() or \
- int(args.consumers) > 10 or \
+ int(args.consumers) > 99 or \
int(args.consumers) < 0:
print('Invalid value for "--consumers", it must be a number and it ' +
- 'can not be greater than 10')
+ 'can not be greater than 99')
displayUsage()
if args.inst:
if not args.inst.isdigit() or \
- int(args.inst) > 10 or \
+ int(args.inst) > 99 or \
int(args.inst) < 1:
print('Invalid value for "--instances", it must be a number ' +
- 'greater than 0 and not greater than 10')
+ 'greater than 0 and not greater than 99')
displayUsage()
if int(args.inst) > 0:
if int(args.masters) > 0 or \
7 years, 3 months
dirsrvtests/tests
by Simon Pichugin
dirsrvtests/tests/suites/acctpolicy_plugin/accpol_test.py | 12
dirsrvtests/tests/suites/acl/acl_test.py | 11
dirsrvtests/tests/suites/attr_uniqueness_plugin/attr_uniqueness_test.py | 6
dirsrvtests/tests/suites/basic/basic_test.py | 13
dirsrvtests/tests/suites/betxns/betxn_test.py | 9
dirsrvtests/tests/suites/clu/clu_test.py | 9
dirsrvtests/tests/suites/config/config_test.py | 9
dirsrvtests/tests/suites/dna_plugin/dna_test.py | 9
dirsrvtests/tests/suites/ds_logs/ds_logs_test.py | 45
dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py | 432 ++---
dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py | 16
dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py | 14
dirsrvtests/tests/suites/filter/filter_test.py | 29
dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py | 7
dirsrvtests/tests/suites/gssapi_repl/gssapi_repl_test.py | 10
dirsrvtests/tests/suites/ldapi/__init__.py | 1
dirsrvtests/tests/suites/memberof_plugin/memberof_test.py | 639 ++++----
dirsrvtests/tests/suites/memory_leaks/range_search_test.py | 9
dirsrvtests/tests/suites/paged_results/paged_results_test.py | 12
dirsrvtests/tests/suites/paged_results/sss_control.py | 76 -
dirsrvtests/tests/suites/password/password_test.py | 9
dirsrvtests/tests/suites/password/pwdAdmin_test.py | 9
dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py | 8
dirsrvtests/tests/suites/password/pwdPolicy_inherit_global_test.py | 13
dirsrvtests/tests/suites/password/pwdPolicy_syntax_test.py | 7
dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py | 10
dirsrvtests/tests/suites/password/pwd_algo_test.py | 9
dirsrvtests/tests/suites/password/pwp_history_test.py | 7
dirsrvtests/tests/suites/replication/cleanallruv_test.py | 13
dirsrvtests/tests/suites/replication/tombstone_test.py | 9
dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py | 12
dirsrvtests/tests/suites/rootdn_plugin/rootdn_plugin_test.py | 72
dirsrvtests/tests/suites/schema/test_schema.py | 15
dirsrvtests/tests/tickets/finalizer.py | 52
dirsrvtests/tests/tickets/ticket1347760_test.py | 227 +--
dirsrvtests/tests/tickets/ticket365_test.py | 94 -
dirsrvtests/tests/tickets/ticket397_test.py | 97 -
dirsrvtests/tests/tickets/ticket47313_test.py | 103 -
dirsrvtests/tests/tickets/ticket47384_test.py | 80 -
dirsrvtests/tests/tickets/ticket47431_test.py | 131 -
dirsrvtests/tests/tickets/ticket47462_test.py | 255 ---
dirsrvtests/tests/tickets/ticket47490_test.py | 314 +---
dirsrvtests/tests/tickets/ticket47536_test.py | 234 ---
dirsrvtests/tests/tickets/ticket47553_test.py | 107 -
dirsrvtests/tests/tickets/ticket47560_test.py | 87 -
dirsrvtests/tests/tickets/ticket47573_test.py | 185 --
dirsrvtests/tests/tickets/ticket47619_test.py | 144 -
dirsrvtests/tests/tickets/ticket47640_test.py | 75 -
dirsrvtests/tests/tickets/ticket47653MMR_test.py | 320 +---
dirsrvtests/tests/tickets/ticket47653_test.py | 266 +--
dirsrvtests/tests/tickets/ticket47669_test.py | 175 --
dirsrvtests/tests/tickets/ticket47676_test.py | 264 ---
dirsrvtests/tests/tickets/ticket47714_test.py | 168 --
dirsrvtests/tests/tickets/ticket47721_test.py | 289 +--
dirsrvtests/tests/tickets/ticket47781_test.py | 103 -
dirsrvtests/tests/tickets/ticket47787_test.py | 333 +---
dirsrvtests/tests/tickets/ticket47808_test.py | 105 -
dirsrvtests/tests/tickets/ticket47815_test.py | 104 -
dirsrvtests/tests/tickets/ticket47819_test.py | 106 -
dirsrvtests/tests/tickets/ticket47823_test.py | 743 ++++------
dirsrvtests/tests/tickets/ticket47828_test.py | 661 ++++----
dirsrvtests/tests/tickets/ticket47829_test.py | 651 ++++----
dirsrvtests/tests/tickets/ticket47833_test.py | 257 +--
dirsrvtests/tests/tickets/ticket47838_test.py | 577 +++----
dirsrvtests/tests/tickets/ticket47869MMR_test.py | 260 ---
dirsrvtests/tests/tickets/ticket47871_test.py | 149 --
dirsrvtests/tests/tickets/ticket47900_test.py | 206 +-
dirsrvtests/tests/tickets/ticket47910_test.py | 79 -
dirsrvtests/tests/tickets/ticket47920_test.py | 134 -
dirsrvtests/tests/tickets/ticket47921_test.py | 98 -
dirsrvtests/tests/tickets/ticket47927_test.py | 239 +--
dirsrvtests/tests/tickets/ticket47931_test.py | 116 -
dirsrvtests/tests/tickets/ticket47937_test.py | 122 -
dirsrvtests/tests/tickets/ticket47950_test.py | 95 -
dirsrvtests/tests/tickets/ticket47953_test.py | 62
dirsrvtests/tests/tickets/ticket47963_test.py | 101 -
dirsrvtests/tests/tickets/ticket47966_test.py | 114 -
dirsrvtests/tests/tickets/ticket47970_test.py | 67
dirsrvtests/tests/tickets/ticket47973_test.py | 76 -
dirsrvtests/tests/tickets/ticket47976_test.py | 197 +-
dirsrvtests/tests/tickets/ticket47980_test.py | 513 +++---
dirsrvtests/tests/tickets/ticket47981_test.py | 147 -
dirsrvtests/tests/tickets/ticket47988_test.py | 397 +----
dirsrvtests/tests/tickets/ticket48005_test.py | 182 --
dirsrvtests/tests/tickets/ticket48013_test.py | 54
dirsrvtests/tests/tickets/ticket48026_test.py | 94 -
dirsrvtests/tests/tickets/ticket48109_test.py | 217 +-
dirsrvtests/tests/tickets/ticket48170_test.py | 50
dirsrvtests/tests/tickets/ticket48194_test.py | 371 ++--
dirsrvtests/tests/tickets/ticket48212_test.py | 144 -
dirsrvtests/tests/tickets/ticket48214_test.py | 120 -
dirsrvtests/tests/tickets/ticket48226_test.py | 170 --
dirsrvtests/tests/tickets/ticket48228_test.py | 193 +-
dirsrvtests/tests/tickets/ticket48233_test.py | 64
dirsrvtests/tests/tickets/ticket48234_test.py | 74
dirsrvtests/tests/tickets/ticket48252_test.py | 91 -
dirsrvtests/tests/tickets/ticket48265_test.py | 76 -
dirsrvtests/tests/tickets/ticket48266_test.py | 247 ---
dirsrvtests/tests/tickets/ticket48270_test.py | 124 -
dirsrvtests/tests/tickets/ticket48272_test.py | 116 -
dirsrvtests/tests/tickets/ticket48294_test.py | 160 --
dirsrvtests/tests/tickets/ticket48295_test.py | 111 -
dirsrvtests/tests/tickets/ticket48312_test.py | 120 -
dirsrvtests/tests/tickets/ticket48325_test.py | 201 --
dirsrvtests/tests/tickets/ticket48342_test.py | 296 ---
dirsrvtests/tests/tickets/ticket48354_test.py | 70
dirsrvtests/tests/tickets/ticket48362_test.py | 203 --
dirsrvtests/tests/tickets/ticket48366_test.py | 187 --
dirsrvtests/tests/tickets/ticket48370_test.py | 174 --
dirsrvtests/tests/tickets/ticket48383_test.py | 104 -
dirsrvtests/tests/tickets/ticket48497_test.py | 113 -
dirsrvtests/tests/tickets/ticket48637_test.py | 77 -
dirsrvtests/tests/tickets/ticket48665_test.py | 79 -
dirsrvtests/tests/tickets/ticket48745_test.py | 138 -
dirsrvtests/tests/tickets/ticket48746_test.py | 174 --
dirsrvtests/tests/tickets/ticket48755_test.py | 129 -
dirsrvtests/tests/tickets/ticket48759_test.py | 144 -
dirsrvtests/tests/tickets/ticket48784_test.py | 206 --
dirsrvtests/tests/tickets/ticket48798_test.py | 84 -
dirsrvtests/tests/tickets/ticket48799_test.py | 135 -
dirsrvtests/tests/tickets/ticket48808_test.py | 252 +--
dirsrvtests/tests/tickets/ticket48844_test.py | 193 --
dirsrvtests/tests/tickets/ticket48891_test.py | 108 -
dirsrvtests/tests/tickets/ticket48893_test.py | 66
dirsrvtests/tests/tickets/ticket48896_test.py | 94 -
dirsrvtests/tests/tickets/ticket48906_test.py | 397 ++---
dirsrvtests/tests/tickets/ticket48916_test.py | 172 --
dirsrvtests/tests/tickets/ticket48956_test.py | 132 -
dirsrvtests/tests/tickets/ticket48961_test.py | 112 -
dirsrvtests/tests/tickets/ticket49073_test.py | 203 --
dirsrvtests/tests/tickets/ticket548_test.py | 222 +-
131 files changed, 6673 insertions(+), 12610 deletions(-)
New commits:
commit 54e90366c1bf4bad01389b3fb6776435f209bb5a
Author: Simon Pichugin <spichugi(a)redhat.com>
Date: Fri Dec 23 19:04:57 2016 +0100
Ticket 49055 - Clean up test tickets and suites
Description: Add all topology fixture imports
for all tickets and refactor them accordingly.
Fix PEP8 and some logic issues.
Optimize imports in tickets and suites.
https://fedorahosted.org/389/ticket/49055
Reviewed by: mreynolds (Thanks!)
diff --git a/dirsrvtests/tests/suites/acctpolicy_plugin/accpol_test.py b/dirsrvtests/tests/suites/acctpolicy_plugin/accpol_test.py
index 3cf93b0..1ed13c7 100644
--- a/dirsrvtests/tests/suites/acctpolicy_plugin/accpol_test.py
+++ b/dirsrvtests/tests/suites/acctpolicy_plugin/accpol_test.py
@@ -1,16 +1,4 @@
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-import ldif
-import ldap.modlist as modlist
-from ldif import LDIFParser, LDIFWriter
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
from lib389.topologies import topology_st
diff --git a/dirsrvtests/tests/suites/acl/acl_test.py b/dirsrvtests/tests/suites/acl/acl_test.py
index cb58352..cd7b177 100644
--- a/dirsrvtests/tests/suites/acl/acl_test.py
+++ b/dirsrvtests/tests/suites/acl/acl_test.py
@@ -6,20 +6,11 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
+from ldap.controls.simple import GetEffectiveRightsControl
from lib389.tasks import *
from lib389.utils import *
from lib389.topologies import topology_m2
-from ldap.controls.simple import GetEffectiveRightsControl
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
diff --git a/dirsrvtests/tests/suites/attr_uniqueness_plugin/attr_uniqueness_test.py b/dirsrvtests/tests/suites/attr_uniqueness_plugin/attr_uniqueness_test.py
index 5da6fe3..32a7327 100644
--- a/dirsrvtests/tests/suites/attr_uniqueness_plugin/attr_uniqueness_test.py
+++ b/dirsrvtests/tests/suites/attr_uniqueness_plugin/attr_uniqueness_test.py
@@ -6,13 +6,7 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
from lib389.topologies import topology_st
diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py
index 82cd8e5..21db6c5 100644
--- a/dirsrvtests/tests/suites/basic/basic_test.py
+++ b/dirsrvtests/tests/suites/basic/basic_test.py
@@ -6,19 +6,10 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
+from subprocess import check_output
+
import ldap.sasl
-import logging
import pytest
-import shutil
-from subprocess import check_output
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
from lib389.topologies import topology_st
diff --git a/dirsrvtests/tests/suites/betxns/betxn_test.py b/dirsrvtests/tests/suites/betxns/betxn_test.py
index 40021e5..f487757 100644
--- a/dirsrvtests/tests/suites/betxns/betxn_test.py
+++ b/dirsrvtests/tests/suites/betxns/betxn_test.py
@@ -6,17 +6,8 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
import six
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
from lib389.topologies import topology_st
diff --git a/dirsrvtests/tests/suites/clu/clu_test.py b/dirsrvtests/tests/suites/clu/clu_test.py
index abf477c..4567a5d 100644
--- a/dirsrvtests/tests/suites/clu/clu_test.py
+++ b/dirsrvtests/tests/suites/clu/clu_test.py
@@ -6,16 +6,7 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
from lib389.topologies import topology_st
diff --git a/dirsrvtests/tests/suites/config/config_test.py b/dirsrvtests/tests/suites/config/config_test.py
index 0323427..361a23d 100644
--- a/dirsrvtests/tests/suites/config/config_test.py
+++ b/dirsrvtests/tests/suites/config/config_test.py
@@ -6,16 +6,9 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
import logging
+
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.topologies import topology_m2
diff --git a/dirsrvtests/tests/suites/dna_plugin/dna_test.py b/dirsrvtests/tests/suites/dna_plugin/dna_test.py
index 84a000e..3bd5cd7 100644
--- a/dirsrvtests/tests/suites/dna_plugin/dna_test.py
+++ b/dirsrvtests/tests/suites/dna_plugin/dna_test.py
@@ -6,16 +6,7 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
from lib389.topologies import topology_st
diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
index 7cc6df4..e052112 100644
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
@@ -6,11 +6,9 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import ldap
-import logging
-import pytest
from random import sample
-from lib389.properties import *
+
+import pytest
from lib389.tasks import *
from lib389.utils import *
from lib389.topologies import topology_st
@@ -34,15 +32,15 @@ def add_users(topology_st, users_num):
users_list.append(USER_DN)
try:
topology_st.standalone.add_s(Entry((USER_DN, {
- 'objectclass': 'top person'.split(),
- 'objectclass': 'organizationalPerson',
- 'objectclass': 'inetorgperson',
- 'cn': USER_NAME,
- 'sn': USER_NAME,
- 'userpassword': 'pass%s' % num_ran,
- 'mail': '%s(a)redhat.com' % USER_NAME,
- 'uid': USER_NAME
- })))
+ 'objectclass': 'top person'.split(),
+ 'objectclass': 'organizationalPerson',
+ 'objectclass': 'inetorgperson',
+ 'cn': USER_NAME,
+ 'sn': USER_NAME,
+ 'userpassword': 'pass%s' % num_ran,
+ 'mail': '%s(a)redhat.com' % USER_NAME,
+ 'uid': USER_NAME
+ })))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
@@ -50,15 +48,15 @@ def add_users(topology_st, users_num):
def search_users(topology_st):
- try:
- entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(cn=*)', ['cn'])
- for entry in entries:
- if 'user1' in entry.data['cn']:
- log.info('Search found "user1"')
+ try:
+ entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(cn=*)', ['cn'])
+ for entry in entries:
+ if 'user1' in entry.data['cn']:
+ log.info('Search found "user1"')
- except ldap.LDAPError as e:
- log.fatal('Search failed, error: ' + e.message['desc'])
- raise e
+ except ldap.LDAPError as e:
+ log.fatal('Search failed, error: ' + e.message['desc'])
+ raise e
def test_check_default(topology_st):
@@ -72,7 +70,7 @@ def test_check_default(topology_st):
default = topology_st.standalone.config.get_attr_val(PLUGIN_TIMESTAMP)
# Now check it should be ON by default
- assert(default == "on")
+ assert (default == "on")
log.debug(default)
@@ -81,7 +79,7 @@ def test_plugin_set_invalid(topology_st):
log.info('test_plugin_set_invalid - Expect to fail with junk value')
with pytest.raises(ldap.OPERATIONS_ERROR):
- result = topology_st.standalone.config.set(PLUGIN_TIMESTAMP,'JUNK')
+ result = topology_st.standalone.config.set(PLUGIN_TIMESTAMP, 'JUNK')
def test_log_plugin_on(topology_st):
@@ -126,6 +124,7 @@ def test_log_plugin_off(topology_st):
assert len(access_log_lines) > 0
assert not topology_st.standalone.ds_access_log.match('^\[.+\d{9}.+\].+')
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py b/dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py
index 99559cc..c32f3e5 100644
--- a/dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py
+++ b/dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py
@@ -11,16 +11,8 @@ Created on Dec 09, 2014
@author: mreynolds
'''
-import os
-import sys
-import time
-import ldap
import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
+
from lib389.tasks import *
log = logging.getLogger(__name__)
@@ -146,16 +138,16 @@ def test_acctpolicy(inst, args=None):
# Add the config entry
try:
inst.add_s(Entry((CONFIG_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'cn': 'config',
- 'alwaysrecordlogin': 'yes',
- 'stateattrname': 'lastLoginTime'
- })))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'cn': 'config',
+ 'alwaysrecordlogin': 'yes',
+ 'stateattrname': 'lastLoginTime'
+ })))
except ldap.ALREADY_EXISTS:
try:
inst.modify_s(CONFIG_DN,
- [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes'),
- (ldap.MOD_REPLACE, 'stateattrname', 'lastLoginTime')])
+ [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes'),
+ (ldap.MOD_REPLACE, 'stateattrname', 'lastLoginTime')])
except ldap.LDAPError as e:
log.fatal('test_acctpolicy: Failed to modify config entry: error ' + e.message['desc'])
assert False
@@ -171,10 +163,10 @@ def test_acctpolicy(inst, args=None):
time.sleep(1)
try:
inst.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(),
- 'sn': '1',
- 'cn': 'user 1',
- 'uid': 'user1',
- 'userpassword': 'password'})))
+ 'sn': '1',
+ 'cn': 'user 1',
+ 'uid': 'user1',
+ 'userpassword': 'password'})))
except ldap.LDAPError as e:
log.fatal('test_acctpolicy: Failed to add test user' + USER1_DN + ': error ' + e.message['desc'])
assert False
@@ -308,7 +300,7 @@ def test_attruniq(inst, args=None):
'cn': 'user 1',
'uid': 'user1',
'mail': 'user1(a)example.com',
- 'mailAlternateAddress' : 'user1(a)alt.example.com',
+ 'mailAlternateAddress': 'user1(a)alt.example.com',
'userpassword': 'password'})))
except ldap.LDAPError as e:
log.fatal('test_attruniq: Failed to add test user' + USER1_DN + ': error ' + e.message['desc'])
@@ -347,11 +339,11 @@ def test_attruniq(inst, args=None):
try:
inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
- 'sn': '2',
- 'cn': 'user 2',
- 'uid': 'user2',
- 'mail': 'user1(a)example.com',
- 'userpassword': 'password'})))
+ 'sn': '2',
+ 'cn': 'user 2',
+ 'uid': 'user2',
+ 'mail': 'user1(a)example.com',
+ 'userpassword': 'password'})))
except ldap.CONSTRAINT_VIOLATION:
pass
else:
@@ -369,7 +361,8 @@ def test_attruniq(inst, args=None):
'mailAlternateAddress')])
except ldap.LDAPError as e:
- log.error('test_attruniq: Failed to reconfigure plugin for "mail mailAlternateAddress": error ' + e.message['desc'])
+ log.error(
+ 'test_attruniq: Failed to reconfigure plugin for "mail mailAlternateAddress": error ' + e.message['desc'])
assert False
############################################################################
@@ -378,11 +371,11 @@ def test_attruniq(inst, args=None):
try:
inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
- 'sn': '2',
- 'cn': 'user 2',
- 'uid': 'user2',
- 'mail': 'user1(a)example.com',
- 'userpassword': 'password'})))
+ 'sn': '2',
+ 'cn': 'user 2',
+ 'uid': 'user2',
+ 'mail': 'user1(a)example.com',
+ 'userpassword': 'password'})))
except ldap.CONSTRAINT_VIOLATION:
pass
else:
@@ -395,11 +388,11 @@ def test_attruniq(inst, args=None):
try:
inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
- 'sn': '2',
- 'cn': 'user 2',
- 'uid': 'user2',
- 'mailAlternateAddress': 'user1(a)alt.example.com',
- 'userpassword': 'password'})))
+ 'sn': '2',
+ 'cn': 'user 2',
+ 'uid': 'user2',
+ 'mailAlternateAddress': 'user1(a)alt.example.com',
+ 'userpassword': 'password'})))
except ldap.CONSTRAINT_VIOLATION:
pass
else:
@@ -412,11 +405,11 @@ def test_attruniq(inst, args=None):
try:
inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
- 'sn': '2',
- 'cn': 'user 2',
- 'uid': 'user2',
- 'mail': 'user1(a)alt.example.com',
- 'userpassword': 'password'})))
+ 'sn': '2',
+ 'cn': 'user 2',
+ 'uid': 'user2',
+ 'mail': 'user1(a)alt.example.com',
+ 'userpassword': 'password'})))
except ldap.CONSTRAINT_VIOLATION:
pass
else:
@@ -429,11 +422,11 @@ def test_attruniq(inst, args=None):
try:
inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
- 'sn': '2',
- 'cn': 'user 2',
- 'uid': 'user2',
- 'mailAlternateAddress': 'user1(a)example.com',
- 'userpassword': 'password'})))
+ 'sn': '2',
+ 'cn': 'user 2',
+ 'uid': 'user2',
+ 'mailAlternateAddress': 'user1(a)example.com',
+ 'userpassword': 'password'})))
except ldap.CONSTRAINT_VIOLATION:
pass
else:
@@ -488,9 +481,9 @@ def test_automember(inst, args=None):
# Add the automember group
try:
inst.add_s(Entry((GROUP_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'cn': 'group'
- })))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'cn': 'group'
+ })))
except ldap.LDAPError as e:
log.fatal('test_automember: Failed to add group: error ' + e.message['desc'])
assert False
@@ -498,9 +491,9 @@ def test_automember(inst, args=None):
# Add ou=branch1
try:
inst.add_s(Entry((BRANCH1_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'ou': 'branch1'
- })))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'ou': 'branch1'
+ })))
except ldap.LDAPError as e:
log.fatal('test_automember: Failed to add branch1: error ' + e.message['desc'])
assert False
@@ -508,9 +501,9 @@ def test_automember(inst, args=None):
# Add ou=branch2
try:
inst.add_s(Entry((BRANCH2_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'ou': 'branch2'
- })))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'ou': 'branch2'
+ })))
except ldap.LDAPError as e:
log.fatal('test_automember: Failed to add branch2: error ' + e.message['desc'])
assert False
@@ -518,13 +511,13 @@ def test_automember(inst, args=None):
# Add the automember config entry
try:
inst.add_s(Entry((CONFIG_DN, {
- 'objectclass': 'top autoMemberDefinition'.split(),
- 'cn': 'config',
- 'autoMemberScope': 'ou=branch1,' + DEFAULT_SUFFIX,
- 'autoMemberFilter': 'objectclass=top',
- 'autoMemberDefaultGroup': 'cn=group,' + DEFAULT_SUFFIX,
- 'autoMemberGroupingAttr': 'member:dn'
- })))
+ 'objectclass': 'top autoMemberDefinition'.split(),
+ 'cn': 'config',
+ 'autoMemberScope': 'ou=branch1,' + DEFAULT_SUFFIX,
+ 'autoMemberFilter': 'objectclass=top',
+ 'autoMemberDefaultGroup': 'cn=group,' + DEFAULT_SUFFIX,
+ 'autoMemberGroupingAttr': 'member:dn'
+ })))
except ldap.LDAPError as e:
log.fatal('test_automember: Failed to add config entry: error ' + e.message['desc'])
assert False
@@ -536,9 +529,9 @@ def test_automember(inst, args=None):
# Add a user that should get added to the group
try:
inst.add_s(Entry((BUSER1_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'user1'
- })))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user1'
+ })))
except ldap.LDAPError as e:
log.fatal('test_automember: Failed to add user: error ' + e.message['desc'])
assert False
@@ -574,9 +567,9 @@ def test_automember(inst, args=None):
# Add a user that should get added to the group
try:
inst.add_s(Entry((BUSER2_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'user2'
- })))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user2'
+ })))
except ldap.LDAPError as e:
log.fatal('test_automember: Failed to user to branch2: error ' + e.message['desc'])
assert False
@@ -602,9 +595,9 @@ def test_automember(inst, args=None):
# Add an entry that should be picked up by automember - verify it is not(yet)
try:
inst.add_s(Entry((BUSER3_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'user3'
- })))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user3'
+ })))
except ldap.LDAPError as e:
log.fatal('test_automember: Failed to user3 to branch2: error ' + e.message['desc'])
assert False
@@ -627,9 +620,9 @@ def test_automember(inst, args=None):
# Add the task
try:
inst.add_s(Entry((TASK_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'basedn': 'ou=branch2,' + DEFAULT_SUFFIX,
- 'filter': 'objectclass=top'})))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'basedn': 'ou=branch2,' + DEFAULT_SUFFIX,
+ 'filter': 'objectclass=top'})))
except ldap.LDAPError as e:
log.fatal('test_automember: Failed to add task: error ' + e.message['desc'])
assert False
@@ -730,15 +723,15 @@ def test_dna(inst, args=None):
try:
inst.add_s(Entry((CONFIG_DN, {
- 'objectclass': 'top dnaPluginConfig'.split(),
- 'cn': 'config',
- 'dnatype': 'uidNumber',
- 'dnafilter': '(objectclass=top)',
- 'dnascope': DEFAULT_SUFFIX,
- 'dnaMagicRegen': '-1',
- 'dnaMaxValue': '50000',
- 'dnaNextValue': '1'
- })))
+ 'objectclass': 'top dnaPluginConfig'.split(),
+ 'cn': 'config',
+ 'dnatype': 'uidNumber',
+ 'dnafilter': '(objectclass=top)',
+ 'dnascope': DEFAULT_SUFFIX,
+ 'dnaMagicRegen': '-1',
+ 'dnaMaxValue': '50000',
+ 'dnaNextValue': '1'
+ })))
except ldap.ALREADY_EXISTS:
try:
inst.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'dnaNextValue', '1'),
@@ -756,9 +749,9 @@ def test_dna(inst, args=None):
try:
inst.add_s(Entry((USER1_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'user1'
- })))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user1'
+ })))
except ldap.LDAPError as e:
log.fatal('test_dna: Failed to user1: error ' + e.message['desc'])
assert False
@@ -872,18 +865,18 @@ def test_linkedattrs(inst, args=None):
# Add test entries
try:
inst.add_s(Entry((USER1_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'user1'
- })))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user1'
+ })))
except ldap.LDAPError as e:
log.fatal('test_linkedattrs: Failed to user1: error ' + e.message['desc'])
assert False
try:
inst.add_s(Entry((USER2_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'user2'
- })))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user2'
+ })))
except ldap.LDAPError as e:
log.fatal('test_linkedattrs: Failed to user1: error ' + e.message['desc'])
assert False
@@ -891,11 +884,11 @@ def test_linkedattrs(inst, args=None):
# Add the linked attrs config entry
try:
inst.add_s(Entry((CONFIG_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'cn': 'config',
- 'linkType': 'directReport',
- 'managedType': 'manager'
- })))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'cn': 'config',
+ 'linkType': 'directReport',
+ 'managedType': 'manager'
+ })))
except ldap.LDAPError as e:
log.fatal('test_linkedattrs: Failed to add config entry: error ' + e.message['desc'])
assert False
@@ -1033,9 +1026,9 @@ def test_linkedattrs(inst, args=None):
TASK_DN = 'cn=task-' + str(int(time.time())) + ',cn=fixup linked attributes,cn=tasks,cn=config'
try:
inst.add_s(Entry(('cn=task-' + str(int(time.time())) + ',cn=fixup linked attributes,cn=tasks,cn=config', {
- 'objectclass': 'top extensibleObject'.split(),
- 'basedn': DEFAULT_SUFFIX,
- 'filter': 'objectclass=top'})))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'basedn': DEFAULT_SUFFIX,
+ 'filter': 'objectclass=top'})))
except ldap.LDAPError as e:
log.fatal('test_linkedattrs: Failed to add task: error ' + e.message['desc'])
assert False
@@ -1123,29 +1116,29 @@ def test_memberof(inst, args=None):
# Add our test entries
try:
inst.add_s(Entry((USER1_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'user1'
- })))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user1'
+ })))
except ldap.LDAPError as e:
log.fatal('test_memberof: Failed to add user1: error ' + e.message['desc'])
assert False
try:
inst.add_s(Entry((GROUP_DN, {
- 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
- 'cn': 'group',
- 'member': USER1_DN
- })))
+ 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
+ 'cn': 'group',
+ 'member': USER1_DN
+ })))
except ldap.LDAPError as e:
log.fatal('test_memberof: Failed to add group: error ' + e.message['desc'])
assert False
try:
inst.add_s(Entry((SHARED_CONFIG_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'memberofgroupattr': 'member',
- 'memberofattr': 'memberof'
- })))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'memberofgroupattr': 'member',
+ 'memberofattr': 'memberof'
+ })))
except ldap.LDAPError as e:
log.fatal('test_memberof: Failed to shared config entry: error ' + e.message['desc'])
assert False
@@ -1250,19 +1243,19 @@ def test_memberof(inst, args=None):
try:
inst.add_s(Entry((USER1_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'user1'
- })))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user1'
+ })))
except ldap.LDAPError as e:
log.fatal('test_memberof: Failed to add user1: error ' + e.message['desc'])
assert False
try:
inst.add_s(Entry((GROUP_DN, {
- 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
- 'cn': 'group',
- 'member': USER1_DN
- })))
+ 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
+ 'cn': 'group',
+ 'member': USER1_DN
+ })))
except ldap.LDAPError as e:
log.fatal('test_memberof: Failed to add group: error ' + e.message['desc'])
assert False
@@ -1303,7 +1296,7 @@ def test_memberof(inst, args=None):
inst.modify_s(SHARED_CONFIG_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'uniquemember')])
except ldap.LDAPError as e:
log.fatal('test_memberof: Failed to set shared plugin entry(uniquemember): error '
- + e.message['desc'])
+ + e.message['desc'])
assert False
try:
@@ -1433,9 +1426,9 @@ def test_memberof(inst, args=None):
TASK_DN = 'cn=task-' + str(int(time.time())) + ',' + DN_MBO_TASK
try:
inst.add_s(Entry((TASK_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'basedn': DEFAULT_SUFFIX + "bad",
- 'filter': 'objectclass=top'})))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'basedn': DEFAULT_SUFFIX + "bad",
+ 'filter': 'objectclass=top'})))
except ldap.LDAPError as e:
log.fatal('test_memberof: Failed to add task(bad dn): error ' +
e.message['desc'])
@@ -1453,9 +1446,9 @@ def test_memberof(inst, args=None):
TASK_DN = 'cn=task-' + str(int(time.time())) + ',' + DN_MBO_TASK
try:
inst.add_s(Entry((TASK_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'basedn': "bad",
- 'filter': 'objectclass=top'})))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'basedn': "bad",
+ 'filter': 'objectclass=top'})))
except ldap.LDAPError as e:
log.fatal('test_memberof: Failed to add task(invalid dn syntax): ' +
e.message['desc'])
@@ -1474,9 +1467,9 @@ def test_memberof(inst, args=None):
TASK_DN = 'cn=task-' + str(int(time.time())) + ',' + DN_MBO_TASK
try:
inst.add_s(Entry((TASK_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'basedn': DEFAULT_SUFFIX,
- 'filter': '(objectclass=top'})))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'basedn': DEFAULT_SUFFIX,
+ 'filter': '(objectclass=top'})))
except ldap.LDAPError as e:
log.fatal('test_memberof: Failed to add task(bad filter: error ' +
e.message['desc'])
@@ -1499,9 +1492,9 @@ def test_memberof(inst, args=None):
TASK_DN = 'cn=task-' + str(int(time.time())) + ',' + DN_MBO_TASK
try:
inst.add_s(Entry((TASK_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'basedn': DEFAULT_SUFFIX,
- 'filter': 'objectclass=top'})))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'basedn': DEFAULT_SUFFIX,
+ 'filter': 'objectclass=top'})))
except ldap.LDAPError as e:
log.fatal('test_memberof: Failed to add task: error ' + e.message['desc'])
assert False
@@ -1585,8 +1578,8 @@ def test_mep(inst, args=None):
# Add our org units
try:
inst.add_s(Entry((PEOPLE_OU, {
- 'objectclass': 'top extensibleObject'.split(),
- 'ou': 'people'})))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'ou': 'people'})))
except ldap.ALREADY_EXISTS:
pass
except ldap.LDAPError as e:
@@ -1595,8 +1588,8 @@ def test_mep(inst, args=None):
try:
inst.add_s(Entry((GROUP_OU, {
- 'objectclass': 'top extensibleObject'.split(),
- 'ou': 'people'})))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'ou': 'people'})))
except ldap.ALREADY_EXISTS:
pass
except ldap.LDAPError as e:
@@ -1606,12 +1599,12 @@ def test_mep(inst, args=None):
# Add the template entry
try:
inst.add_s(Entry((TEMPLATE_DN, {
- 'objectclass': 'top mepTemplateEntry extensibleObject'.split(),
- 'cn': 'MEP Template',
- 'mepRDNAttr': 'cn',
- 'mepStaticAttr': 'objectclass: posixGroup|objectclass: extensibleObject'.split('|'),
- 'mepMappedAttr': 'cn: $cn|uid: $cn|gidNumber: $uidNumber'.split('|')
- })))
+ 'objectclass': 'top mepTemplateEntry extensibleObject'.split(),
+ 'cn': 'MEP Template',
+ 'mepRDNAttr': 'cn',
+ 'mepStaticAttr': 'objectclass: posixGroup|objectclass: extensibleObject'.split('|'),
+ 'mepMappedAttr': 'cn: $cn|uid: $cn|gidNumber: $uidNumber'.split('|')
+ })))
except ldap.LDAPError as e:
log.fatal('test_mep: Failed to add template entry: error ' + e.message['desc'])
assert False
@@ -1619,13 +1612,13 @@ def test_mep(inst, args=None):
# Add the config entry
try:
inst.add_s(Entry((CONFIG_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'cn': 'config',
- 'originScope': PEOPLE_OU,
- 'originFilter': 'objectclass=posixAccount',
- 'managedBase': GROUP_OU,
- 'managedTemplate': TEMPLATE_DN
- })))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'cn': 'config',
+ 'originScope': PEOPLE_OU,
+ 'originFilter': 'objectclass=posixAccount',
+ 'managedBase': GROUP_OU,
+ 'managedTemplate': TEMPLATE_DN
+ })))
except ldap.LDAPError as e:
log.fatal('test_mep: Failed to add config entry: error ' + e.message['desc'])
assert False
@@ -1637,13 +1630,13 @@ def test_mep(inst, args=None):
# Add an entry that meets the MEP scope
try:
inst.add_s(Entry((USER_DN, {
- 'objectclass': 'top posixAccount extensibleObject'.split(),
- 'uid': 'user1',
- 'cn': 'user1',
- 'uidNumber': '1',
- 'gidNumber': '1',
- 'homeDirectory': '/home/user1'
- })))
+ 'objectclass': 'top posixAccount extensibleObject'.split(),
+ 'uid': 'user1',
+ 'cn': 'user1',
+ 'uidNumber': '1',
+ 'gidNumber': '1',
+ 'homeDirectory': '/home/user1'
+ })))
except ldap.LDAPError as e:
log.fatal('test_mep: Failed to user1: error ' + e.message['desc'])
assert False
@@ -1662,12 +1655,12 @@ def test_mep(inst, args=None):
# Add a new template entry
try:
inst.add_s(Entry((TEMPLATE_DN2, {
- 'objectclass': 'top mepTemplateEntry extensibleObject'.split(),
- 'cn': 'MEP Template2',
- 'mepRDNAttr': 'uid',
- 'mepStaticAttr': 'objectclass: posixGroup|objectclass: extensibleObject'.split('|'),
- 'mepMappedAttr': 'cn: $uid|uid: $cn|gidNumber: $gidNumber'.split('|')
- })))
+ 'objectclass': 'top mepTemplateEntry extensibleObject'.split(),
+ 'cn': 'MEP Template2',
+ 'mepRDNAttr': 'uid',
+ 'mepStaticAttr': 'objectclass: posixGroup|objectclass: extensibleObject'.split('|'),
+ 'mepMappedAttr': 'cn: $uid|uid: $cn|gidNumber: $gidNumber'.split('|')
+ })))
except ldap.LDAPError as e:
log.fatal('test_mep: Failed to add template entry2: error ' + e.message['desc'])
assert False
@@ -1686,13 +1679,13 @@ def test_mep(inst, args=None):
# Add an entry that meets the MEP scope
try:
inst.add_s(Entry((USER_DN2, {
- 'objectclass': 'top posixAccount extensibleObject'.split(),
- 'uid': 'user 1',
- 'cn': 'user 1',
- 'uidNumber': '1',
- 'gidNumber': '1',
- 'homeDirectory': '/home/user2'
- })))
+ 'objectclass': 'top posixAccount extensibleObject'.split(),
+ 'uid': 'user 1',
+ 'cn': 'user 1',
+ 'uidNumber': '1',
+ 'gidNumber': '1',
+ 'homeDirectory': '/home/user2'
+ })))
except ldap.LDAPError as e:
log.fatal('test_mep: Failed to user2: error ' + e.message['desc'])
assert False
@@ -1800,8 +1793,8 @@ def test_passthru(inst, args=None):
# Create the top of the tree
try:
passthru_inst.add_s(Entry((PASS_SUFFIX2, {
- 'objectclass': 'top domain'.split(),
- 'dc': 'pass2'})))
+ 'objectclass': 'top domain'.split(),
+ 'dc': 'pass2'})))
except ldap.ALREADY_EXISTS:
pass
except ldap.LDAPError as e:
@@ -1812,10 +1805,10 @@ def test_passthru(inst, args=None):
# Add user to suffix1
try:
passthru_inst.add_s(Entry((PASSTHRU_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'admin',
- 'userpassword': 'password'
- })))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'admin',
+ 'userpassword': 'password'
+ })))
except ldap.LDAPError as e:
log.fatal('test_passthru: Failed to admin1: error ' + e.message['desc'])
passthru_inst.delete()
@@ -1824,10 +1817,10 @@ def test_passthru(inst, args=None):
# Add user to suffix 2
try:
passthru_inst.add_s(Entry((PASSTHRU_DN2, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'admin2',
- 'userpassword': 'password'
- })))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'admin2',
+ 'userpassword': 'password'
+ })))
except ldap.LDAPError as e:
log.fatal('test_passthru: Failed to admin2 : error ' + e.message['desc'])
passthru_inst.delete()
@@ -1952,29 +1945,29 @@ def test_referint(inst, args=None):
# Add some users and a group
try:
inst.add_s(Entry((USER1_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'user1'
- })))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user1'
+ })))
except ldap.LDAPError as e:
log.fatal('test_referint: Failed to add user1: error ' + e.message['desc'])
assert False
try:
inst.add_s(Entry((USER2_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'user2'
- })))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user2'
+ })))
except ldap.LDAPError as e:
log.fatal('test_referint: Failed to add user2: error ' + e.message['desc'])
assert False
try:
inst.add_s(Entry((GROUP_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'cn': 'group',
- 'member': USER1_DN,
- 'uniquemember': USER2_DN
- })))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'cn': 'group',
+ 'member': USER1_DN,
+ 'uniquemember': USER2_DN
+ })))
except ldap.LDAPError as e:
log.fatal('test_referint: Failed to add group: error ' + e.message['desc'])
assert False
@@ -1991,12 +1984,12 @@ def test_referint(inst, args=None):
# Add shared config entry
try:
inst.add_s(Entry((SHARED_CONFIG_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'referint-membership-attr': 'member',
- 'referint-update-delay': '0',
- 'referint-logfile': REFERINT_LOGFILE,
- 'referint-logchanges': '0'
- })))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'referint-membership-attr': 'member',
+ 'referint-update-delay': '0',
+ 'referint-logfile': REFERINT_LOGFILE,
+ 'referint-logchanges': '0'
+ })))
except ldap.LDAPError as e:
log.fatal('test_referint: Failed to shared config entry: error ' + e.message['desc'])
assert False
@@ -2069,29 +2062,29 @@ def test_referint(inst, args=None):
try:
inst.add_s(Entry((USER1_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'user1'
- })))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user1'
+ })))
except ldap.LDAPError as e:
log.fatal('test_referint: Failed to add user1: error ' + e.message['desc'])
assert False
try:
inst.add_s(Entry((USER2_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'user2'
- })))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user2'
+ })))
except ldap.LDAPError as e:
log.fatal('test_referint: Failed to add user2: error ' + e.message['desc'])
assert False
try:
inst.add_s(Entry((GROUP_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'cn': 'group',
- 'member': USER1_DN,
- 'uniquemember': USER2_DN
- })))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'cn': 'group',
+ 'member': USER1_DN,
+ 'uniquemember': USER2_DN
+ })))
except ldap.LDAPError as e:
log.fatal('test_referint: Failed to add group: error ' + e.message['desc'])
assert False
@@ -2121,7 +2114,7 @@ def test_referint(inst, args=None):
inst.modify_s(SHARED_CONFIG_DN, [(ldap.MOD_REPLACE, 'referint-membership-attr', 'uniquemember')])
except ldap.LDAPError as e:
log.fatal('test_referint: Failed to set shared plugin entry(uniquemember): error '
- + e.message['desc'])
+ + e.message['desc'])
assert False
# Delete a user
@@ -2162,9 +2155,9 @@ def test_referint(inst, args=None):
# Add test user
try:
inst.add_s(Entry((USER1_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'user1'
- })))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user1'
+ })))
except ldap.LDAPError as e:
log.fatal('test_referint: Failed to add user1: error ' + e.message['desc'])
assert False
@@ -2259,9 +2252,9 @@ def test_retrocl(inst, args=None):
# Add a user
try:
inst.add_s(Entry((USER1_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'user1'
- })))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user1'
+ })))
except ldap.LDAPError as e:
log.fatal('test_retrocl: Failed to add user1: error ' + e.message['desc'])
assert False
@@ -2299,7 +2292,7 @@ def test_retrocl(inst, args=None):
entry = inst.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(changenumber=*)')
if len(entry) != entry_count:
log.fatal('test_retrocl: changelog incorrectly updated - change count: '
- + str(len(entry)) + ' - expected 1')
+ + str(len(entry)) + ' - expected 1')
assert False
except ldap.LDAPError as e:
log.fatal('test_retrocl: Unable to search retro changelog: ' + e.message['desc'])
@@ -2351,10 +2344,10 @@ def test_rootdn(inst, args=None):
# Add an user and aci to open up cn=config
try:
inst.add_s(Entry((USER1_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'user1',
- 'userpassword': 'password'
- })))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user1',
+ 'userpassword': 'password'
+ })))
except ldap.LDAPError as e:
log.fatal('test_rootdn: Failed to add user1: error ' + e.message['desc'])
assert False
@@ -2480,4 +2473,3 @@ def test_all_plugins(inst, args=None):
func(inst, args)
return
-
diff --git a/dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py b/dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py
index a869e98..f913fc5 100644
--- a/dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py
+++ b/dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py
@@ -11,15 +11,11 @@ Created on Dec 16, 2014
@author: mreynolds
'''
-import os
-import sys
-import time
-import ldap
import logging
-import pytest
import threading
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
+
+import ldap
+from lib389 import DirSrv, Entry
from lib389._constants import *
from lib389.properties import *
@@ -114,8 +110,8 @@ class AddUsers(threading.Thread):
if self.addToGroup:
try:
conn.add_s(Entry((GROUP_DN,
- {'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
- 'uid': 'user' + str(idx)})))
+ {'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
+ 'uid': 'user' + str(idx)})))
except ldap.LDAPError as e:
if e == ldap.UNAVAILABLE or e == ldap.SERVER_DOWN:
log.fatal('AddUsers: failed to add group (' + USER_DN + ') error: ' + e.message['desc'])
@@ -127,7 +123,7 @@ class AddUsers(threading.Thread):
USER_DN = 'uid=' + self.rdnval + str(idx) + ',' + DEFAULT_SUFFIX
try:
conn.add_s(Entry((USER_DN, {'objectclass': 'top extensibleObject'.split(),
- 'uid': 'user' + str(idx)})))
+ 'uid': 'user' + str(idx)})))
except ldap.LDAPError as e:
if e == ldap.UNAVAILABLE or e == ldap.SERVER_DOWN:
log.fatal('AddUsers: failed to add (' + USER_DN + ') error: ' + e.message['desc'])
diff --git a/dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py b/dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py
index e55bc85..dec6e87 100644
--- a/dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py
+++ b/dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py
@@ -11,20 +11,14 @@ Created on Dec 09, 2014
@author: mreynolds
'''
-import os
-import sys
-import time
-import ldap
-import ldap.sasl
import logging
+
+import ldap.sasl
import pytest
+from lib389.tasks import *
+
import plugin_tests
import stress_tests
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
from lib389.topologies import topology_st
log = logging.getLogger(__name__)
diff --git a/dirsrvtests/tests/suites/filter/filter_test.py b/dirsrvtests/tests/suites/filter/filter_test.py
index 81a3ee7..1979baf 100644
--- a/dirsrvtests/tests/suites/filter/filter_test.py
+++ b/dirsrvtests/tests/suites/filter/filter_test.py
@@ -6,16 +6,9 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
import logging
+
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.topologies import topology_st
@@ -35,10 +28,10 @@ def test_filter_escaped(topology_st):
try:
topology_st.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(),
- 'sn': '1',
- 'cn': 'test * me',
- 'uid': 'test_entry',
- 'userpassword': PASSWORD})))
+ 'sn': '1',
+ 'cn': 'test * me',
+ 'uid': 'test_entry',
+ 'userpassword': PASSWORD})))
except ldap.LDAPError as e:
log.fatal('test_filter_escaped: Failed to add test user ' + USER1_DN + ': error ' +
e.message['desc'])
@@ -46,10 +39,10 @@ def test_filter_escaped(topology_st):
try:
topology_st.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
- 'sn': '2',
- 'cn': 'test me',
- 'uid': 'test_entry2',
- 'userpassword': PASSWORD})))
+ 'sn': '2',
+ 'cn': 'test me',
+ 'uid': 'test_entry2',
+ 'userpassword': PASSWORD})))
except ldap.LDAPError as e:
log.fatal('test_filter_escaped: Failed to add test user ' + USER2_DN + ': error ' + e.message['desc'])
assert False
@@ -61,7 +54,7 @@ def test_filter_escaped(topology_st):
assert False
except ldap.LDAPError as e:
log.fatal('test_filter_escaped: Failed to search for user(%s), error: %s' %
- (USER1_DN, e.message('desc')))
+ (USER1_DN, e.message('desc')))
assert False
log.info('test_filter_escaped: PASSED')
@@ -77,7 +70,7 @@ def test_filter_search_original_attrs(topology_st):
try:
entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_BASE,
- 'objectclass=top', ['objectclass-EXTRA'])
+ 'objectclass=top', ['objectclass-EXTRA'])
if entry[0].hasAttr('objectclass-EXTRA'):
log.fatal('test_filter_search_original_attrs: Entry does not have the original attribute')
assert False
diff --git a/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py b/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py
index 9812fae..3fc94d7 100644
--- a/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py
+++ b/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py
@@ -6,14 +6,7 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
-import os
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
from lib389.topologies import topology_st
diff --git a/dirsrvtests/tests/suites/gssapi_repl/gssapi_repl_test.py b/dirsrvtests/tests/suites/gssapi_repl/gssapi_repl_test.py
index 2bae81d..1938236 100644
--- a/dirsrvtests/tests/suites/gssapi_repl/gssapi_repl_test.py
+++ b/dirsrvtests/tests/suites/gssapi_repl/gssapi_repl_test.py
@@ -6,19 +6,9 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
-from lib389.mit_krb5 import MitKrb5
from lib389.topologies import topology_m2
#########################################
diff --git a/dirsrvtests/tests/suites/ldapi/__init__.py b/dirsrvtests/tests/suites/ldapi/__init__.py
index 40a96af..e69de29 100644
--- a/dirsrvtests/tests/suites/ldapi/__init__.py
+++ b/dirsrvtests/tests/suites/ldapi/__init__.py
@@ -1 +0,0 @@
-# -*- coding: utf-8 -*-
diff --git a/dirsrvtests/tests/suites/memberof_plugin/memberof_test.py b/dirsrvtests/tests/suites/memberof_plugin/memberof_test.py
index ce1f5ed..5880964 100644
--- a/dirsrvtests/tests/suites/memberof_plugin/memberof_test.py
+++ b/dirsrvtests/tests/suites/memberof_plugin/memberof_test.py
@@ -6,20 +6,13 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
from lib389.topologies import topology_st
+DEBUGGING = os.getenv('DEBUGGING', False)
+
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
@@ -27,7 +20,6 @@ MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config')
USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX
USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX
GROUP_DN = 'cn=group,' + DEFAULT_SUFFIX
-DEBUGGING=True
PLUGIN_TYPE = 'nsslapd-pluginType'
PLUGIN_MEMBEROF_GRP_ATTR = 'memberofgroupattr'
@@ -39,51 +31,62 @@ USERS_CONTAINER = "ou=people,%s" % SUFFIX
GROUP_RDN = "group"
GROUPS_CONTAINER = "ou=groups,%s" % SUFFIX
+
def _set_memberofgroupattr_add(topology_st, values):
topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN,
- [(ldap.MOD_ADD,
- PLUGIN_MEMBEROF_GRP_ATTR,
- values)])
+ [(ldap.MOD_ADD,
+ PLUGIN_MEMBEROF_GRP_ATTR,
+ values)])
+
+
def _get_user_rdn(ext):
return "uid=%s_%s" % (USER_RDN, ext)
+
def _get_user_dn(ext):
return "%s,%s" % (_get_user_rdn(ext), USERS_CONTAINER)
+
def _get_group_rdn(ext):
return "cn=%s_%s" % (GROUP_RDN, ext)
+
def _get_group_dn(ext):
return "%s,%s" % (_get_group_rdn(ext), GROUPS_CONTAINER)
+
def _create_user(topology_st, ext):
user_dn = _get_user_dn(ext)
topology_st.standalone.add_s(Entry((user_dn, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': _get_user_rdn(ext)
- })))
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': _get_user_rdn(ext)
+ })))
log.info("Create user %s" % user_dn)
return user_dn
+
def _delete_user(topology_st, ext):
user_dn = _get_user_dn(ext)
topology_st.standalone.delete_s(user_dn)
log.info("Delete user %s" % user_dn)
+
def _create_group(topology_st, ext):
group_dn = _get_group_dn(ext)
topology_st.standalone.add_s(Entry((group_dn, {
- 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
- 'ou': _get_group_rdn(ext)
- })))
+ 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
+ 'ou': _get_group_rdn(ext)
+ })))
log.info("Create group %s" % group_dn)
return group_dn
+
def _delete_group(topology_st, ext):
group_dn = _get_group_dn(ext)
topology_st.standalone.delete_s(group_dn)
log.info("Delete group %s" % group_dn)
+
def _check_memberattr(topology_st, entry, memberattr, value):
log.info("Check %s.%s = %s" % (entry, memberattr, value))
entry = topology_st.standalone.getEntry(entry, ldap.SCOPE_BASE, '(objectclass=*)', [memberattr])
@@ -99,6 +102,7 @@ def _check_memberattr(topology_st, entry, memberattr, value):
break
return found
+
def _check_memberof(topology_st, member, group):
log.info("Lookup memberof from %s" % member)
entry = topology_st.standalone.getEntry(member, ldap.SCOPE_BASE, '(objectclass=*)', ['memberof'])
@@ -115,32 +119,37 @@ def _check_memberof(topology_st, member, group):
break
return found
+
def text_memberof_683241_01(topology_st):
"""
Test Modify the memberof plugin to use the new type
"""
topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN,
- [(ldap.MOD_REPLACE,
- PLUGIN_TYPE,
- 'betxnpostoperation')])
+ [(ldap.MOD_REPLACE,
+ PLUGIN_TYPE,
+ 'betxnpostoperation')])
topology_st.standalone.restart(timeout=10)
ent = topology_st.standalone.getEntry(MEMBEROF_PLUGIN_DN, ldap.SCOPE_BASE, "(objectclass=*)", [PLUGIN_TYPE])
assert ent.hasAttr(PLUGIN_TYPE)
assert ent.getValue(PLUGIN_TYPE) == 'betxnpostoperation'
+
def test_memberof_setloging(topology_st):
topology_st.standalone.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(65536))])
+
def test_memberof_MultiGrpAttr_001(topology_st):
    """Verify several grouping attributes can be configured at once.

    After adding 'uniqueMember' as an additional memberofgroupattr, the
    plugin configuration entry must list both 'member' and 'uniqueMember'
    (compared case-insensitively, as LDAP attribute names are).
    """
    _set_memberofgroupattr_add(topology_st, 'uniqueMember')
    ent = topology_st.standalone.getEntry(MEMBEROF_PLUGIN_DN, ldap.SCOPE_BASE, "(objectclass=*)",
                                          [PLUGIN_MEMBEROF_GRP_ATTR])
    assert ent.hasAttr(PLUGIN_MEMBEROF_GRP_ATTR)
    configured = [val.lower() for val in ent.getValues(PLUGIN_MEMBEROF_GRP_ATTR)]
    assert 'member' in configured
    assert 'uniquemember' in configured
+
def test_memberof_MultiGrpAttr_003(topology_st):
"""
Check the plug-in is started
@@ -163,8 +172,7 @@ def test_memberof_MultiGrpAttr_004(topology_st):
memofegrp1 = _create_group(topology_st, 'memofegrp1')
memofegrp2 = _create_group(topology_st, 'memofegrp2')
-
- mods = [(ldap.MOD_ADD, 'member', memofenh1),(ldap.MOD_ADD, 'uniqueMember', memofenh2)]
+ mods = [(ldap.MOD_ADD, 'member', memofenh1), (ldap.MOD_ADD, 'uniqueMember', memofenh2)]
log.info("Update %s is memberof %s (member)" % (memofenh1, memofegrp1))
log.info("Update %s is memberof %s (uniqueMember)" % (memofenh2, memofegrp1))
topology_st.standalone.modify_s(memofegrp1, mods)
@@ -181,6 +189,7 @@ def test_memberof_MultiGrpAttr_004(topology_st):
assert _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
+
def test_memberof_MultiGrpAttr_005(topology_st):
"""
Partial removal of memberofgroupattr: removing member attribute from Group1
@@ -191,17 +200,18 @@ def test_memberof_MultiGrpAttr_005(topology_st):
memofegrp1 = _get_group_dn('memofegrp1')
memofegrp2 = _get_group_dn('memofegrp2')
- log.info("Update %s is no longer memberof %s (member)" % (memofenh1, memofegrp1))
+ log.info("Update %s is no longer memberof %s (member)" % (memofenh1, memofegrp1))
mods = [(ldap.MOD_DELETE, 'member', memofenh1)]
topology_st.standalone.modify_s(memofegrp1, mods)
# assert enh1 is NOT member of grp1 and is member of grp2
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
# assert enh2 is member of grp1 and is member of grp2
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
+
def test_memberof_MultiGrpAttr_006(topology_st):
"""
@@ -213,18 +223,19 @@ def test_memberof_MultiGrpAttr_006(topology_st):
memofegrp1 = _get_group_dn('memofegrp1')
memofegrp2 = _get_group_dn('memofegrp2')
- log.info("Update %s is no longer memberof %s (uniqueMember)" % (memofenh1, memofegrp1))
+ log.info("Update %s is no longer memberof %s (uniqueMember)" % (memofenh1, memofegrp1))
mods = [(ldap.MOD_DELETE, 'uniqueMember', memofenh2)]
topology_st.standalone.modify_s(memofegrp2, mods)
# assert enh1 is NOT member of grp1 and is member of grp2
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
# assert enh2 is member of grp1 and is NOT member of grp2
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
+
def test_memberof_MultiGrpAttr_007(topology_st):
"""
Complete removal of memberofgroupattr
@@ -235,11 +246,11 @@ def test_memberof_MultiGrpAttr_007(topology_st):
memofegrp1 = _get_group_dn('memofegrp1')
memofegrp2 = _get_group_dn('memofegrp2')
- log.info("Update %s is no longer memberof %s (uniqueMember)" % (memofenh2, memofegrp1))
+ log.info("Update %s is no longer memberof %s (uniqueMember)" % (memofenh2, memofegrp1))
mods = [(ldap.MOD_DELETE, 'uniqueMember', memofenh2)]
topology_st.standalone.modify_s(memofegrp1, mods)
- log.info("Update %s is no longer memberof %s (member)" % (memofenh1, memofegrp2))
+ log.info("Update %s is no longer memberof %s (member)" % (memofenh1, memofegrp2))
mods = [(ldap.MOD_DELETE, 'member', memofenh1)]
topology_st.standalone.modify_s(memofegrp2, mods)
@@ -251,6 +262,7 @@ def test_memberof_MultiGrpAttr_007(topology_st):
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
+
def test_memberof_MultiGrpAttr_008(topology_st):
"""
MemberOf attribute should be present on both the users
@@ -270,28 +282,28 @@ def test_memberof_MultiGrpAttr_008(topology_st):
topology_st.standalone.modify_s(memofegrp2, mods)
# assert enh1 is member of grp1 and is NOT member of grp2
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
# assert enh2 is NOT member of grp1 and is member of grp2
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
log.info("Remove uniqueMember as a memberofgrpattr")
topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN,
- [(ldap.MOD_DELETE,
- PLUGIN_MEMBEROF_GRP_ATTR,
- ['uniqueMember'])])
+ [(ldap.MOD_DELETE,
+ PLUGIN_MEMBEROF_GRP_ATTR,
+ ['uniqueMember'])])
topology_st.standalone.restart(timeout=10)
log.info("Assert that this change of configuration did change the already set values")
# assert enh1 is member of grp1 and is NOT member of grp2
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
# assert enh2 is NOT member of grp1 and is member of grp2
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
_set_memberofgroupattr_add(topology_st, 'uniqueMember')
topology_st.standalone.restart(timeout=10)
@@ -309,6 +321,7 @@ def test_memberof_MultiGrpAttr_009(topology_st):
log.error("Setting 'memberUid' as memberofgroupattr is rejected (expected)")
assert True
+
def test_memberof_MultiGrpAttr_010(topology_st):
"""
Duplicate member attribute to groups
@@ -321,13 +334,14 @@ def test_memberof_MultiGrpAttr_010(topology_st):
memofegrp2 = _get_group_dn('memofegrp2')
# assert enh1 is member of grp1
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
mods = [(ldap.MOD_ADD, 'member', memofenh1)]
log.info("Try %s is memberof %s (member)" % (memofenh1, memofegrp1))
try:
topology_st.standalone.modify_s(memofegrp1, mods)
- log.error("Should not be allowed to add %s member of %s (because it was already member)" % (memofenh1, memofegrp1))
+ log.error(
+ "Should not be allowed to add %s member of %s (because it was already member)" % (memofenh1, memofegrp1))
assert False
except ldap.TYPE_OR_VALUE_EXISTS:
log.error("%s already member of %s --> fail (expected)" % (memofenh1, memofegrp1))
@@ -355,18 +369,19 @@ def test_memberof_MultiGrpAttr_011(topology_st):
log.info("Check initial status")
# assert enh1 is member of grp1 and is NOT member of grp2
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
# assert enh2 is NOT member of grp1 and is member of grp2
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
mods = [(ldap.MOD_ADD, 'uniqueMember', memofenh2)]
log.info("Try %s is memberof %s (member)" % (memofenh2, memofegrp2))
try:
topology_st.standalone.modify_s(memofegrp2, mods)
- log.error("Should not be allowed to add %s member of %s (because it was already member)" % (memofenh2, memofegrp2))
+ log.error(
+ "Should not be allowed to add %s member of %s (because it was already member)" % (memofenh2, memofegrp2))
assert False
except ldap.TYPE_OR_VALUE_EXISTS:
log.error("%s already member of %s --> fail (expected)" % (memofenh2, memofegrp2))
@@ -374,12 +389,12 @@ def test_memberof_MultiGrpAttr_011(topology_st):
log.info("Check final status")
# assert enh1 is member of grp1 and is NOT member of grp2
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
# assert enh2 is NOT member of grp1 and is member of grp2
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
def test_memberof_MultiGrpAttr_012(topology_st):
@@ -398,6 +413,7 @@ def test_memberof_MultiGrpAttr_012(topology_st):
"""
pass
+
def test_memberof_MultiGrpAttr_013(topology_st):
"""
    MemberURL attribute should reflect the modrdn changes in the group.
@@ -414,6 +430,7 @@ def test_memberof_MultiGrpAttr_013(topology_st):
"""
pass
+
def test_memberof_MultiGrpAttr_014(topology_st):
"""
Both member and uniqueMember pointing to the same user
@@ -447,16 +464,16 @@ def test_memberof_MultiGrpAttr_014(topology_st):
log.info("Check initial status")
# assert enh1 is member of grp1 and is NOT member of grp2
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
# assert enh2 is NOT member of grp1 and is member of grp2
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
memofegrp3 = _create_group(topology_st, 'memofegrp3')
- mods = [(ldap.MOD_ADD, 'member', memofenh1),(ldap.MOD_ADD, 'uniqueMember', memofenh1)]
+ mods = [(ldap.MOD_ADD, 'member', memofenh1), (ldap.MOD_ADD, 'uniqueMember', memofenh1)]
log.info("Update %s is memberof %s (member)" % (memofenh1, memofegrp3))
log.info("Update %s is memberof %s (uniqueMember)" % (memofenh1, memofegrp3))
topology_st.standalone.modify_s(memofegrp3, mods)
@@ -465,9 +482,9 @@ def test_memberof_MultiGrpAttr_014(topology_st):
# - grp1 (member)
# - not grp2
# - grp3 (member uniquemember)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
mods = [(ldap.MOD_DELETE, 'member', memofenh1)]
log.info("Update %s is not memberof %s (member)" % (memofenh1, memofegrp3))
@@ -481,24 +498,24 @@ def test_memberof_MultiGrpAttr_014(topology_st):
# - grp1 (member)
# - not grp2
# - grp3 (uniquemember)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
# assert enh2 is member of
# - not grp1
# - not grp2 (uniquemember)
# - grp3 (member)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
ent = topology_st.standalone.getEntry(memofegrp3, ldap.SCOPE_BASE, "(objectclass=*)", ['member', 'uniqueMember'])
assert ent.hasAttr('member')
assert memofenh1 not in ent.getValues('member')
- assert memofenh2 in ent.getValues('member')
+ assert memofenh2 in ent.getValues('member')
assert ent.hasAttr('uniqueMember')
- assert memofenh1 in ent.getValues('uniqueMember')
+ assert memofenh1 in ent.getValues('uniqueMember')
assert memofenh2 not in ent.getValues('uniqueMember')
log.info("Checking final status")
@@ -506,17 +523,17 @@ def test_memberof_MultiGrpAttr_014(topology_st):
# - grp1 (member)
# - not grp2
# - grp3 (uniquemember)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
# assert enh2 is member of
# - not grp1
# - grp2 (uniquemember)
# - grp3 (member)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
def test_memberof_MultiGrpAttr_015(topology_st):
@@ -550,8 +567,8 @@ def test_memberof_MultiGrpAttr_015(topology_st):
memofenh1 = _get_user_dn('memofenh1')
memofenh2 = _get_user_dn('memofenh2')
- dummy1 = _get_user_dn('dummy1')
- dummy2 = _get_user_dn('dummy2')
+ dummy1 = _get_user_dn('dummy1')
+ dummy2 = _get_user_dn('dummy2')
memofegrp1 = _get_group_dn('memofegrp1')
memofegrp2 = _get_group_dn('memofegrp2')
@@ -562,40 +579,40 @@ def test_memberof_MultiGrpAttr_015(topology_st):
# - grp1 (member)
# - not grp2
# - grp3 (uniquemember)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
# assert enh2 is member of
# - not grp1
# - grp2 (uniquemember)
# - grp3 (member)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
memofegrp015 = _create_group(topology_st, 'memofegrp015')
- mods = [(ldap.MOD_ADD, 'member', dummy1),(ldap.MOD_ADD, 'uniqueMember', dummy2)]
+ mods = [(ldap.MOD_ADD, 'member', dummy1), (ldap.MOD_ADD, 'uniqueMember', dummy2)]
log.info("Update %s is memberof %s (member)" % (dummy1, memofegrp015))
log.info("Update %s is memberof %s (uniqueMember)" % (dummy2, memofegrp015))
topology_st.standalone.modify_s(memofegrp015, mods)
ent = topology_st.standalone.getEntry(memofegrp015, ldap.SCOPE_BASE, "(objectclass=*)", ['member', 'uniqueMember'])
assert ent.hasAttr('member')
- assert dummy1 in ent.getValues('member')
+ assert dummy1 in ent.getValues('member')
assert dummy2 not in ent.getValues('member')
assert ent.hasAttr('uniqueMember')
assert dummy1 not in ent.getValues('uniqueMember')
- assert dummy2 in ent.getValues('uniqueMember')
+ assert dummy2 in ent.getValues('uniqueMember')
# assert enh1 is member of
# - grp1 (member)
# - not grp2
# - grp3 (uniquemember)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015)
# assert enh2 is member of
@@ -604,8 +621,8 @@ def test_memberof_MultiGrpAttr_015(topology_st):
# - grp3 (member)
#
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015)
@@ -644,7 +661,7 @@ def test_memberof_MultiGrpAttr_016(topology_st):
memofenh1 = _get_user_dn('memofenh1')
memofenh2 = _get_user_dn('memofenh2')
- dummy1 = _get_user_dn('dummy1')
+ dummy1 = _get_user_dn('dummy1')
memofegrp1 = _get_group_dn('memofegrp1')
memofegrp2 = _get_group_dn('memofegrp2')
@@ -656,9 +673,9 @@ def test_memberof_MultiGrpAttr_016(topology_st):
# - not grp2
# - grp3 (uniquemember)
# - not grp15
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015)
# assert enh2 is member of
@@ -667,13 +684,13 @@ def test_memberof_MultiGrpAttr_016(topology_st):
# - grp3 (member)
# - not grp15
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015)
memofegrp016 = _create_group(topology_st, 'memofegrp016')
- mods = [(ldap.MOD_ADD, 'member', memofenh1),(ldap.MOD_ADD, 'uniqueMember', memofenh1)]
+ mods = [(ldap.MOD_ADD, 'member', memofenh1), (ldap.MOD_ADD, 'uniqueMember', memofenh1)]
log.info("Update %s is memberof %s (member)" % (memofenh1, memofegrp016))
log.info("Update %s is memberof %s (uniqueMember)" % (memofenh1, memofegrp016))
topology_st.standalone.modify_s(memofegrp016, mods)
@@ -684,11 +701,11 @@ def test_memberof_MultiGrpAttr_016(topology_st):
# - grp3 (uniquemember)
# - not grp15
# - grp16 (member uniquemember)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
# assert enh2 is member of
# - not grp1
@@ -697,22 +714,22 @@ def test_memberof_MultiGrpAttr_016(topology_st):
# - not grp15
# - not grp16
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016)
- mods = [(ldap.MOD_ADD, 'member', dummy1),]
+ mods = [(ldap.MOD_ADD, 'member', dummy1), ]
log.info("Update %s is memberof %s (member)" % (dummy1, memofegrp016))
topology_st.standalone.modify_s(memofegrp016, mods)
ent = topology_st.standalone.getEntry(memofegrp016, ldap.SCOPE_BASE, "(objectclass=*)", ['member', 'uniqueMember'])
assert ent.hasAttr('member')
- assert dummy1 in ent.getValues('member')
+ assert dummy1 in ent.getValues('member')
assert ent.hasAttr('uniqueMember')
assert dummy1 not in ent.getValues('uniqueMember')
- mods = [(ldap.MOD_ADD, 'uniqueMember', dummy1),]
+ mods = [(ldap.MOD_ADD, 'uniqueMember', dummy1), ]
log.info("Update %s is memberof %s (uniqueMember)" % (dummy1, memofegrp016))
topology_st.standalone.modify_s(memofegrp016, mods)
@@ -728,11 +745,11 @@ def test_memberof_MultiGrpAttr_016(topology_st):
# - grp3 (uniquemember)
# - not grp15
# - grp16 (member uniquemember)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
# assert enh2 is member of
# - not grp1
@@ -741,11 +758,12 @@ def test_memberof_MultiGrpAttr_016(topology_st):
# - not grp15
# - not grp16
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016)
+
def test_memberof_MultiGrpAttr_017(topology_st):
"""
Add user1 and user2 as memberof grp017
@@ -822,11 +840,11 @@ def test_memberof_MultiGrpAttr_017(topology_st):
# - grp3 (uniquemember)
# - not grp15
# - grp16 (member uniquemember)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
# assert enh2 is member of
# - not grp1
@@ -835,8 +853,8 @@ def test_memberof_MultiGrpAttr_017(topology_st):
# - not grp15
# - not grp16
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016)
@@ -869,7 +887,8 @@ def test_memberof_MultiGrpAttr_017(topology_st):
memofuser3 = _create_user(topology_st, 'memofuser3')
memofegrp017 = _create_group(topology_st, 'memofegrp017')
- mods = [(ldap.MOD_ADD, 'member', memofuser1),(ldap.MOD_ADD, 'uniqueMember', memofuser2), (ldap.MOD_ADD, 'memberuid', memofuser3)]
+ mods = [(ldap.MOD_ADD, 'member', memofuser1), (ldap.MOD_ADD, 'uniqueMember', memofuser2),
+ (ldap.MOD_ADD, 'memberuid', memofuser3)]
log.info("Update %s is memberof %s (member)" % (memofuser1, memofegrp017))
log.info("Update %s is memberof %s (uniqueMember)" % (memofuser2, memofegrp017))
log.info("Update %s is memberof %s (memberuid)" % (memofuser3, memofegrp017))
@@ -882,11 +901,11 @@ def test_memberof_MultiGrpAttr_017(topology_st):
# - not grp15
# - grp16 (member uniquemember)
# - not grp17
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp017)
# assert enh2 is member of
@@ -897,13 +916,12 @@ def test_memberof_MultiGrpAttr_017(topology_st):
# - not grp16
# - not grp17
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp017)
-
# assert user1 is member of
# - not grp1
# - not grp2
@@ -916,7 +934,7 @@ def test_memberof_MultiGrpAttr_017(topology_st):
assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp015)
assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp016)
- assert _check_memberof(topology_st, member=memofuser1, group=memofegrp017)
+ assert _check_memberof(topology_st, member=memofuser1, group=memofegrp017)
# assert user2 is member of
# - not grp1
@@ -930,7 +948,7 @@ def test_memberof_MultiGrpAttr_017(topology_st):
assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp015)
assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp016)
- assert _check_memberof(topology_st, member=memofuser2, group=memofegrp017)
+ assert _check_memberof(topology_st, member=memofuser2, group=memofegrp017)
# assert user3 is member of
# - not grp1
@@ -1031,11 +1049,11 @@ def test_memberof_MultiGrpAttr_018(topology_st):
# - not grp15
# - grp16 (member uniquemember)
# - not grp17
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp017)
# assert enh2 is member of
@@ -1046,13 +1064,12 @@ def test_memberof_MultiGrpAttr_018(topology_st):
# - not grp16
# - not grp17
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp017)
-
# assert user1 is member of
# - not grp1
# - not grp2
@@ -1065,7 +1082,7 @@ def test_memberof_MultiGrpAttr_018(topology_st):
assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp015)
assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp016)
- assert _check_memberof(topology_st, member=memofuser1, group=memofegrp017)
+ assert _check_memberof(topology_st, member=memofuser1, group=memofegrp017)
# assert user2 is member of
# - not grp1
@@ -1079,7 +1096,7 @@ def test_memberof_MultiGrpAttr_018(topology_st):
assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp015)
assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp016)
- assert _check_memberof(topology_st, member=memofuser2, group=memofegrp017)
+ assert _check_memberof(topology_st, member=memofuser2, group=memofegrp017)
# assert user3 is member of
# - not grp1
@@ -1099,7 +1116,8 @@ def test_memberof_MultiGrpAttr_018(topology_st):
# Create a group grp018 with user1 member/uniquemember
memofegrp018 = _create_group(topology_st, 'memofegrp018')
- mods = [(ldap.MOD_ADD, 'member', memofuser1),(ldap.MOD_ADD, 'uniqueMember', memofuser1), (ldap.MOD_ADD, 'memberuid', memofuser1)]
+ mods = [(ldap.MOD_ADD, 'member', memofuser1), (ldap.MOD_ADD, 'uniqueMember', memofuser1),
+ (ldap.MOD_ADD, 'memberuid', memofuser1)]
log.info("Update %s is memberof %s (member)" % (memofuser1, memofegrp017))
log.info("Update %s is memberof %s (uniqueMember)" % (memofuser1, memofegrp017))
log.info("Update %s is memberof %s (memberuid)" % (memofuser1, memofegrp017))
@@ -1118,10 +1136,10 @@ def test_memberof_MultiGrpAttr_018(topology_st):
assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp015)
assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp016)
- assert _check_memberof(topology_st, member=memofuser1, group=memofegrp017)
- assert _check_memberof(topology_st, member=memofuser1, group=memofegrp018)
+ assert _check_memberof(topology_st, member=memofuser1, group=memofegrp017)
+ assert _check_memberof(topology_st, member=memofuser1, group=memofegrp018)
- mods = [(ldap.MOD_DELETE, 'member', memofuser1),(ldap.MOD_DELETE, 'uniqueMember', memofuser1)]
+ mods = [(ldap.MOD_DELETE, 'member', memofuser1), (ldap.MOD_DELETE, 'uniqueMember', memofuser1)]
log.info("Update %s is no longer memberof %s (member)" % (memofuser1, memofegrp018))
log.info("Update %s is no longer memberof %s (uniqueMember)" % (memofuser1, memofegrp018))
topology_st.standalone.modify_s(memofegrp018, mods)
@@ -1139,7 +1157,7 @@ def test_memberof_MultiGrpAttr_018(topology_st):
assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp015)
assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp016)
- assert _check_memberof(topology_st, member=memofuser1, group=memofegrp017)
+ assert _check_memberof(topology_st, member=memofuser1, group=memofegrp017)
assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp018)
# DEL user1, user2, user3, grp17
@@ -1155,11 +1173,11 @@ def test_memberof_MultiGrpAttr_018(topology_st):
# - not grp15
# - grp16 (member uniquemember)
# - not grp018
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018)
# assert enh2 is member of
@@ -1170,12 +1188,13 @@ def test_memberof_MultiGrpAttr_018(topology_st):
# - not grp16
# - not grp018
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018)
+
def test_memberof_MultiGrpAttr_019(topology_st):
"""
Add user2 to grp19_2
@@ -1235,11 +1254,11 @@ def test_memberof_MultiGrpAttr_019(topology_st):
# - not grp15
# - grp16 (member uniquemember)
# - not grp018
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018)
# assert enh2 is member of
@@ -1250,8 +1269,8 @@ def test_memberof_MultiGrpAttr_019(topology_st):
# - not grp16
# - not grp018
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018)
@@ -1314,7 +1333,7 @@ def test_memberof_MultiGrpAttr_019(topology_st):
assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp015)
assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp016)
assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp018)
- assert _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp019_1)
+ assert _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp019_1)
assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp019_2)
assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp019_3)
@@ -1334,7 +1353,7 @@ def test_memberof_MultiGrpAttr_019(topology_st):
assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp015)
assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp016)
assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp018)
- assert _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp019_1)
+ assert _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp019_1)
assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp019_2)
assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp019_3)
@@ -1354,11 +1373,11 @@ def test_memberof_MultiGrpAttr_019(topology_st):
assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp015)
assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp016)
assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp018)
- assert _check_memberof(topology_st, member=memofuser2, group=memofegrp019_1)
- assert _check_memberof(topology_st, member=memofuser2, group=memofegrp019_2)
+ assert _check_memberof(topology_st, member=memofuser2, group=memofegrp019_1)
+ assert _check_memberof(topology_st, member=memofuser2, group=memofegrp019_2)
assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp019_3)
- # assert memofuser3 is member of
+ # assert memofuser3 is member of
# - not grp1
# - not grp2
# - not grp3
@@ -1374,9 +1393,9 @@ def test_memberof_MultiGrpAttr_019(topology_st):
assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp015)
assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp016)
assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp018)
- assert _check_memberof(topology_st, member=memofuser3, group=memofegrp019_1)
+ assert _check_memberof(topology_st, member=memofuser3, group=memofegrp019_1)
assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp019_2)
- assert _check_memberof(topology_st, member=memofuser3, group=memofegrp019_3)
+ assert _check_memberof(topology_st, member=memofuser3, group=memofegrp019_3)
# DEL user2, user3, grp19*
topology_st.standalone.delete_s(memofuser2)
@@ -1385,7 +1404,6 @@ def test_memberof_MultiGrpAttr_019(topology_st):
topology_st.standalone.delete_s(memofegrp019_2)
topology_st.standalone.delete_s(memofegrp019_3)
-
# assert enh1 is member of
# - grp1 (member)
# - not grp2
@@ -1393,11 +1411,11 @@ def test_memberof_MultiGrpAttr_019(topology_st):
# - not grp15
# - grp16 (member uniquemember)
# - not grp018
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018)
# assert enh2 is member of
@@ -1408,12 +1426,13 @@ def test_memberof_MultiGrpAttr_019(topology_st):
# - not grp16
# - not grp018
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018)
+
def test_memberof_MultiGrpAttr_020(topology_st):
"""
Add user1 and grp[1-5]
@@ -1459,11 +1478,11 @@ def test_memberof_MultiGrpAttr_020(topology_st):
# - not grp15
# - grp16 (member uniquemember)
# - not grp018
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018)
# assert enh2 is member of
@@ -1474,8 +1493,8 @@ def test_memberof_MultiGrpAttr_020(topology_st):
# - not grp16
# - not grp018
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018)
@@ -1504,17 +1523,18 @@ def test_memberof_MultiGrpAttr_020(topology_st):
mods.append((ldap.MOD_ADD, 'member', grp))
topology_st.standalone.modify_s(memofegrp020_5, mods)
- assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_1)
- assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_2)
- assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_3)
- assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_4)
- assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_5)
+ assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_1)
+ assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_2)
+ assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_3)
+ assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_4)
+ assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_5)
# DEL user1, grp20*
topology_st.standalone.delete_s(memofuser1)
for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5]:
topology_st.standalone.delete_s(grp)
+
def test_memberof_MultiGrpAttr_021(topology_st):
"""
Add user[1-4] and Grp[1-4]
@@ -1584,11 +1604,11 @@ def test_memberof_MultiGrpAttr_021(topology_st):
# - not grp15
# - grp16 (member uniquemember)
# - not grp018
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018)
# assert enh2 is member of
@@ -1599,8 +1619,8 @@ def test_memberof_MultiGrpAttr_021(topology_st):
# - not grp16
# - not grp018
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018)
@@ -1617,14 +1637,13 @@ def test_memberof_MultiGrpAttr_021(topology_st):
memofegrp020_2 = _create_group(topology_st, 'memofegrp020_2')
memofegrp020_3 = _create_group(topology_st, 'memofegrp020_3')
memofegrp020_4 = _create_group(topology_st, 'memofegrp020_4')
- for x in [(memofegrp020_1,memofuser1),
- (memofegrp020_2, memofuser2),
- (memofegrp020_3, memofuser3),
- (memofegrp020_4, memofuser4)]:
+ for x in [(memofegrp020_1, memofuser1),
+ (memofegrp020_2, memofuser2),
+ (memofegrp020_3, memofuser3),
+ (memofegrp020_4, memofuser4)]:
mods = [(ldap.MOD_ADD, 'objectClass', 'inetUser'), (ldap.MOD_ADD, 'uniqueMember', x[1])]
topology_st.standalone.modify_s(x[0], mods)
-
# create grp5 with grp[1-4] as member + user1
memofegrp020_5 = _create_group(topology_st, 'memofegrp020_5')
mods = [(ldap.MOD_ADD, 'member', memofuser1)]
@@ -1634,32 +1653,31 @@ def test_memberof_MultiGrpAttr_021(topology_st):
# assert user[1-4] are member of grp20_5
for user in [memofuser1, memofuser2, memofuser3, memofuser4]:
- assert _check_memberof(topology_st, member=user, group=memofegrp020_5)
+ assert _check_memberof(topology_st, member=user, group=memofegrp020_5)
# assert userX is uniqueMember of grpX
- assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_1)
- assert _check_memberof(topology_st, member=memofuser2, group=memofegrp020_2)
- assert _check_memberof(topology_st, member=memofuser3, group=memofegrp020_3)
- assert _check_memberof(topology_st, member=memofuser4, group=memofegrp020_4)
+ assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_1)
+ assert _check_memberof(topology_st, member=memofuser2, group=memofegrp020_2)
+ assert _check_memberof(topology_st, member=memofuser3, group=memofegrp020_3)
+ assert _check_memberof(topology_st, member=memofuser4, group=memofegrp020_4)
# check that user[1-4] is only 'uniqueMember' of the grp20_[1-4]
- for x in [(memofegrp020_1,memofuser1),
- (memofegrp020_2, memofuser2),
- (memofegrp020_3, memofuser3),
- (memofegrp020_4, memofuser4)]:
- assert _check_memberattr(topology_st, x[0], 'uniqueMember', x[1])
+ for x in [(memofegrp020_1, memofuser1),
+ (memofegrp020_2, memofuser2),
+ (memofegrp020_3, memofuser3),
+ (memofegrp020_4, memofuser4)]:
+ assert _check_memberattr(topology_st, x[0], 'uniqueMember', x[1])
assert not _check_memberattr(topology_st, x[0], 'member', x[1])
# check that grp20_[1-4] are only 'member' of grp20_5
# check that user1 are only 'member' of grp20_5
for x in [memofuser1, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]:
- assert _check_memberattr(topology_st, memofegrp020_5, 'member', x)
+ assert _check_memberattr(topology_st, memofegrp020_5, 'member', x)
assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', x)
for user in [memofuser2, memofuser3, memofuser4]:
assert not _check_memberattr(topology_st, memofegrp020_5, 'member', user)
assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', user)
-
# assert enh1 is member of
# - grp1 (member)
# - not grp2
@@ -1668,11 +1686,11 @@ def test_memberof_MultiGrpAttr_021(topology_st):
# - grp16 (member uniquemember)
# - not grp018
# - not grp20*
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_1)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_2)
@@ -1689,8 +1707,8 @@ def test_memberof_MultiGrpAttr_021(topology_st):
# - not grp018
# - not grp20*
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018)
@@ -1778,13 +1796,13 @@ def test_memberof_MultiGrpAttr_022(topology_st):
# assert user[1-4] are member of grp20_5
for user in [memofuser1, memofuser2, memofuser3, memofuser4]:
- assert _check_memberof(topology_st, member=user, group=memofegrp020_5)
+ assert _check_memberof(topology_st, member=user, group=memofegrp020_5)
# assert userX is member of grpX
- assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_1)
- assert _check_memberof(topology_st, member=memofuser2, group=memofegrp020_2)
- assert _check_memberof(topology_st, member=memofuser3, group=memofegrp020_3)
- assert _check_memberof(topology_st, member=memofuser4, group=memofegrp020_4)
+ assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_1)
+ assert _check_memberof(topology_st, member=memofuser2, group=memofegrp020_2)
+ assert _check_memberof(topology_st, member=memofuser3, group=memofegrp020_3)
+ assert _check_memberof(topology_st, member=memofuser4, group=memofegrp020_4)
# assert enh1 is member of
# - grp1 (member)
@@ -1794,11 +1812,11 @@ def test_memberof_MultiGrpAttr_022(topology_st):
# - grp16 (member uniquemember)
# - not grp018
# - not grp20*
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_1)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_2)
@@ -1815,8 +1833,8 @@ def test_memberof_MultiGrpAttr_022(topology_st):
# - not grp018
# - not grp20*
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018)
@@ -1827,17 +1845,17 @@ def test_memberof_MultiGrpAttr_022(topology_st):
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_5)
# check that user[1-4] is only 'uniqueMember' of the grp20_[1-4]
- for x in [(memofegrp020_1,memofuser1),
- (memofegrp020_2, memofuser2),
- (memofegrp020_3, memofuser3),
- (memofegrp020_4, memofuser4)]:
- assert _check_memberattr(topology_st, x[0], 'uniqueMember', x[1])
+ for x in [(memofegrp020_1, memofuser1),
+ (memofegrp020_2, memofuser2),
+ (memofegrp020_3, memofuser3),
+ (memofegrp020_4, memofuser4)]:
+ assert _check_memberattr(topology_st, x[0], 'uniqueMember', x[1])
assert not _check_memberattr(topology_st, x[0], 'member', x[1])
# check that grp20_[1-4] are only 'member' of grp20_5
# check that user1 is only 'member' of grp20_5
for x in [memofuser1, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]:
- assert _check_memberattr(topology_st, memofegrp020_5, 'member', x)
+ assert _check_memberattr(topology_st, memofegrp020_5, 'member', x)
assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', x)
# check that user2-4 are neither 'member' nor 'uniquemember' of grp20_5
@@ -1845,37 +1863,36 @@ def test_memberof_MultiGrpAttr_022(topology_st):
assert not _check_memberattr(topology_st, memofegrp020_5, 'member', user)
assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', user)
-
# add userX (member) to grpX
- for x in [(memofegrp020_1,memofuser1),
- (memofegrp020_2, memofuser2),
- (memofegrp020_3, memofuser3),
- (memofegrp020_4, memofuser4)]:
+ for x in [(memofegrp020_1, memofuser1),
+ (memofegrp020_2, memofuser2),
+ (memofegrp020_3, memofuser3),
+ (memofegrp020_4, memofuser4)]:
mods = [(ldap.MOD_ADD, 'member', x[1])]
topology_st.standalone.modify_s(x[0], mods)
# check that user[1-4] are 'member' and 'uniqueMember' of the grp20_[1-4]
- for x in [(memofegrp020_1,memofuser1),
- (memofegrp020_2, memofuser2),
- (memofegrp020_3, memofuser3),
- (memofegrp020_4, memofuser4)]:
- assert _check_memberattr(topology_st, x[0], 'uniqueMember', x[1])
- assert _check_memberattr(topology_st, x[0], 'member', x[1])
+ for x in [(memofegrp020_1, memofuser1),
+ (memofegrp020_2, memofuser2),
+ (memofegrp020_3, memofuser3),
+ (memofegrp020_4, memofuser4)]:
+ assert _check_memberattr(topology_st, x[0], 'uniqueMember', x[1])
+ assert _check_memberattr(topology_st, x[0], 'member', x[1])
# add Grp[1-4] (uniqueMember) to grp5
# it creates a membership loop !!!
- mods=[(ldap.MOD_ADD, 'uniqueMember', memofegrp020_5)]
+ mods = [(ldap.MOD_ADD, 'uniqueMember', memofegrp020_5)]
for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]:
topology_st.standalone.modify_s(grp, mods)
time.sleep(5)
# assert user[1-4] are member of grp20_[1-4]
for user in [memofuser1, memofuser2, memofuser3, memofuser4]:
- assert _check_memberof(topology_st, member=user, group=memofegrp020_5)
- assert _check_memberof(topology_st, member=user, group=memofegrp020_4)
- assert _check_memberof(topology_st, member=user, group=memofegrp020_3)
- assert _check_memberof(topology_st, member=user, group=memofegrp020_2)
- assert _check_memberof(topology_st, member=user, group=memofegrp020_1)
+ assert _check_memberof(topology_st, member=user, group=memofegrp020_5)
+ assert _check_memberof(topology_st, member=user, group=memofegrp020_4)
+ assert _check_memberof(topology_st, member=user, group=memofegrp020_3)
+ assert _check_memberof(topology_st, member=user, group=memofegrp020_2)
+ assert _check_memberof(topology_st, member=user, group=memofegrp020_1)
# assert that all groups are members of each others because Grp5
# is member of all grp20_[1-4]
@@ -1885,17 +1902,16 @@ def test_memberof_MultiGrpAttr_022(topology_st):
# no member of itself
assert not _check_memberof(topology_st, member=grp, group=owner)
else:
- assert _check_memberof(topology_st, member=grp, group=owner)
+ assert _check_memberof(topology_st, member=grp, group=owner)
for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]:
- assert _check_memberof(topology_st, member=grp, group=memofegrp020_5)
-
+ assert _check_memberof(topology_st, member=grp, group=memofegrp020_5)
# assert userX is uniqueMember of grpX
- assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_1)
- assert _check_memberof(topology_st, member=memofuser2, group=memofegrp020_2)
- assert _check_memberof(topology_st, member=memofuser3, group=memofegrp020_3)
- assert _check_memberof(topology_st, member=memofuser4, group=memofegrp020_4)
- assert _check_memberof(topology_st, member=memofuser4, group=memofegrp020_5)
+ assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_1)
+ assert _check_memberof(topology_st, member=memofuser2, group=memofegrp020_2)
+ assert _check_memberof(topology_st, member=memofuser3, group=memofegrp020_3)
+ assert _check_memberof(topology_st, member=memofuser4, group=memofegrp020_4)
+ assert _check_memberof(topology_st, member=memofuser4, group=memofegrp020_5)
# assert enh1 is member of
# - grp1 (member)
@@ -1905,11 +1921,11 @@ def test_memberof_MultiGrpAttr_022(topology_st):
# - grp16 (member uniquemember)
# - not grp018
# - not grp20*
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_1)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_2)
@@ -1926,8 +1942,8 @@ def test_memberof_MultiGrpAttr_022(topology_st):
# - not grp018
# - not grp20*
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018)
@@ -1937,7 +1953,9 @@ def test_memberof_MultiGrpAttr_022(topology_st):
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_4)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_5)
-def verify_post_023(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, memofuser1, memofuser2, memofuser3, memofuser4):
+
+def verify_post_023(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5,
+ memofuser1, memofuser2, memofuser3, memofuser4):
"""
/----member ---> G1 ---uniqueMember -------\
/ V
@@ -1952,34 +1970,36 @@ def verify_post_023(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3,
|<--uniquemember-/
"""
for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]:
- assert _check_memberattr(topology_st, memofegrp020_5, 'member', x)
+ assert _check_memberattr(topology_st, memofegrp020_5, 'member', x)
assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', x)
for x in [memofegrp020_2, memofegrp020_3, memofegrp020_4]:
assert not _check_memberattr(topology_st, x, 'member', memofegrp020_5)
- assert _check_memberattr(topology_st, x, 'uniqueMember', memofegrp020_5)
+ assert _check_memberattr(topology_st, x, 'uniqueMember', memofegrp020_5)
# check that user[1-4] is only 'uniqueMember' of the grp20_[1-4]
for x in [(memofegrp020_2, memofuser2),
- (memofegrp020_3, memofuser3),
- (memofegrp020_4, memofuser4)]:
- assert _check_memberattr(topology_st, x[0], 'uniqueMember', x[1])
- assert _check_memberattr(topology_st, x[0], 'member', x[1])
- assert _check_memberattr(topology_st, memofegrp020_1, 'uniqueMember', memofuser1)
+ (memofegrp020_3, memofuser3),
+ (memofegrp020_4, memofuser4)]:
+ assert _check_memberattr(topology_st, x[0], 'uniqueMember', x[1])
+ assert _check_memberattr(topology_st, x[0], 'member', x[1])
+ assert _check_memberattr(topology_st, memofegrp020_1, 'uniqueMember', memofuser1)
assert not _check_memberattr(topology_st, memofegrp020_1, 'member', memofuser1)
assert not _check_memberattr(topology_st, memofegrp020_1, 'uniqueMember', memofegrp020_5)
assert not _check_memberattr(topology_st, memofegrp020_1, 'member', memofegrp020_5)
- for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofuser1, memofuser2, memofuser3, memofuser4]:
- assert _check_memberof(topology_st, member=x, group=memofegrp020_5)
+ for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofuser1, memofuser2, memofuser3,
+ memofuser4]:
+ assert _check_memberof(topology_st, member=x, group=memofegrp020_5)
for x in [memofegrp020_2, memofegrp020_3, memofegrp020_4]:
- assert _check_memberof(topology_st, member=memofegrp020_5, group=x)
+ assert _check_memberof(topology_st, member=memofegrp020_5, group=x)
for user in [memofuser1, memofuser2, memofuser3, memofuser4]:
- assert _check_memberof(topology_st, member=user, group=memofegrp020_5)
- assert _check_memberof(topology_st, member=user, group=memofegrp020_4)
- assert _check_memberof(topology_st, member=user, group=memofegrp020_3)
- assert _check_memberof(topology_st, member=user, group=memofegrp020_2)
+ assert _check_memberof(topology_st, member=user, group=memofegrp020_5)
+ assert _check_memberof(topology_st, member=user, group=memofegrp020_4)
+ assert _check_memberof(topology_st, member=user, group=memofegrp020_3)
+ assert _check_memberof(topology_st, member=user, group=memofegrp020_2)
for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5]:
- assert _check_memberof(topology_st, member=memofuser1, group=grp)
+ assert _check_memberof(topology_st, member=memofuser1, group=grp)
+
def test_memberof_MultiGrpAttr_023(topology_st):
"""
@@ -2061,11 +2081,11 @@ def test_memberof_MultiGrpAttr_023(topology_st):
# assert user[1-4] are member of grp20_[1-4]
for user in [memofuser1, memofuser2, memofuser3, memofuser4]:
- assert _check_memberof(topology_st, member=user, group=memofegrp020_5)
- assert _check_memberof(topology_st, member=user, group=memofegrp020_4)
- assert _check_memberof(topology_st, member=user, group=memofegrp020_3)
- assert _check_memberof(topology_st, member=user, group=memofegrp020_2)
- assert _check_memberof(topology_st, member=user, group=memofegrp020_1)
+ assert _check_memberof(topology_st, member=user, group=memofegrp020_5)
+ assert _check_memberof(topology_st, member=user, group=memofegrp020_4)
+ assert _check_memberof(topology_st, member=user, group=memofegrp020_3)
+ assert _check_memberof(topology_st, member=user, group=memofegrp020_2)
+ assert _check_memberof(topology_st, member=user, group=memofegrp020_1)
# assert that all groups are members of each others because Grp5
# is member of all grp20_[1-4]
@@ -2075,17 +2095,16 @@ def test_memberof_MultiGrpAttr_023(topology_st):
# no member of itself
assert not _check_memberof(topology_st, member=grp, group=owner)
else:
- assert _check_memberof(topology_st, member=grp, group=owner)
+ assert _check_memberof(topology_st, member=grp, group=owner)
for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]:
- assert _check_memberof(topology_st, member=grp, group=memofegrp020_5)
-
+ assert _check_memberof(topology_st, member=grp, group=memofegrp020_5)
# assert userX is uniqueMember of grpX
- assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_1)
- assert _check_memberof(topology_st, member=memofuser2, group=memofegrp020_2)
- assert _check_memberof(topology_st, member=memofuser3, group=memofegrp020_3)
- assert _check_memberof(topology_st, member=memofuser4, group=memofegrp020_4)
- assert _check_memberof(topology_st, member=memofuser4, group=memofegrp020_5)
+ assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_1)
+ assert _check_memberof(topology_st, member=memofuser2, group=memofegrp020_2)
+ assert _check_memberof(topology_st, member=memofuser3, group=memofegrp020_3)
+ assert _check_memberof(topology_st, member=memofuser4, group=memofegrp020_4)
+ assert _check_memberof(topology_st, member=memofuser4, group=memofegrp020_5)
# assert enh1 is member of
# - grp1 (member)
@@ -2095,11 +2114,11 @@ def test_memberof_MultiGrpAttr_023(topology_st):
# - grp16 (member uniquemember)
# - not grp018
# - not grp20*
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015)
- assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
+ assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_1)
assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_2)
@@ -2116,8 +2135,8 @@ def test_memberof_MultiGrpAttr_023(topology_st):
# - not grp018
# - not grp20*
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
- assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2)
+ assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016)
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018)
@@ -2128,19 +2147,19 @@ def test_memberof_MultiGrpAttr_023(topology_st):
assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_5)
# check that user[1-4] is only 'uniqueMember' of the grp20_[1-4]
- for x in [(memofegrp020_1,memofuser1),
- (memofegrp020_2, memofuser2),
- (memofegrp020_3, memofuser3),
- (memofegrp020_4, memofuser4)]:
- assert _check_memberattr(topology_st, x[0], 'uniqueMember', x[1])
- assert _check_memberattr(topology_st, x[0], 'member', x[1])
+ for x in [(memofegrp020_1, memofuser1),
+ (memofegrp020_2, memofuser2),
+ (memofegrp020_3, memofuser3),
+ (memofegrp020_4, memofuser4)]:
+ assert _check_memberattr(topology_st, x[0], 'uniqueMember', x[1])
+ assert _check_memberattr(topology_st, x[0], 'member', x[1])
# check that grp20_[1-4] are 'uniqueMember' and 'member' of grp20_5
# check that user1 is only 'member' of grp20_5
for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]:
- assert _check_memberattr(topology_st, memofegrp020_5, 'member', x)
+ assert _check_memberattr(topology_st, memofegrp020_5, 'member', x)
assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', x)
- assert _check_memberattr(topology_st, memofegrp020_5, 'member', memofuser1)
+ assert _check_memberattr(topology_st, memofegrp020_5, 'member', memofuser1)
assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', memofuser1)
# DEL user1 as 'member' of grp20_1
@@ -2163,9 +2182,12 @@ def test_memberof_MultiGrpAttr_023(topology_st):
|----member ---> G4 ---member/uniqueMember -> U4
|<--uniquemember-/
"""
- verify_post_023(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, memofuser1, memofuser2, memofuser3, memofuser4)
+ verify_post_023(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5,
+ memofuser1, memofuser2, memofuser3, memofuser4)
+
-def verify_post_024(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, memofuser1, memofuser2, memofuser3, memofuser4):
+def verify_post_024(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5,
+ memofuser1, memofuser2, memofuser3, memofuser4):
"""
/----member ---> G1 ---member/uniqueMember -\
/ V
@@ -2180,33 +2202,35 @@ def verify_post_024(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3,
|<--uniquemember-/
"""
for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]:
- assert _check_memberattr(topology_st, memofegrp020_5, 'member', x)
+ assert _check_memberattr(topology_st, memofegrp020_5, 'member', x)
assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', x)
for x in [memofegrp020_2, memofegrp020_3, memofegrp020_4]:
assert not _check_memberattr(topology_st, x, 'member', memofegrp020_5)
- assert _check_memberattr(topology_st, x, 'uniqueMember', memofegrp020_5)
+ assert _check_memberattr(topology_st, x, 'uniqueMember', memofegrp020_5)
# check that user[1-4] is only 'uniqueMember' of the grp20_[1-4]
for x in [(memofegrp020_1, memofuser1),
- (memofegrp020_2, memofuser2),
- (memofegrp020_3, memofuser3),
- (memofegrp020_4, memofuser4)]:
- assert _check_memberattr(topology_st, x[0], 'uniqueMember', x[1])
- assert _check_memberattr(topology_st, x[0], 'member', x[1])
+ (memofegrp020_2, memofuser2),
+ (memofegrp020_3, memofuser3),
+ (memofegrp020_4, memofuser4)]:
+ assert _check_memberattr(topology_st, x[0], 'uniqueMember', x[1])
+ assert _check_memberattr(topology_st, x[0], 'member', x[1])
assert not _check_memberattr(topology_st, memofegrp020_1, 'uniqueMember', memofegrp020_5)
assert not _check_memberattr(topology_st, memofegrp020_1, 'member', memofegrp020_5)
- for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofuser1, memofuser2, memofuser3, memofuser4]:
- assert _check_memberof(topology_st, member=x, group=memofegrp020_5)
+ for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofuser1, memofuser2, memofuser3,
+ memofuser4]:
+ assert _check_memberof(topology_st, member=x, group=memofegrp020_5)
for x in [memofegrp020_2, memofegrp020_3, memofegrp020_4]:
- assert _check_memberof(topology_st, member=memofegrp020_5, group=x)
+ assert _check_memberof(topology_st, member=memofegrp020_5, group=x)
for user in [memofuser1, memofuser2, memofuser3, memofuser4]:
- assert _check_memberof(topology_st, member=user, group=memofegrp020_5)
- assert _check_memberof(topology_st, member=user, group=memofegrp020_4)
- assert _check_memberof(topology_st, member=user, group=memofegrp020_3)
- assert _check_memberof(topology_st, member=user, group=memofegrp020_2)
+ assert _check_memberof(topology_st, member=user, group=memofegrp020_5)
+ assert _check_memberof(topology_st, member=user, group=memofegrp020_4)
+ assert _check_memberof(topology_st, member=user, group=memofegrp020_3)
+ assert _check_memberof(topology_st, member=user, group=memofegrp020_2)
for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5]:
- assert _check_memberof(topology_st, member=memofuser1, group=grp)
+ assert _check_memberof(topology_st, member=memofuser1, group=grp)
+
def test_memberof_MultiGrpAttr_024(topology_st):
"""
@@ -2249,15 +2273,18 @@ def test_memberof_MultiGrpAttr_024(topology_st):
memofegrp020_3 = _get_group_dn('memofegrp020_3')
memofegrp020_4 = _get_group_dn('memofegrp020_4')
memofegrp020_5 = _get_group_dn('memofegrp020_5')
- verify_post_023(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, memofuser1, memofuser2, memofuser3, memofuser4)
+ verify_post_023(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5,
+ memofuser1, memofuser2, memofuser3, memofuser4)
# ADD user1 as 'member' of grp20_1
mods = [(ldap.MOD_ADD, 'member', memofuser1)]
topology_st.standalone.modify_s(memofegrp020_1, mods)
- verify_post_024(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, memofuser1, memofuser2, memofuser3, memofuser4)
+ verify_post_024(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5,
+ memofuser1, memofuser2, memofuser3, memofuser4)
-def verify_post_025(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, memofuser1, memofuser2, memofuser3, memofuser4):
+def verify_post_025(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5,
+ memofuser1, memofuser2, memofuser3, memofuser4):
"""
/----member ---> G1
/
@@ -2269,24 +2296,24 @@ def verify_post_025(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3,
"""
for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]:
- assert _check_memberattr(topology_st, memofegrp020_5, 'member', x)
+ assert _check_memberattr(topology_st, memofegrp020_5, 'member', x)
assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', x)
for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]:
assert not _check_memberattr(topology_st, x, 'member', memofegrp020_5)
assert not _check_memberattr(topology_st, x, 'uniqueMember', memofegrp020_5)
# check that user[1-4] is only 'uniqueMember' of the grp20_[1-4]
for x in [(memofegrp020_1, memofuser1),
- (memofegrp020_2, memofuser2),
- (memofegrp020_3, memofuser3),
- (memofegrp020_4, memofuser4)]:
+ (memofegrp020_2, memofuser2),
+ (memofegrp020_3, memofuser3),
+ (memofegrp020_4, memofuser4)]:
assert not _check_memberattr(topology_st, x[0], 'uniqueMember', x[1])
assert not _check_memberattr(topology_st, x[0], 'member', x[1])
for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofuser1]:
- assert _check_memberof(topology_st, member=x, group=memofegrp020_5)
+ assert _check_memberof(topology_st, member=x, group=memofegrp020_5)
for x in [memofuser2, memofuser3, memofuser4]:
- assert not _check_memberof(topology_st, member=x, group=memofegrp020_5)
- assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_5)
+ assert not _check_memberof(topology_st, member=x, group=memofegrp020_5)
+ assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_5)
for user in [memofuser1, memofuser2, memofuser3, memofuser4]:
for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]:
assert not _check_memberof(topology_st, member=user, group=grp)
@@ -2329,16 +2356,17 @@ def test_memberof_MultiGrpAttr_025(topology_st):
memofegrp020_3 = _get_group_dn('memofegrp020_3')
memofegrp020_4 = _get_group_dn('memofegrp020_4')
memofegrp020_5 = _get_group_dn('memofegrp020_5')
- verify_post_024(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, memofuser1, memofuser2, memofuser3, memofuser4)
+ verify_post_024(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5,
+ memofuser1, memofuser2, memofuser3, memofuser4)
# ADD inet
- #for user in [memofuser1, memofuser2, memofuser3, memofuser4]:
+ # for user in [memofuser1, memofuser2, memofuser3, memofuser4]:
# mods = [(ldap.MOD_ADD, 'objectClass', 'inetUser')]
# topology_st.standalone.modify_s(user, mods)
for x in [(memofegrp020_1, memofuser1),
- (memofegrp020_2, memofuser2),
- (memofegrp020_3, memofuser3),
- (memofegrp020_4, memofuser4)]:
+ (memofegrp020_2, memofuser2),
+ (memofegrp020_3, memofuser3),
+ (memofegrp020_4, memofuser4)]:
mods = [(ldap.MOD_DELETE, 'member', x[1]),
(ldap.MOD_DELETE, 'uniqueMember', x[1])]
topology_st.standalone.modify_s(x[0], mods)
@@ -2370,9 +2398,8 @@ def test_memberof_MultiGrpAttr_025(topology_st):
"""
- verify_post_025(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, memofuser1, memofuser2, memofuser3, memofuser4)
-
-
+ verify_post_025(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5,
+ memofuser1, memofuser2, memofuser3, memofuser4)
def test_memberof_auto_add_oc(topology_st):
diff --git a/dirsrvtests/tests/suites/memory_leaks/range_search_test.py b/dirsrvtests/tests/suites/memory_leaks/range_search_test.py
index 0c8e100..b35d2a9 100644
--- a/dirsrvtests/tests/suites/memory_leaks/range_search_test.py
+++ b/dirsrvtests/tests/suites/memory_leaks/range_search_test.py
@@ -6,16 +6,7 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
from lib389.topologies import topology_st
diff --git a/dirsrvtests/tests/suites/paged_results/paged_results_test.py b/dirsrvtests/tests/suites/paged_results/paged_results_test.py
index 774b255..f01bbc8 100644
--- a/dirsrvtests/tests/suites/paged_results/paged_results_test.py
+++ b/dirsrvtests/tests/suites/paged_results/paged_results_test.py
@@ -6,21 +6,17 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import time
-import ldap
-import logging
-import pytest
from random import sample
+
+import pytest
from ldap.controls import SimplePagedResultsControl, GetEffectiveRightsControl
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
from lib389.topologies import topology_st
+
from sss_control import SSSRequestControl
-DEBUGGING = False
+DEBUGGING = os.getenv('DEBUGGING', False)
if DEBUGGING:
logging.getLogger(__name__).setLevel(logging.DEBUG)
diff --git a/dirsrvtests/tests/suites/paged_results/sss_control.py b/dirsrvtests/tests/suites/paged_results/sss_control.py
index 58cd6c5..4598b64 100644
--- a/dirsrvtests/tests/suites/paged_results/sss_control.py
+++ b/dirsrvtests/tests/suites/paged_results/sss_control.py
@@ -11,14 +11,10 @@ __all__ = [
'SSSResponseControl',
]
-
-import ldap
-from ldap.ldapobject import LDAPObject
from ldap.controls import (RequestControl, ResponseControl,
- KNOWN_RESPONSE_CONTROLS, DecodeControlTuples)
-
-from pyasn1.type import univ, namedtype, tag, namedval, constraint
+ KNOWN_RESPONSE_CONTROLS)
from pyasn1.codec.ber import encoder, decoder
+from pyasn1.type import univ, namedtype, tag, namedval, constraint
# SortKeyList ::= SEQUENCE OF SEQUENCE {
@@ -29,14 +25,14 @@ from pyasn1.codec.ber import encoder, decoder
class SortKeyType(univ.Sequence):
componentType = namedtype.NamedTypes(
- namedtype.NamedType('attributeType', univ.OctetString()),
- namedtype.OptionalNamedType('orderingRule',
- univ.OctetString().subtype(
- implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)
- )
- ),
- namedtype.DefaultedNamedType('reverseOrder', univ.Boolean(False).subtype(
- implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))))
+ namedtype.NamedType('attributeType', univ.OctetString()),
+ namedtype.OptionalNamedType('orderingRule',
+ univ.OctetString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)
+ )
+ ),
+ namedtype.DefaultedNamedType('reverseOrder', univ.Boolean(False).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))))
class SortKeyListType(univ.SequenceOf):
@@ -50,11 +46,11 @@ class SSSRequestControl(RequestControl):
controlType = '1.2.840.113556.1.4.473'
def __init__(
- self,
- criticality=False,
- ordering_rules=None,
+ self,
+ criticality=False,
+ ordering_rules=None,
):
- RequestControl.__init__(self,self.controlType,criticality)
+ RequestControl.__init__(self, self.controlType, criticality)
self.ordering_rules = ordering_rules
if isinstance(ordering_rules, basestring):
ordering_rules = [ordering_rules]
@@ -87,33 +83,33 @@ class SSSRequestControl(RequestControl):
class SortResultType(univ.Sequence):
componentType = namedtype.NamedTypes(
- namedtype.NamedType('sortResult', univ.Enumerated().subtype(
- namedValues=namedval.NamedValues(
- ('success', 0),
- ('operationsError', 1),
- ('timeLimitExceeded', 3),
- ('strongAuthRequired', 8),
- ('adminLimitExceeded', 11),
- ('noSuchAttribute', 16),
- ('inappropriateMatching', 18),
- ('insufficientAccessRights', 50),
- ('busy', 51),
- ('unwillingToPerform', 53),
- ('other', 80)),
- subtypeSpec=univ.Enumerated.subtypeSpec + constraint.SingleValueConstraint(
- 0, 1, 3, 8, 11, 16, 18, 50, 51, 53, 80))),
- namedtype.OptionalNamedType('attributeType',
- univ.OctetString().subtype(
- implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)
- )
- ))
+ namedtype.NamedType('sortResult', univ.Enumerated().subtype(
+ namedValues=namedval.NamedValues(
+ ('success', 0),
+ ('operationsError', 1),
+ ('timeLimitExceeded', 3),
+ ('strongAuthRequired', 8),
+ ('adminLimitExceeded', 11),
+ ('noSuchAttribute', 16),
+ ('inappropriateMatching', 18),
+ ('insufficientAccessRights', 50),
+ ('busy', 51),
+ ('unwillingToPerform', 53),
+ ('other', 80)),
+ subtypeSpec=univ.Enumerated.subtypeSpec + constraint.SingleValueConstraint(
+ 0, 1, 3, 8, 11, 16, 18, 50, 51, 53, 80))),
+ namedtype.OptionalNamedType('attributeType',
+ univ.OctetString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)
+ )
+ ))
class SSSResponseControl(ResponseControl):
controlType = '1.2.840.113556.1.4.474'
- def __init__(self,criticality=False):
- ResponseControl.__init__(self,self.controlType,criticality)
+ def __init__(self, criticality=False):
+ ResponseControl.__init__(self, self.controlType, criticality)
def decodeControlValue(self, encoded):
p, rest = decoder.decode(encoded, asn1Spec=SortResultType())
diff --git a/dirsrvtests/tests/suites/password/password_test.py b/dirsrvtests/tests/suites/password/password_test.py
index f450c9a..e61ff7b 100644
--- a/dirsrvtests/tests/suites/password/password_test.py
+++ b/dirsrvtests/tests/suites/password/password_test.py
@@ -6,16 +6,9 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
import logging
+
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.topologies import topology_st
diff --git a/dirsrvtests/tests/suites/password/pwdAdmin_test.py b/dirsrvtests/tests/suites/password/pwdAdmin_test.py
index 46991d8..bbe77d0 100644
--- a/dirsrvtests/tests/suites/password/pwdAdmin_test.py
+++ b/dirsrvtests/tests/suites/password/pwdAdmin_test.py
@@ -6,16 +6,7 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
from lib389.topologies import topology_st
diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py
index a359b65..c0ee044 100644
--- a/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py
+++ b/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py
@@ -6,15 +6,7 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import time
-import subprocess
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
from lib389.topologies import topology_st
diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_inherit_global_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_inherit_global_test.py
index 5a51124..38a7950 100644
--- a/dirsrvtests/tests/suites/password/pwdPolicy_inherit_global_test.py
+++ b/dirsrvtests/tests/suites/password/pwdPolicy_inherit_global_test.py
@@ -6,19 +6,14 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
+import logging
+import subprocess
import time
+
import ldap
-import logging
import pytest
-import shutil
-import subprocess
-from lib389 import DirSrv, Entry, tools
-from lib389 import DirSrvTools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
from lib389._constants import *
-from lib389.properties import *
from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.INFO)
diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_syntax_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_syntax_test.py
index 7336c4d..93c94fb 100644
--- a/dirsrvtests/tests/suites/password/pwdPolicy_syntax_test.py
+++ b/dirsrvtests/tests/suites/password/pwdPolicy_syntax_test.py
@@ -6,14 +6,9 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import time
-import ldap
import logging
+
import pytest
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.topologies import topology_st
diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py
index fd0236c..468131d 100644
--- a/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py
+++ b/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py
@@ -6,18 +6,8 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-import subprocess
-from lib389 import DirSrv, Entry, tools, tasks
from ldap.controls.ppolicy import PasswordPolicyControl
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
from lib389.topologies import topology_st
diff --git a/dirsrvtests/tests/suites/password/pwd_algo_test.py b/dirsrvtests/tests/suites/password/pwd_algo_test.py
index b31f1ec..4b3fb33 100644
--- a/dirsrvtests/tests/suites/password/pwd_algo_test.py
+++ b/dirsrvtests/tests/suites/password/pwd_algo_test.py
@@ -6,16 +6,7 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
from lib389.topologies import topology_st
diff --git a/dirsrvtests/tests/suites/password/pwp_history_test.py b/dirsrvtests/tests/suites/password/pwp_history_test.py
index 31d48f9..2e0fb64 100644
--- a/dirsrvtests/tests/suites/password/pwp_history_test.py
+++ b/dirsrvtests/tests/suites/password/pwp_history_test.py
@@ -6,14 +6,7 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import ldap
-import logging
import pytest
-import time
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
from lib389.topologies import topology_st
diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_test.py
index 4f9ac46..22e81b7 100644
--- a/dirsrvtests/tests/suites/replication/cleanallruv_test.py
+++ b/dirsrvtests/tests/suites/replication/cleanallruv_test.py
@@ -6,18 +6,9 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
import threading
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389.repltools import ReplTools
-from lib389._constants import *
-from lib389.properties import *
+
+import pytest
from lib389.tasks import *
from lib389.utils import *
from lib389.topologies import topology_m4
diff --git a/dirsrvtests/tests/suites/replication/tombstone_test.py b/dirsrvtests/tests/suites/replication/tombstone_test.py
index bc06cd4..83a72a3 100644
--- a/dirsrvtests/tests/suites/replication/tombstone_test.py
+++ b/dirsrvtests/tests/suites/replication/tombstone_test.py
@@ -27,9 +27,9 @@ def test_purge_success(topology_st):
log.info("Add and then delete an entry to create a tombstone...")
try:
topology_st.standalone.add_s(Entry(('cn=entry1,dc=example,dc=com', {
- 'objectclass': 'top person'.split(),
- 'sn': 'user',
- 'cn': 'entry1'})))
+ 'objectclass': 'top person'.split(),
+ 'sn': 'user',
+ 'cn': 'entry1'})))
except ldap.LDAPError as e:
log.error('Failed to add entry: {}'.format(e.message['desc']))
assert False
@@ -43,7 +43,7 @@ def test_purge_success(topology_st):
log.info('Search for tombstone entries...')
try:
entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
- '(objectclass=nsTombstone)')
+ '(objectclass=nsTombstone)')
assert entries
except ldap.LDAPError as e:
log.fatal('Search failed: {}'.format(e.message['desc']))
@@ -55,4 +55,3 @@ if __name__ == '__main__':
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s %s" % CURRENT_FILE)
-
diff --git a/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py b/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py
index b79784e..cfe92ab 100644
--- a/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py
+++ b/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py
@@ -6,19 +6,11 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
-import logging
+from collections import Counter
+
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
-from collections import Counter
from lib389.topologies import topology_m2
logging.getLogger(__name__).setLevel(logging.DEBUG)
diff --git a/dirsrvtests/tests/suites/rootdn_plugin/rootdn_plugin_test.py b/dirsrvtests/tests/suites/rootdn_plugin/rootdn_plugin_test.py
index 929f0e9..0775b0a 100644
--- a/dirsrvtests/tests/suites/rootdn_plugin/rootdn_plugin_test.py
+++ b/dirsrvtests/tests/suites/rootdn_plugin/rootdn_plugin_test.py
@@ -6,18 +6,12 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
import logging
-import pytest
import socket
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
+
+import pytest
from lib389.tasks import *
+from lib389.tools import DirSrvTools
from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
@@ -62,8 +56,8 @@ def test_rootdn_init(topology_st):
#
try:
topology_st.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(),
- 'uid': 'user1',
- 'userpassword': PASSWORD})))
+ 'uid': 'user1',
+ 'userpassword': PASSWORD})))
except ldap.LDAPError as e:
log.fatal('test_rootdn_init: Failed to add test user ' + USER1_DN + ': error ' +
e.message['desc'])
@@ -104,7 +98,7 @@ def test_rootdn_access_specific_time(topology_st):
try:
topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-open-time', open_time),
- (ldap.MOD_ADD, 'rootdn-close-time', close_time)])
+ (ldap.MOD_ADD, 'rootdn-close-time', close_time)])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_specific_time: Failed to set (blocking) open/close times: error ' +
e.message['desc'])
@@ -134,7 +128,7 @@ def test_rootdn_access_specific_time(topology_st):
try:
topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'),
- (ldap.MOD_REPLACE, 'rootdn-close-time', '2359')])
+ (ldap.MOD_REPLACE, 'rootdn-close-time', '2359')])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_specific_time: Failed to set (open) open/close times: error ' +
e.message['desc'])
@@ -152,7 +146,7 @@ def test_rootdn_access_specific_time(topology_st):
#
try:
topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-open-time', None),
- (ldap.MOD_DELETE, 'rootdn-close-time', None)])
+ (ldap.MOD_DELETE, 'rootdn-close-time', None)])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_specific_time: Failed to delete open and close time: error ' +
e.message['desc'])
@@ -198,7 +192,7 @@ def test_rootdn_access_day_of_week(topology_st):
#
try:
topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed',
- deny_days)])
+ deny_days)])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_day_of_week: Failed to set the deny days: error ' +
e.message['desc'])
@@ -228,7 +222,7 @@ def test_rootdn_access_day_of_week(topology_st):
try:
topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed',
- allow_days)])
+ allow_days)])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_day_of_week: Failed to set the deny days: error ' +
e.message['desc'])
@@ -270,11 +264,11 @@ def test_rootdn_access_denied_ip(topology_st):
try:
topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
- 'rootdn-deny-ip',
- '127.0.0.1'),
- (ldap.MOD_ADD,
- 'rootdn-deny-ip',
- '::1')])
+ 'rootdn-deny-ip',
+ '127.0.0.1'),
+ (ldap.MOD_ADD,
+ 'rootdn-deny-ip',
+ '::1')])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_denied_ip: Failed to set rootDN plugin config: error ' +
e.message['desc'])
@@ -346,11 +340,11 @@ def test_rootdn_access_denied_host(topology_st):
localhost = DirSrvTools.getLocalhost()
try:
topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD,
- 'rootdn-deny-host',
- hostname)])
+ 'rootdn-deny-host',
+ hostname)])
topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD,
- 'rootdn-deny-host',
- localhost)])
+ 'rootdn-deny-host',
+ localhost)])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_denied_host: Failed to set deny host: error ' +
e.message['desc'])
@@ -453,7 +447,7 @@ def test_rootdn_access_allowed_ip(topology_st):
try:
topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-ip', '127.0.0.1'),
- (ldap.MOD_ADD, 'rootdn-allow-ip', '::1')])
+ (ldap.MOD_ADD, 'rootdn-allow-ip', '::1')])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_allowed_ip: Failed to set allowed host: error ' +
e.message['desc'])
@@ -529,11 +523,11 @@ def test_rootdn_access_allowed_host(topology_st):
localhost = DirSrvTools.getLocalhost()
try:
topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD,
- 'rootdn-allow-host',
- localhost)])
+ 'rootdn-allow-host',
+ localhost)])
topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD,
- 'rootdn-allow-host',
- hostname)])
+ 'rootdn-allow-host',
+ hostname)])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_allowed_host: Failed to set allowed host: error ' +
e.message['desc'])
@@ -590,7 +584,7 @@ def test_rootdn_config_validate(topology_st):
try:
topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-open-time', '0000'),
- (ldap.MOD_ADD, 'rootdn-open-time', '0001')])
+ (ldap.MOD_ADD, 'rootdn-open-time', '0001')])
log.fatal('test_rootdn_config_validate: Incorrectly allowed to add multiple "rootdn-open-time"')
assert False
except ldap.LDAPError:
@@ -598,7 +592,7 @@ def test_rootdn_config_validate(topology_st):
try:
topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '-1'),
- (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')])
+ (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')])
log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-open-time: -1"')
assert False
except ldap.LDAPError:
@@ -606,7 +600,7 @@ def test_rootdn_config_validate(topology_st):
try:
topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '2400'),
- (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')])
+ (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')])
log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-open-time: 2400"')
assert False
except ldap.LDAPError:
@@ -614,7 +608,7 @@ def test_rootdn_config_validate(topology_st):
try:
topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', 'aaaaa'),
- (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')])
+ (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')])
log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-open-time: aaaaa"')
assert False
except ldap.LDAPError:
@@ -632,7 +626,7 @@ def test_rootdn_config_validate(topology_st):
try:
topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-close-time', '0000'),
- (ldap.MOD_ADD, 'rootdn-close-time', '0001')])
+ (ldap.MOD_ADD, 'rootdn-close-time', '0001')])
log.fatal('test_rootdn_config_validate: Incorrectly allowed to add multiple "rootdn-open-time"')
assert False
except ldap.LDAPError:
@@ -640,7 +634,7 @@ def test_rootdn_config_validate(topology_st):
try:
topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'),
- (ldap.MOD_REPLACE, 'rootdn-close-time', '-1')])
+ (ldap.MOD_REPLACE, 'rootdn-close-time', '-1')])
log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-close-time: -1"')
assert False
except ldap.LDAPError:
@@ -648,7 +642,7 @@ def test_rootdn_config_validate(topology_st):
try:
topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'),
- (ldap.MOD_REPLACE, 'rootdn-close-time', '2400')])
+ (ldap.MOD_REPLACE, 'rootdn-close-time', '2400')])
log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-close-time: 2400"')
assert False
except ldap.LDAPError:
@@ -656,7 +650,7 @@ def test_rootdn_config_validate(topology_st):
try:
topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'),
- (ldap.MOD_REPLACE, 'rootdn-close-time', 'aaaaa')])
+ (ldap.MOD_REPLACE, 'rootdn-close-time', 'aaaaa')])
log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-close-time: aaaaa"')
assert False
except ldap.LDAPError:
@@ -667,7 +661,7 @@ def test_rootdn_config_validate(topology_st):
#
try:
topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-days-allowed', 'Mon'),
- (ldap.MOD_ADD, 'rootdn-days-allowed', 'Tue')])
+ (ldap.MOD_ADD, 'rootdn-days-allowed', 'Tue')])
log.fatal('test_rootdn_config_validate: Incorrectly allowed to add two "rootdn-days-allowed"')
assert False
except ldap.LDAPError:
diff --git a/dirsrvtests/tests/suites/schema/test_schema.py b/dirsrvtests/tests/suites/schema/test_schema.py
index ed13d91..123d6fc 100644
--- a/dirsrvtests/tests/suites/schema/test_schema.py
+++ b/dirsrvtests/tests/suites/schema/test_schema.py
@@ -11,19 +11,14 @@ Created on Dec 18, 2013
@author: rmeggins
'''
-import os
-import sys
-import time
+import logging
+
import ldap
+import pytest
import six
from ldap.cidict import cidict
from ldap.schema import SubSchema
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
from lib389._constants import *
-from lib389.properties import *
from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
@@ -49,7 +44,7 @@ def ochasattr(subschema, oc, mustormay, attr, key):
# look in parents
for noroid in oc.sup:
ocpar = subschema.get_obj(occlass, noroid)
- assert(ocpar)
+ assert (ocpar)
rc = ochasattr(subschema, ocpar, mustormay, attr, key)
if rc:
break
@@ -101,7 +96,7 @@ def atgetparfield(subschema, at, field):
v = None
for nameoroid in at.sup:
atpar = subschema.get_obj(attrclass, nameoroid)
- assert(atpar)
+ assert (atpar)
v = atpar.__dict__.get(field, atgetparfield(subschema, atpar, field))
if v is not None:
break
diff --git a/dirsrvtests/tests/tickets/finalizer.py b/dirsrvtests/tests/tickets/finalizer.py
deleted file mode 100644
index 690c76e..0000000
--- a/dirsrvtests/tests/tickets/finalizer.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2016 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-'''
-Created on Nov 5, 2013
-
-@author: tbordaz
-'''
-import os
-import sys
-import time
-import ldap
-import logging
-import socket
-import time
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
-from lib389._constants import DN_DM
-from lib389.properties import *
-
-log = logging.getLogger(__name__)
-
-def test_finalizer():
- # for each defined instance, remove it
- for args_instance in ALL_INSTANCES:
- instance = DirSrv(verbose=True)
- instance.allocate(args_instance)
- if instance.exists():
- instance.delete()
-
- # remove any existing backup for this instance
- instance.clearBackupFS()
-
-def run_isolated():
- '''
- run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..)
- To run isolated without py.test, you need to
- - set the installation prefix
- - run this program
- '''
- test_finalizer()
-
-if __name__ == '__main__':
- run_isolated()
-
diff --git a/dirsrvtests/tests/tickets/ticket1347760_test.py b/dirsrvtests/tests/tickets/ticket1347760_test.py
index 48643cc..e7fb07f 100644
--- a/dirsrvtests/tests/tickets/ticket1347760_test.py
+++ b/dirsrvtests/tests/tickets/ticket1347760_test.py
@@ -6,24 +6,17 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import time
-import ldap
-import logging
-import pytest
from subprocess import Popen
-from lib389 import DirSrv, Entry
+
+import pytest
from lib389.paths import Paths
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
CONFIG_DN = 'cn=config'
BOU = 'BOU'
BINDOU = 'ou=%s,%s' % (BOU, DEFAULT_SUFFIX)
@@ -40,43 +33,6 @@ GROUPOU = 'ou=groups,%s' % DEFAULT_SUFFIX
BOGUSOU = 'ou=OU,%s' % DEFAULT_SUFFIX
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- return TopologyStandalone(standalone)
-
-
def pattern_accesslog(file, log_pattern):
for i in range(5):
try:
@@ -186,49 +142,49 @@ def check_op_result(server, op, dn, superior, exists, rc):
log.info('PASSED\n')
-def test_ticket1347760(topology):
+def test_ticket1347760(topology_st):
"""
Prevent revealing the entry info to whom has no access rights.
"""
log.info('Testing Bug 1347760 - Information disclosure via repeated use of LDAP ADD operation, etc.')
log.info('Disabling accesslog logbuffering')
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-accesslog-logbuffering', 'off')])
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-accesslog-logbuffering', 'off')])
log.info('Bind as {%s,%s}' % (DN_DM, PASSWORD))
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
log.info('Adding ou=%s a bind user belongs to.' % BOU)
- topology.standalone.add_s(Entry((BINDOU, {
- 'objectclass': 'top organizationalunit'.split(),
- 'ou': BOU})))
+ topology_st.standalone.add_s(Entry((BINDOU, {
+ 'objectclass': 'top organizationalunit'.split(),
+ 'ou': BOU})))
log.info('Adding a bind user.')
- topology.standalone.add_s(Entry((BINDDN,
- {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': 'bind user',
- 'sn': 'user',
- 'userPassword': BINDPW})))
+ topology_st.standalone.add_s(Entry((BINDDN,
+ {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': 'bind user',
+ 'sn': 'user',
+ 'userPassword': BINDPW})))
log.info('Adding a test user.')
- topology.standalone.add_s(Entry((TESTDN,
- {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': 'test user',
- 'sn': 'user',
- 'userPassword': TESTPW})))
+ topology_st.standalone.add_s(Entry((TESTDN,
+ {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': 'test user',
+ 'sn': 'user',
+ 'userPassword': TESTPW})))
log.info('Deleting aci in %s.' % DEFAULT_SUFFIX)
- topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', None)])
+ topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', None)])
log.info('While binding as DM, acquire an access log path')
- ds_paths = Paths(serverid=topology.standalone.serverid,
- instance=topology.standalone)
+ ds_paths = Paths(serverid=topology_st.standalone.serverid,
+ instance=topology_st.standalone)
file_path = ds_paths.access_log
log.info('Bind case 1. the bind user has no rights to read the entry itself, bind should be successful.')
log.info('Bind as {%s,%s} who has no access rights.' % (BINDDN, BINDPW))
try:
- topology.standalone.simple_bind_s(BINDDN, BINDPW)
+ topology_st.standalone.simple_bind_s(BINDDN, BINDPW)
except ldap.LDAPError as e:
log.info('Desc ' + e.message['desc'])
assert False
@@ -236,10 +192,11 @@ def test_ticket1347760(topology):
file_obj = open(file_path, "r")
log.info('Access log path: %s' % file_path)
- log.info('Bind case 2-1. the bind user does not exist, bind should fail with error %s' % ldap.INVALID_CREDENTIALS.__name__)
+ log.info(
+ 'Bind case 2-1. the bind user does not exist, bind should fail with error %s' % ldap.INVALID_CREDENTIALS.__name__)
log.info('Bind as {%s,%s} who does not exist.' % (BOGUSDN, 'bogus'))
try:
- topology.standalone.simple_bind_s(BOGUSDN, 'bogus')
+ topology_st.standalone.simple_bind_s(BOGUSDN, 'bogus')
except ldap.LDAPError as e:
log.info("Exception (expected): %s" % type(e).__name__)
log.info('Desc ' + e.message['desc'])
@@ -253,10 +210,11 @@ def test_ticket1347760(topology):
log.info('Cause found - %s' % cause)
time.sleep(1)
- log.info('Bind case 2-2. the bind user\'s suffix does not exist, bind should fail with error %s' % ldap.INVALID_CREDENTIALS.__name__)
+ log.info(
+ 'Bind case 2-2. the bind user\'s suffix does not exist, bind should fail with error %s' % ldap.INVALID_CREDENTIALS.__name__)
log.info('Bind as {%s,%s} who does not exist.' % (BOGUSSUFFIX, 'bogus'))
try:
- topology.standalone.simple_bind_s(BOGUSSUFFIX, 'bogus')
+ topology_st.standalone.simple_bind_s(BOGUSSUFFIX, 'bogus')
except ldap.LDAPError as e:
log.info("Exception (expected): %s" % type(e).__name__)
log.info('Desc ' + e.message['desc'])
@@ -270,10 +228,11 @@ def test_ticket1347760(topology):
log.info('Cause found - %s' % cause)
time.sleep(1)
- log.info('Bind case 2-3. the bind user\'s password is wrong, bind should fail with error %s' % ldap.INVALID_CREDENTIALS.__name__)
+ log.info(
+ 'Bind case 2-3. the bind user\'s password is wrong, bind should fail with error %s' % ldap.INVALID_CREDENTIALS.__name__)
log.info('Bind as {%s,%s} who does not exist.' % (BINDDN, 'bogus'))
try:
- topology.standalone.simple_bind_s(BINDDN, 'bogus')
+ topology_st.standalone.simple_bind_s(BINDDN, 'bogus')
except ldap.LDAPError as e:
log.info("Exception (expected): %s" % type(e).__name__)
log.info('Desc ' + e.message['desc'])
@@ -291,121 +250,139 @@ def test_ticket1347760(topology):
acival = '(targetattr="*")(version 3.0; acl "%s"; allow(all) userdn = "ldap:///%s";)' % (BUID, BINDDN)
log.info('aci: %s' % acival)
log.info('Bind as {%s,%s}' % (DN_DM, PASSWORD))
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(BINDOU, [(ldap.MOD_ADD, 'aci', acival)])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(BINDOU, [(ldap.MOD_ADD, 'aci', acival)])
time.sleep(1)
log.info('Bind case 3. the bind user has the right to read the entry itself, bind should be successful.')
log.info('Bind as {%s,%s} which should be ok.\n' % (BINDDN, BINDPW))
- topology.standalone.simple_bind_s(BINDDN, BINDPW)
+ topology_st.standalone.simple_bind_s(BINDDN, BINDPW)
log.info('The following operations are against the subtree the bind user %s has no rights.' % BINDDN)
# Search
exists = True
rc = ldap.SUCCESS
- log.info('Search case 1. the bind user has no rights to read the search entry, it should return no search results with %s' % rc)
- check_op_result(topology.standalone, 'search', TESTDN, None, exists, rc)
+ log.info(
+ 'Search case 1. the bind user has no rights to read the search entry, it should return no search results with %s' % rc)
+ check_op_result(topology_st.standalone, 'search', TESTDN, None, exists, rc)
exists = False
rc = ldap.SUCCESS
- log.info('Search case 2-1. the search entry does not exist, the search should return no search results with %s' % rc.__name__)
- check_op_result(topology.standalone, 'search', BOGUSDN, None, exists, rc)
+ log.info(
+ 'Search case 2-1. the search entry does not exist, the search should return no search results with %s' % rc.__name__)
+ check_op_result(topology_st.standalone, 'search', BOGUSDN, None, exists, rc)
exists = False
rc = ldap.SUCCESS
- log.info('Search case 2-2. the search entry does not exist, the search should return no search results with %s' % rc.__name__)
- check_op_result(topology.standalone, 'search', BOGUSDN2, None, exists, rc)
+ log.info(
+ 'Search case 2-2. the search entry does not exist, the search should return no search results with %s' % rc.__name__)
+ check_op_result(topology_st.standalone, 'search', BOGUSDN2, None, exists, rc)
# Add
exists = True
rc = ldap.INSUFFICIENT_ACCESS
- log.info('Add case 1. the bind user has no rights AND the adding entry exists, it should fail with %s' % rc.__name__)
- check_op_result(topology.standalone, 'add', TESTDN, None, exists, rc)
+ log.info(
+ 'Add case 1. the bind user has no rights AND the adding entry exists, it should fail with %s' % rc.__name__)
+ check_op_result(topology_st.standalone, 'add', TESTDN, None, exists, rc)
exists = False
rc = ldap.INSUFFICIENT_ACCESS
- log.info('Add case 2-1. the bind user has no rights AND the adding entry does not exist, it should fail with %s' % rc.__name__)
- check_op_result(topology.standalone, 'add', BOGUSDN, None, exists, rc)
+ log.info(
+ 'Add case 2-1. the bind user has no rights AND the adding entry does not exist, it should fail with %s' % rc.__name__)
+ check_op_result(topology_st.standalone, 'add', BOGUSDN, None, exists, rc)
exists = False
rc = ldap.INSUFFICIENT_ACCESS
- log.info('Add case 2-2. the bind user has no rights AND the adding entry does not exist, it should fail with %s' % rc.__name__)
- check_op_result(topology.standalone, 'add', BOGUSDN2, None, exists, rc)
+ log.info(
+ 'Add case 2-2. the bind user has no rights AND the adding entry does not exist, it should fail with %s' % rc.__name__)
+ check_op_result(topology_st.standalone, 'add', BOGUSDN2, None, exists, rc)
# Modify
exists = True
rc = ldap.INSUFFICIENT_ACCESS
- log.info('Modify case 1. the bind user has no rights AND the modifying entry exists, it should fail with %s' % rc.__name__)
- check_op_result(topology.standalone, 'modify', TESTDN, None, exists, rc)
+ log.info(
+ 'Modify case 1. the bind user has no rights AND the modifying entry exists, it should fail with %s' % rc.__name__)
+ check_op_result(topology_st.standalone, 'modify', TESTDN, None, exists, rc)
exists = False
rc = ldap.INSUFFICIENT_ACCESS
- log.info('Modify case 2-1. the bind user has no rights AND the modifying entry does not exist, it should fail with %s' % rc.__name__)
- check_op_result(topology.standalone, 'modify', BOGUSDN, None, exists, rc)
+ log.info(
+ 'Modify case 2-1. the bind user has no rights AND the modifying entry does not exist, it should fail with %s' % rc.__name__)
+ check_op_result(topology_st.standalone, 'modify', BOGUSDN, None, exists, rc)
exists = False
rc = ldap.INSUFFICIENT_ACCESS
- log.info('Modify case 2-2. the bind user has no rights AND the modifying entry does not exist, it should fail with %s' % rc.__name__)
- check_op_result(topology.standalone, 'modify', BOGUSDN2, None, exists, rc)
+ log.info(
+ 'Modify case 2-2. the bind user has no rights AND the modifying entry does not exist, it should fail with %s' % rc.__name__)
+ check_op_result(topology_st.standalone, 'modify', BOGUSDN2, None, exists, rc)
# Modrdn
exists = True
rc = ldap.INSUFFICIENT_ACCESS
- log.info('Modrdn case 1. the bind user has no rights AND the renaming entry exists, it should fail with %s' % rc.__name__)
- check_op_result(topology.standalone, 'modrdn', TESTDN, None, exists, rc)
+ log.info(
+ 'Modrdn case 1. the bind user has no rights AND the renaming entry exists, it should fail with %s' % rc.__name__)
+ check_op_result(topology_st.standalone, 'modrdn', TESTDN, None, exists, rc)
exists = False
rc = ldap.INSUFFICIENT_ACCESS
- log.info('Modrdn case 2-1. the bind user has no rights AND the renaming entry does not exist, it should fail with %s' % rc.__name__)
- check_op_result(topology.standalone, 'modrdn', BOGUSDN, None, exists, rc)
+ log.info(
+ 'Modrdn case 2-1. the bind user has no rights AND the renaming entry does not exist, it should fail with %s' % rc.__name__)
+ check_op_result(topology_st.standalone, 'modrdn', BOGUSDN, None, exists, rc)
exists = False
rc = ldap.INSUFFICIENT_ACCESS
- log.info('Modrdn case 2-2. the bind user has no rights AND the renaming entry does not exist, it should fail with %s' % rc.__name__)
- check_op_result(topology.standalone, 'modrdn', BOGUSDN2, None, exists, rc)
+ log.info(
+ 'Modrdn case 2-2. the bind user has no rights AND the renaming entry does not exist, it should fail with %s' % rc.__name__)
+ check_op_result(topology_st.standalone, 'modrdn', BOGUSDN2, None, exists, rc)
exists = True
rc = ldap.INSUFFICIENT_ACCESS
- log.info('Modrdn case 3. the bind user has no rights AND the node moving an entry to exists, it should fail with %s' % rc.__name__)
- check_op_result(topology.standalone, 'modrdn', TESTDN, GROUPOU, exists, rc)
+ log.info(
+ 'Modrdn case 3. the bind user has no rights AND the node moving an entry to exists, it should fail with %s' % rc.__name__)
+ check_op_result(topology_st.standalone, 'modrdn', TESTDN, GROUPOU, exists, rc)
exists = False
rc = ldap.INSUFFICIENT_ACCESS
- log.info('Modrdn case 4-1. the bind user has no rights AND the node moving an entry to does not, it should fail with %s' % rc.__name__)
- check_op_result(topology.standalone, 'modrdn', TESTDN, BOGUSOU, exists, rc)
+ log.info(
+ 'Modrdn case 4-1. the bind user has no rights AND the node moving an entry to does not, it should fail with %s' % rc.__name__)
+ check_op_result(topology_st.standalone, 'modrdn', TESTDN, BOGUSOU, exists, rc)
exists = False
rc = ldap.INSUFFICIENT_ACCESS
- log.info('Modrdn case 4-2. the bind user has no rights AND the node moving an entry to does not, it should fail with %s' % rc.__name__)
- check_op_result(topology.standalone, 'modrdn', TESTDN, BOGUSOU, exists, rc)
+ log.info(
+ 'Modrdn case 4-2. the bind user has no rights AND the node moving an entry to does not, it should fail with %s' % rc.__name__)
+ check_op_result(topology_st.standalone, 'modrdn', TESTDN, BOGUSOU, exists, rc)
# Delete
exists = True
rc = ldap.INSUFFICIENT_ACCESS
- log.info('Delete case 1. the bind user has no rights AND the deleting entry exists, it should fail with %s' % rc.__name__)
- check_op_result(topology.standalone, 'delete', TESTDN, None, exists, rc)
+ log.info(
+ 'Delete case 1. the bind user has no rights AND the deleting entry exists, it should fail with %s' % rc.__name__)
+ check_op_result(topology_st.standalone, 'delete', TESTDN, None, exists, rc)
exists = False
rc = ldap.INSUFFICIENT_ACCESS
- log.info('Delete case 2-1. the bind user has no rights AND the deleting entry does not exist, it should fail with %s' % rc.__name__)
- check_op_result(topology.standalone, 'delete', BOGUSDN, None, exists, rc)
+ log.info(
+ 'Delete case 2-1. the bind user has no rights AND the deleting entry does not exist, it should fail with %s' % rc.__name__)
+ check_op_result(topology_st.standalone, 'delete', BOGUSDN, None, exists, rc)
exists = False
rc = ldap.INSUFFICIENT_ACCESS
- log.info('Delete case 2-2. the bind user has no rights AND the deleting entry does not exist, it should fail with %s' % rc.__name__)
- check_op_result(topology.standalone, 'delete', BOGUSDN2, None, exists, rc)
+ log.info(
+ 'Delete case 2-2. the bind user has no rights AND the deleting entry does not exist, it should fail with %s' % rc.__name__)
+ check_op_result(topology_st.standalone, 'delete', BOGUSDN2, None, exists, rc)
log.info('EXTRA: Check no regressions')
log.info('Adding aci for %s to %s.' % (BINDDN, DEFAULT_SUFFIX))
acival = '(targetattr="*")(version 3.0; acl "%s-all"; allow(all) userdn = "ldap:///%s";)' % (BUID, BINDDN)
log.info('Bind as {%s,%s}' % (DN_DM, PASSWORD))
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', acival)])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', acival)])
time.sleep(1)
log.info('Bind as {%s,%s}.' % (BINDDN, BINDPW))
try:
- topology.standalone.simple_bind_s(BINDDN, BINDPW)
+ topology_st.standalone.simple_bind_s(BINDDN, BINDPW)
except ldap.LDAPError as e:
log.info('Desc ' + e.message['desc'])
assert False
@@ -414,42 +391,42 @@ def test_ticket1347760(topology):
exists = False
rc = ldap.NO_SUCH_OBJECT
log.info('Search case. the search entry does not exist, the search should fail with %s' % rc.__name__)
- check_op_result(topology.standalone, 'search', BOGUSDN2, None, exists, rc)
+ check_op_result(topology_st.standalone, 'search', BOGUSDN2, None, exists, rc)
file_obj.close()
exists = True
rc = ldap.ALREADY_EXISTS
log.info('Add case. the adding entry already exists, it should fail with %s' % rc.__name__)
- check_op_result(topology.standalone, 'add', TESTDN, None, exists, rc)
+ check_op_result(topology_st.standalone, 'add', TESTDN, None, exists, rc)
exists = False
rc = ldap.NO_SUCH_OBJECT
log.info('Modify case. the modifying entry does not exist, it should fail with %s' % rc.__name__)
- check_op_result(topology.standalone, 'modify', BOGUSDN, None, exists, rc)
+ check_op_result(topology_st.standalone, 'modify', BOGUSDN, None, exists, rc)
exists = False
rc = ldap.NO_SUCH_OBJECT
log.info('Modrdn case 1. the renaming entry does not exist, it should fail with %s' % rc.__name__)
- check_op_result(topology.standalone, 'modrdn', BOGUSDN, None, exists, rc)
+ check_op_result(topology_st.standalone, 'modrdn', BOGUSDN, None, exists, rc)
exists = False
rc = ldap.NO_SUCH_OBJECT
log.info('Modrdn case 2. the node moving an entry to does not, it should fail with %s' % rc.__name__)
- check_op_result(topology.standalone, 'modrdn', TESTDN, BOGUSOU, exists, rc)
+ check_op_result(topology_st.standalone, 'modrdn', TESTDN, BOGUSOU, exists, rc)
exists = False
rc = ldap.NO_SUCH_OBJECT
log.info('Delete case. the deleting entry does not exist, it should fail with %s' % rc.__name__)
- check_op_result(topology.standalone, 'delete', BOGUSDN, None, exists, rc)
+ check_op_result(topology_st.standalone, 'delete', BOGUSDN, None, exists, rc)
log.info('Inactivate %s' % BINDDN)
- nsinactivate = '%s/sbin/ns-inactivate.pl' % topology.standalone.prefix
+ nsinactivate = '%s/sbin/ns-inactivate.pl' % topology_st.standalone.prefix
p = Popen([nsinactivate, '-Z', 'standalone', '-D', DN_DM, '-w', PASSWORD, '-I', BINDDN])
- assert(p.wait() == 0)
+ assert (p.wait() == 0)
log.info('Bind as {%s,%s} which should fail with %s.' % (BINDDN, BUID, ldap.UNWILLING_TO_PERFORM.__name__))
try:
- topology.standalone.simple_bind_s(BINDDN, BUID)
+ topology_st.standalone.simple_bind_s(BINDDN, BUID)
except ldap.LDAPError as e:
log.info("Exception (expected): %s" % type(e).__name__)
log.info('Desc ' + e.message['desc'])
@@ -457,7 +434,7 @@ def test_ticket1347760(topology):
log.info('Bind as {%s,%s} which should fail with %s.' % (BINDDN, 'bogus', ldap.INVALID_CREDENTIALS.__name__))
try:
- topology.standalone.simple_bind_s(BINDDN, 'bogus')
+ topology_st.standalone.simple_bind_s(BINDDN, 'bogus')
except ldap.LDAPError as e:
log.info("Exception (expected): %s" % type(e).__name__)
log.info('Desc ' + e.message['desc'])
diff --git a/dirsrvtests/tests/tickets/ticket365_test.py b/dirsrvtests/tests/tickets/ticket365_test.py
index 8025375..639bfec 100644
--- a/dirsrvtests/tests/tickets/ticket365_test.py
+++ b/dirsrvtests/tests/tickets/ticket365_test.py
@@ -7,56 +7,17 @@
# --- END COPYRIGHT BLOCK ---
#
-import time
-import ldap
import logging
+
import pytest
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_ticket365(topology):
+def test_ticket365(topology_st):
'''
Write your testcase here...
@@ -75,11 +36,11 @@ def test_ticket365(topology):
# Add the test entry
#
try:
- topology.standalone.add_s(Entry((USER_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'test_entry',
- 'userpassword': 'password'
- })))
+ topology_st.standalone.add_s(Entry((USER_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'test_entry',
+ 'userpassword': 'password'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add test user: error ' + e.message['desc'])
assert False
@@ -88,16 +49,16 @@ def test_ticket365(topology):
# Enable the audit log
#
try:
- topology.standalone.modify_s(DN_CONFIG,
- [(ldap.MOD_REPLACE,
- 'nsslapd-auditlog-logging-enabled',
- 'on')])
+ topology_st.standalone.modify_s(DN_CONFIG,
+ [(ldap.MOD_REPLACE,
+ 'nsslapd-auditlog-logging-enabled',
+ 'on')])
except ldap.LDAPError as e:
log.fatal('Failed to enable audit log, error: ' + e.message['desc'])
assert False
'''
try:
- ent = topology.standalone.getEntry(DN_CONFIG, attrlist=[
+ ent = topology_st.standalone.getEntry(DN_CONFIG, attrlist=[
'nsslapd-instancedir',
'nsslapd-errorlog',
'nsslapd-accesslog',
@@ -108,8 +69,8 @@ def test_ticket365(topology):
# Allow the unhashed password to be written to audit log
#
try:
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
- 'nsslapd-auditlog-logging-hide-unhashed-pw', 'off')])
+ topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
+ 'nsslapd-auditlog-logging-hide-unhashed-pw', 'off')])
except ldap.LDAPError as e:
log.fatal('Failed to enable writing unhashed password to audit log, ' +
'error: ' + e.message['desc'])
@@ -119,9 +80,9 @@ def test_ticket365(topology):
# Set new password, and check the audit log
#
try:
- topology.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE,
- 'userpassword',
- 'mypassword')])
+ topology_st.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE,
+ 'userpassword',
+ 'mypassword')])
except ldap.LDAPError as e:
log.fatal('Failed to enable writing unhashed password to audit log, ' +
'error: ' + e.message['desc'])
@@ -129,7 +90,7 @@ def test_ticket365(topology):
# Check audit log
time.sleep(1)
- if not topology.standalone.searchAuditLog('unhashed#user#password: mypassword'):
+ if not topology_st.standalone.searchAuditLog('unhashed#user#password: mypassword'):
log.fatal('failed to find unhashed password in auditlog')
assert False
@@ -137,10 +98,10 @@ def test_ticket365(topology):
# Hide unhashed password in audit log
#
try:
- topology.standalone.modify_s(DN_CONFIG,
- [(ldap.MOD_REPLACE,
- 'nsslapd-auditlog-logging-hide-unhashed-pw',
- 'on')])
+ topology_st.standalone.modify_s(DN_CONFIG,
+ [(ldap.MOD_REPLACE,
+ 'nsslapd-auditlog-logging-hide-unhashed-pw',
+ 'on')])
except ldap.LDAPError as e:
log.fatal('Failed to deny writing unhashed password to audit log, ' +
'error: ' + e.message['desc'])
@@ -151,9 +112,9 @@ def test_ticket365(topology):
# Modify password, and check the audit log
#
try:
- topology.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE,
- 'userpassword',
- 'hidepassword')])
+ topology_st.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE,
+ 'userpassword',
+ 'hidepassword')])
except ldap.LDAPError as e:
log.fatal('Failed to enable writing unhashed password to audit log, ' +
'error: ' + e.message['desc'])
@@ -161,7 +122,7 @@ def test_ticket365(topology):
# Check audit log
time.sleep(1)
- if topology.standalone.searchAuditLog('unhashed#user#password: hidepassword'):
+ if topology_st.standalone.searchAuditLog('unhashed#user#password: hidepassword'):
log.fatal('Found unhashed password in auditlog')
assert False
@@ -173,4 +134,3 @@ if __name__ == '__main__':
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s %s" % CURRENT_FILE)
-
diff --git a/dirsrvtests/tests/tickets/ticket397_test.py b/dirsrvtests/tests/tickets/ticket397_test.py
index 4bf4eda..424c91b 100644
--- a/dirsrvtests/tests/tickets/ticket397_test.py
+++ b/dirsrvtests/tests/tickets/ticket397_test.py
@@ -1,17 +1,9 @@
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
-DEBUGGING = False
+DEBUGGING = os.getenv('DEBUGGING', False)
USER_DN = 'uid=user,ou=People,%s' % DEFAULT_SUFFIX
if DEBUGGING:
@@ -19,55 +11,9 @@ if DEBUGGING:
else:
logging.getLogger(__name__).setLevel(logging.INFO)
-
log = logging.getLogger(__name__)
-class TopologyStandalone(object):
- """The DS Topology Class"""
- def __init__(self, standalone):
- """Init"""
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- """Create DS Deployment"""
-
- # Creating standalone instance ...
- if DEBUGGING:
- standalone = DirSrv(verbose=True)
- else:
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- """If we are debugging just stop the instances, otherwise remove
- them
- """
- if DEBUGGING:
- standalone.stop()
- else:
- standalone.delete()
-
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- return TopologyStandalone(standalone)
-
def _test_bind(inst, password):
result = True
userconn = ldap.initialize("ldap://%s:%s" % (HOST_STANDALONE, PORT_STANDALONE))
@@ -78,6 +24,7 @@ def _test_bind(inst, password):
result = False
return result
+
def _test_algo(inst, algo_name):
inst.config.set('passwordStorageScheme', algo_name)
@@ -86,14 +33,14 @@ def _test_algo(inst, algo_name):
# Create the user with a password
inst.add_s(Entry((
- USER_DN, {
- 'objectClass': 'top account simplesecurityobject'.split(),
- 'uid': 'user',
- 'userpassword': ['Secret123', ]
- })))
+ USER_DN, {
+ 'objectClass': 'top account simplesecurityobject'.split(),
+ 'uid': 'user',
+ 'userpassword': ['Secret123', ]
+ })))
# Make sure when we read the userPassword field, it is the correct ALGO
- pw_field = inst.search_s(USER_DN, ldap.SCOPE_BASE, '(objectClass=*)', ['userPassword'] )[0]
+ pw_field = inst.search_s(USER_DN, ldap.SCOPE_BASE, '(objectClass=*)', ['userPassword'])[0]
if DEBUGGING:
print(pw_field.getValue('userPassword'))
@@ -101,29 +48,30 @@ def _test_algo(inst, algo_name):
if algo_name != 'CLEAR':
lalgo_name = algo_name.lower()
lpw_algo_name = pw_field.getValue('userPassword').lower()
- assert(lpw_algo_name.startswith("{%s}" % lalgo_name))
+ assert (lpw_algo_name.startswith("{%s}" % lalgo_name))
# Now make sure a bind works
- assert(_test_bind(inst, 'Secret123'))
+ assert (_test_bind(inst, 'Secret123'))
# Bind with a wrong shorter password, should fail
- assert(not _test_bind(inst, 'Wrong'))
+ assert (not _test_bind(inst, 'Wrong'))
# Bind with a wrong longer password, should fail
- assert(not _test_bind(inst, 'This is even more wrong'))
+ assert (not _test_bind(inst, 'This is even more wrong'))
# Bind with a password that has the algo in the name
- assert(not _test_bind(inst, '{%s}SomeValues....' % algo_name))
+ assert (not _test_bind(inst, '{%s}SomeValues....' % algo_name))
# Bind with a wrong exact length password.
- assert(not _test_bind(inst, 'Alsowrong'))
+ assert (not _test_bind(inst, 'Alsowrong'))
# Bind with a subset password, should fail
- assert(not _test_bind(inst, 'Secret'))
+ assert (not _test_bind(inst, 'Secret'))
if algo_name != 'CRYPT':
# Bind with a subset password that is 1 char shorter, to detect off by 1 in clear
- assert(not _test_bind(inst, 'Secret12'))
+ assert (not _test_bind(inst, 'Secret12'))
# Bind with a superset password, should fail
- assert(not _test_bind(inst, 'Secret123456'))
+ assert (not _test_bind(inst, 'Secret123456'))
# Delete the user
inst.delete_s(USER_DN)
# done!
-def test_397(topology):
+
+def test_397(topology_st):
"""
Assert that all of our password algorithms correctly PASS and FAIL varying
password conditions.
@@ -136,9 +84,9 @@ def test_397(topology):
# Merge this to the password suite in the future
- for algo in ('PBKDF2_SHA256', ):
+ for algo in ('PBKDF2_SHA256',):
for i in range(0, 10):
- _test_algo(topology.standalone, algo)
+ _test_algo(topology_st.standalone, algo)
log.info('Test PASSED')
@@ -148,4 +96,3 @@ if __name__ == '__main__':
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s %s" % CURRENT_FILE)
-
diff --git a/dirsrvtests/tests/tickets/ticket47313_test.py b/dirsrvtests/tests/tickets/ticket47313_test.py
index 5064126..8640f40 100644
--- a/dirsrvtests/tests/tickets/ticket47313_test.py
+++ b/dirsrvtests/tests/tickets/ticket47313_test.py
@@ -6,64 +6,20 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
import logging
-import time
+
+import ldap
import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
ENTRY_NAME = 'test_entry'
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_ticket47313_run(topology):
+def test_ticket47313_run(topology_st):
"""
It adds 2 test entries
Search with filters including subtype and !
@@ -71,14 +27,14 @@ def test_ticket47313_run(topology):
"""
# bind as directory manager
- topology.standalone.log.info("Bind as %s" % DN_DM)
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.log.info("Bind as %s" % DN_DM)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
# enable filter error logging
- #mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '32')]
- #topology.standalone.modify_s(DN_CONFIG, mod)
+ # mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '32')]
+ # topology_st.standalone.modify_s(DN_CONFIG, mod)
- topology.standalone.log.info("\n\n######################### ADD ######################\n")
+ topology_st.standalone.log.info("\n\n######################### ADD ######################\n")
# Prepare the entry with cn;fr & cn;en
entry_name_fr = '%s fr' % (ENTRY_NAME)
@@ -101,44 +57,44 @@ def test_ticket47313_run(topology):
entry_en_only.setValues('cn', entry_name_en_only)
entry_en_only.setValues('cn;en', entry_name_en)
- topology.standalone.log.info("Try to add Add %s: %r" % (entry_dn_both, entry_both))
- topology.standalone.add_s(entry_both)
+ topology_st.standalone.log.info("Try to add Add %s: %r" % (entry_dn_both, entry_both))
+ topology_st.standalone.add_s(entry_both)
- topology.standalone.log.info("Try to add Add %s: %r" % (entry_dn_en_only, entry_en_only))
- topology.standalone.add_s(entry_en_only)
+ topology_st.standalone.log.info("Try to add Add %s: %r" % (entry_dn_en_only, entry_en_only))
+ topology_st.standalone.add_s(entry_en_only)
- topology.standalone.log.info("\n\n######################### SEARCH ######################\n")
+ topology_st.standalone.log.info("\n\n######################### SEARCH ######################\n")
# filter: (&(cn=test_entry en only)(!(cn=test_entry fr)))
myfilter = '(&(sn=%s)(!(cn=%s)))' % (entry_name_en_only, entry_name_fr)
- topology.standalone.log.info("Try to search with filter %s" % myfilter)
- ents = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter)
+ topology_st.standalone.log.info("Try to search with filter %s" % myfilter)
+ ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter)
assert len(ents) == 1
assert ents[0].sn == entry_name_en_only
- topology.standalone.log.info("Found %s" % ents[0].dn)
+ topology_st.standalone.log.info("Found %s" % ents[0].dn)
# filter: (&(cn=test_entry en only)(!(cn;fr=test_entry fr)))
myfilter = '(&(sn=%s)(!(cn;fr=%s)))' % (entry_name_en_only, entry_name_fr)
- topology.standalone.log.info("Try to search with filter %s" % myfilter)
- ents = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter)
+ topology_st.standalone.log.info("Try to search with filter %s" % myfilter)
+ ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter)
assert len(ents) == 1
assert ents[0].sn == entry_name_en_only
- topology.standalone.log.info("Found %s" % ents[0].dn)
+ topology_st.standalone.log.info("Found %s" % ents[0].dn)
# filter: (&(cn=test_entry en only)(!(cn;en=test_entry en)))
myfilter = '(&(sn=%s)(!(cn;en=%s)))' % (entry_name_en_only, entry_name_en)
- topology.standalone.log.info("Try to search with filter %s" % myfilter)
- ents = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter)
+ topology_st.standalone.log.info("Try to search with filter %s" % myfilter)
+ ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter)
assert len(ents) == 0
- topology.standalone.log.info("Found none")
+ topology_st.standalone.log.info("Found none")
- topology.standalone.log.info("\n\n######################### DELETE ######################\n")
+ topology_st.standalone.log.info("\n\n######################### DELETE ######################\n")
- topology.standalone.log.info("Try to delete %s " % entry_dn_both)
- topology.standalone.delete_s(entry_dn_both)
+ topology_st.standalone.log.info("Try to delete %s " % entry_dn_both)
+ topology_st.standalone.delete_s(entry_dn_both)
- topology.standalone.log.info("Try to delete %s " % entry_dn_en_only)
- topology.standalone.delete_s(entry_dn_en_only)
+ topology_st.standalone.log.info("Try to delete %s " % entry_dn_en_only)
+ topology_st.standalone.delete_s(entry_dn_en_only)
log.info('Testcase PASSED')
@@ -148,4 +104,3 @@ if __name__ == '__main__':
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s %s" % CURRENT_FILE)
-
diff --git a/dirsrvtests/tests/tickets/ticket47384_test.py b/dirsrvtests/tests/tickets/ticket47384_test.py
index 3229751..777aa94 100644
--- a/dirsrvtests/tests/tickets/ticket47384_test.py
+++ b/dirsrvtests/tests/tickets/ticket47384_test.py
@@ -6,62 +6,16 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-import shutil
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- standalone.delete()
- if os.geteuid() == 0:
- os.system('setenforce 1')
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_ticket47384(topology):
+def test_ticket47384(topology_st):
'''
Test pluginpath validation: relative and absolute paths
@@ -76,29 +30,30 @@ def test_ticket47384(topology):
os.system('setenforce 0')
PLUGIN_DN = 'cn=%s,cn=plugins,cn=config' % PLUGIN_WHOAMI
- tmp_dir = topology.standalone.get_tmp_dir()
- plugin_dir = topology.standalone.get_plugin_dir()
+ tmp_dir = topology_st.standalone.get_tmp_dir()
+ plugin_dir = topology_st.standalone.get_plugin_dir()
# Copy the library to our tmp directory
try:
shutil.copy('%s/libwhoami-plugin.so' % plugin_dir, tmp_dir)
except IOError as e:
- log.fatal('Failed to copy %s/libwhoami-plugin.so to the tmp directory %s, error: %s' % (plugin_dir, tmp_dir, e.strerror))
+ log.fatal('Failed to copy %s/libwhoami-plugin.so to the tmp directory %s, error: %s' % (
+ plugin_dir, tmp_dir, e.strerror))
assert False
try:
shutil.copy('%s/libwhoami-plugin.la' % plugin_dir, tmp_dir)
except IOError as e:
log.warn('Failed to copy ' + plugin_dir +
'/libwhoami-plugin.la to the tmp directory, error: '
- + e.strerror)
+ + e.strerror)
#
# Test adding valid plugin paths
#
# Try using the absolute path to the current library
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
- 'nsslapd-pluginPath', '%s/libwhoami-plugin' % plugin_dir)])
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
+ 'nsslapd-pluginPath', '%s/libwhoami-plugin' % plugin_dir)])
except ldap.LDAPError as e:
log.error('Failed to set valid plugin path (%s): error (%s)' %
('%s/libwhoami-plugin' % plugin_dir, e.message['desc']))
@@ -106,8 +61,8 @@ def test_ticket47384(topology):
# Try using new remote location
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
- 'nsslapd-pluginPath', '%s/libwhoami-plugin' % tmp_dir)])
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
+ 'nsslapd-pluginPath', '%s/libwhoami-plugin' % tmp_dir)])
except ldap.LDAPError as e:
log.error('Failed to set valid plugin path (%s): error (%s)' %
('%s/libwhoami-plugin' % tmp_dir, e.message['desc']))
@@ -115,8 +70,8 @@ def test_ticket47384(topology):
# Set plugin path back to the default
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
- 'nsslapd-pluginPath', 'libwhoami-plugin')])
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
+ 'nsslapd-pluginPath', 'libwhoami-plugin')])
except ldap.LDAPError as e:
log.error('Failed to set valid relative plugin path (%s): error (%s)' %
('libwhoami-plugin' % tmp_dir, e.message['desc']))
@@ -126,8 +81,8 @@ def test_ticket47384(topology):
# Test invalid path (no library present)
#
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
- 'nsslapd-pluginPath', '/bin/libwhoami-plugin')])
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
+ 'nsslapd-pluginPath', '/bin/libwhoami-plugin')])
# No exception?! This is an error
log.error('Invalid plugin path was incorrectly accepted by the server!')
assert False
@@ -142,8 +97,8 @@ def test_ticket47384(topology):
# Test invalid relative path (no library present)
#
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
- 'nsslapd-pluginPath', '../libwhoami-plugin')])
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
+ 'nsslapd-pluginPath', '../libwhoami-plugin')])
# No exception?! This is an error
log.error('Invalid plugin path was incorrectly accepted by the server!')
assert False
@@ -162,4 +117,3 @@ if __name__ == '__main__':
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s %s" % CURRENT_FILE)
-
diff --git a/dirsrvtests/tests/tickets/ticket47431_test.py b/dirsrvtests/tests/tickets/ticket47431_test.py
index 3453776..573dc77 100644
--- a/dirsrvtests/tests/tickets/ticket47431_test.py
+++ b/dirsrvtests/tests/tickets/ticket47431_test.py
@@ -6,68 +6,27 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
DN_7BITPLUGIN = "cn=7-bit check,%s" % DN_PLUGIN
ATTRS = ["uid", "mail", "userpassword", ",", SUFFIX, None]
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_ticket47431_0(topology):
+def test_ticket47431_0(topology_st):
'''
Enable 7 bit plugin
'''
log.info("Ticket 47431 - 0: Enable 7bit plugin...")
- topology.standalone.plugins.enable(name=PLUGIN_7_BIT_CHECK)
+ topology_st.standalone.plugins.enable(name=PLUGIN_7_BIT_CHECK)
-def test_ticket47431_1(topology):
+def test_ticket47431_1(topology_st):
'''
nsslapd-pluginarg0: uid
nsslapd-pluginarg1: mail
@@ -85,38 +44,39 @@ def test_ticket47431_1(topology):
log.debug('modify_s %s' % DN_7BITPLUGIN)
try:
- topology.standalone.modify_s(DN_7BITPLUGIN,
- [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', "uid"),
- (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', "mail"),
- (ldap.MOD_REPLACE, 'nsslapd-pluginarg2', "userpassword"),
- (ldap.MOD_REPLACE, 'nsslapd-pluginarg3', ","),
- (ldap.MOD_REPLACE, 'nsslapd-pluginarg4', SUFFIX)])
+ topology_st.standalone.modify_s(DN_7BITPLUGIN,
+ [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', "uid"),
+ (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', "mail"),
+ (ldap.MOD_REPLACE, 'nsslapd-pluginarg2', "userpassword"),
+ (ldap.MOD_REPLACE, 'nsslapd-pluginarg3', ","),
+ (ldap.MOD_REPLACE, 'nsslapd-pluginarg4', SUFFIX)])
except ValueError:
log.error('modify failed: Some problem occured with a value that was provided')
assert False
arg2 = "nsslapd-pluginarg2: userpassword"
- topology.standalone.stop(timeout=10)
- dse_ldif = topology.standalone.confdir + '/dse.ldif'
+ topology_st.standalone.stop(timeout=10)
+ dse_ldif = topology_st.standalone.confdir + '/dse.ldif'
os.system('mv %s %s.47431' % (dse_ldif, dse_ldif))
- os.system('sed -e "s/\\(%s\\)/\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1/" %s.47431 > %s' % (arg2, dse_ldif, dse_ldif))
- topology.standalone.start(timeout=10)
+ os.system(
+ 'sed -e "s/\\(%s\\)/\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1/" %s.47431 > %s' % (
+ arg2, dse_ldif, dse_ldif))
+ topology_st.standalone.start(timeout=10)
- cmdline = 'egrep -i "%s" %s' % (expected, topology.standalone.errlog)
+ cmdline = 'egrep -i "%s" %s' % (expected, topology_st.standalone.errlog)
p = os.popen(cmdline, "r")
line = p.readline()
if line == "":
- log.error('Expected error "%s" not logged in %s' % (expected, topology.standalone.errlog))
+ log.error('Expected error "%s" not logged in %s' % (expected, topology_st.standalone.errlog))
assert False
else:
log.debug('line: %s' % line)
- log.info('Expected error "%s" logged in %s' % (expected, topology.standalone.errlog))
-
+ log.info('Expected error "%s" logged in %s' % (expected, topology_st.standalone.errlog))
log.info("Ticket 47431 - 1: done")
-def test_ticket47431_2(topology):
+def test_ticket47431_2(topology_st):
'''
nsslapd-pluginarg0: uid
nsslapd-pluginarg0: mail
@@ -140,23 +100,23 @@ def test_ticket47431_2(topology):
log.info("Ticket 47431 - 2: Check two values belonging to one arg is fixed...")
try:
- topology.standalone.modify_s(DN_7BITPLUGIN,
- [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', "uid"),
- (ldap.MOD_ADD, 'nsslapd-pluginarg0', "mail"),
- (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', "userpassword"),
- (ldap.MOD_REPLACE, 'nsslapd-pluginarg2', ","),
- (ldap.MOD_REPLACE, 'nsslapd-pluginarg3', SUFFIX),
- (ldap.MOD_DELETE, 'nsslapd-pluginarg4', None)])
+ topology_st.standalone.modify_s(DN_7BITPLUGIN,
+ [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', "uid"),
+ (ldap.MOD_ADD, 'nsslapd-pluginarg0', "mail"),
+ (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', "userpassword"),
+ (ldap.MOD_REPLACE, 'nsslapd-pluginarg2', ","),
+ (ldap.MOD_REPLACE, 'nsslapd-pluginarg3', SUFFIX),
+ (ldap.MOD_DELETE, 'nsslapd-pluginarg4', None)])
except ValueError:
log.error('modify failed: Some problem occured with a value that was provided')
assert False
# PLUGIN LOG LEVEL
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')])
+ topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')])
- topology.standalone.restart(timeout=10)
+ topology_st.standalone.restart(timeout=10)
- cmdline = 'egrep -i %s %s' % ("NS7bitAttr_Init", topology.standalone.errlog)
+ cmdline = 'egrep -i %s %s' % ("NS7bitAttr_Init", topology_st.standalone.errlog)
p = os.popen(cmdline, "r")
i = 0
while ATTRS[i]:
@@ -175,7 +135,7 @@ def test_ticket47431_2(topology):
log.info("Ticket 47431 - 2: done")
-def test_ticket47431_3(topology):
+def test_ticket47431_3(topology_st):
'''
nsslapd-pluginarg1: uid
nsslapd-pluginarg3: mail
@@ -199,27 +159,27 @@ def test_ticket47431_3(topology):
log.info("Ticket 47431 - 3: Check missing args are fixed...")
try:
- topology.standalone.modify_s(DN_7BITPLUGIN,
- [(ldap.MOD_DELETE, 'nsslapd-pluginarg0', None),
- (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', "uid"),
- (ldap.MOD_DELETE, 'nsslapd-pluginarg2', None),
- (ldap.MOD_REPLACE, 'nsslapd-pluginarg3', "mail"),
- (ldap.MOD_REPLACE, 'nsslapd-pluginarg5', "userpassword"),
- (ldap.MOD_REPLACE, 'nsslapd-pluginarg7', ","),
- (ldap.MOD_REPLACE, 'nsslapd-pluginarg9', SUFFIX)])
+ topology_st.standalone.modify_s(DN_7BITPLUGIN,
+ [(ldap.MOD_DELETE, 'nsslapd-pluginarg0', None),
+ (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', "uid"),
+ (ldap.MOD_DELETE, 'nsslapd-pluginarg2', None),
+ (ldap.MOD_REPLACE, 'nsslapd-pluginarg3', "mail"),
+ (ldap.MOD_REPLACE, 'nsslapd-pluginarg5', "userpassword"),
+ (ldap.MOD_REPLACE, 'nsslapd-pluginarg7', ","),
+ (ldap.MOD_REPLACE, 'nsslapd-pluginarg9', SUFFIX)])
except ValueError:
log.error('modify failed: Some problem occured with a value that was provided')
assert False
# PLUGIN LOG LEVEL
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')])
+ topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')])
- topology.standalone.stop(timeout=10)
- os.system('mv %s %s.47431' % (topology.standalone.errlog, topology.standalone.errlog))
- os.system('touch %s' % (topology.standalone.errlog))
- topology.standalone.start(timeout=10)
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.47431' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
+ topology_st.standalone.start(timeout=10)
- cmdline = 'egrep -i %s %s' % ("NS7bitAttr_Init", topology.standalone.errlog)
+ cmdline = 'egrep -i %s %s' % ("NS7bitAttr_Init", topology_st.standalone.errlog)
p = os.popen(cmdline, "r")
i = 0
while ATTRS[i]:
@@ -242,4 +202,3 @@ if __name__ == '__main__':
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s %s" % CURRENT_FILE)
-
diff --git a/dirsrvtests/tests/tickets/ticket47462_test.py b/dirsrvtests/tests/tickets/ticket47462_test.py
index 66a2385..8b89d0b 100644
--- a/dirsrvtests/tests/tickets/ticket47462_test.py
+++ b/dirsrvtests/tests/tickets/ticket47462_test.py
@@ -6,25 +6,19 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import sys
+import logging
import time
+
import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
from lib389._constants import *
from lib389.properties import *
+from lib389.topologies import topology_m2
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-#
-# important part. We can deploy Master1 and Master2 on different versions
-#
-installation1_prefix = None
-installation2_prefix = None
-
DES_PLUGIN = 'cn=DES,cn=Password Storage Schemes,cn=plugins,cn=config'
AES_PLUGIN = 'cn=AES,cn=Password Storage Schemes,cn=plugins,cn=config'
MMR_PLUGIN = 'cn=Multimaster Replication Plugin,cn=plugins,cn=config'
@@ -35,115 +29,7 @@ TEST_REPL_DN = 'cn=test repl,' + DEFAULT_SUFFIX
DES2AES_TASK_DN = 'cn=convert,cn=des2aes,cn=tasks,cn=config'
-class TopologyMaster1Master2(object):
- def __init__(self, master1, master2):
- master1.open()
- self.master1 = master1
-
- master2.open()
- self.master2 = master2
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to create a replicated topology for the 'module'.
- The replicated topology is MASTER1 <-> Master2.
- '''
- global installation1_prefix
- global installation2_prefix
-
- # allocate master1 on a given deployement
- master1 = DirSrv(verbose=False)
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Args for the master1 instance
- args_instance[SER_HOST] = HOST_MASTER_1
- args_instance[SER_PORT] = PORT_MASTER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
- args_master = args_instance.copy()
- master1.allocate(args_master)
-
- # allocate master1 on a given deployement
- master2 = DirSrv(verbose=False)
- if installation2_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation2_prefix
-
- # Args for the consumer instance
- args_instance[SER_HOST] = HOST_MASTER_2
- args_instance[SER_PORT] = PORT_MASTER_2
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
- args_master = args_instance.copy()
- master2.allocate(args_master)
-
- # Get the status of the instance and restart it if it exists
- instance_master1 = master1.exists()
- instance_master2 = master2.exists()
-
- # Remove all the instances
- if instance_master1:
- master1.delete()
- if instance_master2:
- master2.delete()
-
- # Create the instances
- master1.create()
- master1.open()
- master2.create()
- master2.open()
-
- #
- # Now prepare the Master-Consumer topology
- #
- # First Enable replication
- master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
- master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
- # Initialize the supplier->consumer
-
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- AGMT_DN = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
- master1.agreement
- if not AGMT_DN:
- log.fatal("Fail to create a replica agreement")
- sys.exit(1)
-
- log.debug("%s created" % AGMT_DN)
-
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- master2.agreement.create(suffix=DEFAULT_SUFFIX, host=master1.host, port=master1.port, properties=properties)
-
- master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
- master1.waitForReplInit(AGMT_DN)
-
- # Check replication is working fine
- if master1.testReplication(DEFAULT_SUFFIX, master2):
- log.info('Replication is working.')
- else:
- log.fatal('Replication is not working.')
- assert False
-
- # clear the tmp directory
- master1.clearTmpDir(__file__)
-
- def fin():
- master1.delete()
- master2.delete()
- request.addfinalizer(fin)
-
- return TopologyMaster1Master2(master1, master2)
-
-
-def test_ticket47462(topology):
+def test_ticket47462(topology_m2):
"""
Test that AES properly replaces DES during an update/restart, and that
replication also works correctly.
@@ -157,55 +43,55 @@ def test_ticket47462(topology):
# Add an extra attribute to the DES plugin args
#
try:
- topology.master1.modify_s(DES_PLUGIN,
- [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', 'on')])
+ topology_m2.ms["master1"].modify_s(DES_PLUGIN,
+ [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', 'on')])
except ldap.LDAPError as e:
- log.fatal('Failed to enable DES plugin, error: ' +
- e.message['desc'])
- assert False
+ log.fatal('Failed to enable DES plugin, error: ' +
+ e.message['desc'])
+ assert False
try:
- topology.master1.modify_s(DES_PLUGIN,
- [(ldap.MOD_ADD, 'nsslapd-pluginarg2', 'description')])
+ topology_m2.ms["master1"].modify_s(DES_PLUGIN,
+ [(ldap.MOD_ADD, 'nsslapd-pluginarg2', 'description')])
except ldap.LDAPError as e:
- log.fatal('Failed to reset DES plugin, error: ' +
- e.message['desc'])
- assert False
+ log.fatal('Failed to reset DES plugin, error: ' +
+ e.message['desc'])
+ assert False
try:
- topology.master1.modify_s(MMR_PLUGIN,
- [(ldap.MOD_DELETE,
- 'nsslapd-plugin-depends-on-named',
- 'AES')])
+ topology_m2.ms["master1"].modify_s(MMR_PLUGIN,
+ [(ldap.MOD_DELETE,
+ 'nsslapd-plugin-depends-on-named',
+ 'AES')])
except ldap.NO_SUCH_ATTRIBUTE:
pass
except ldap.LDAPError as e:
- log.fatal('Failed to reset MMR plugin, error: ' +
- e.message['desc'])
- assert False
+ log.fatal('Failed to reset MMR plugin, error: ' +
+ e.message['desc'])
+ assert False
#
# Delete the AES plugin
#
try:
- topology.master1.delete_s(AES_PLUGIN)
+ topology_m2.ms["master1"].delete_s(AES_PLUGIN)
except ldap.NO_SUCH_OBJECT:
pass
except ldap.LDAPError as e:
- log.fatal('Failed to delete AES plugin, error: ' +
- e.message['desc'])
- assert False
+ log.fatal('Failed to delete AES plugin, error: ' +
+ e.message['desc'])
+ assert False
# restart the server so we must use DES plugin
- topology.master1.restart(timeout=10)
+ topology_m2.ms["master1"].restart(timeout=10)
#
# Get the agmt dn, and set the password
#
try:
- entry = topology.master1.search_s('cn=config', ldap.SCOPE_SUBTREE,
- 'objectclass=nsDS5ReplicationAgreement')
+ entry = topology_m2.ms["master1"].search_s('cn=config', ldap.SCOPE_SUBTREE,
+ 'objectclass=nsDS5ReplicationAgreement')
if entry:
agmt_dn = entry[0].dn
log.info('Found agmt dn (%s)' % agmt_dn)
@@ -219,8 +105,8 @@ def test_ticket47462(topology):
try:
properties = {RA_BINDPW: "password"}
- topology.master1.agreement.setProperties(None, agmt_dn, None,
- properties)
+ topology_m2.ms["master1"].agreement.setProperties(None, agmt_dn, None,
+ properties)
log.info('Successfully modified replication agreement')
except ValueError:
log.error('Failed to update replica agreement: ' + AGMT_DN)
@@ -230,17 +116,17 @@ def test_ticket47462(topology):
# Check replication works with the new DES password
#
try:
- topology.master1.add_s(Entry((USER1_DN,
- {'objectclass': "top person".split(),
- 'sn': 'sn',
- 'description': 'DES value to convert',
- 'cn': 'test_user'})))
+ topology_m2.ms["master1"].add_s(Entry((USER1_DN,
+ {'objectclass': "top person".split(),
+ 'sn': 'sn',
+ 'description': 'DES value to convert',
+ 'cn': 'test_user'})))
loop = 0
ent = None
while loop <= 10:
try:
- ent = topology.master2.getEntry(USER1_DN, ldap.SCOPE_BASE,
- "(objectclass=*)")
+ ent = topology_m2.ms["master2"].getEntry(USER1_DN, ldap.SCOPE_BASE,
+ "(objectclass=*)")
break
except ldap.NO_SUCH_OBJECT:
time.sleep(1)
@@ -258,7 +144,7 @@ def test_ticket47462(topology):
# Add a backend (that has no entries)
#
try:
- topology.master1.backend.create("o=empty", {BACKEND_NAME: "empty"})
+ topology_m2.ms["master1"].backend.create("o=empty", {BACKEND_NAME: "empty"})
except ldap.LDAPError as e:
log.fatal('Failed to create extra/empty backend: ' + e.message['desc'])
assert False
@@ -266,16 +152,16 @@ def test_ticket47462(topology):
#
# Run the upgrade...
#
- topology.master1.upgrade('online')
- topology.master1.restart()
- topology.master2.restart()
+ topology_m2.ms["master1"].upgrade('online')
+ topology_m2.ms["master1"].restart()
+ topology_m2.ms["master2"].restart()
#
# Check that the restart converted existing DES credentials
#
try:
- entry = topology.master1.search_s('cn=config', ldap.SCOPE_SUBTREE,
- 'nsDS5ReplicaCredentials=*')
+ entry = topology_m2.ms["master1"].search_s('cn=config', ldap.SCOPE_SUBTREE,
+ 'nsDS5ReplicaCredentials=*')
if entry:
val = entry[0].getValue('nsDS5ReplicaCredentials')
if val.startswith('{AES-'):
@@ -297,11 +183,11 @@ def test_ticket47462(topology):
# all the attributes.
#
try:
- entry = topology.master1.search_s(AES_PLUGIN, ldap.SCOPE_BASE,
- 'objectclass=*')
+ entry = topology_m2.ms["master1"].search_s(AES_PLUGIN, ldap.SCOPE_BASE,
+ 'objectclass=*')
if not entry[0].hasValue('nsslapd-pluginarg0', 'description') and \
- not entry[0].hasValue('nsslapd-pluginarg1', 'description') and \
- not entry[0].hasValue('nsslapd-pluginarg2', 'description'):
+ not entry[0].hasValue('nsslapd-pluginarg1', 'description') and \
+ not entry[0].hasValue('nsslapd-pluginarg2', 'description'):
log.fatal('The AES plugin did not have the DES attribute copied ' +
'over correctly')
assert False
@@ -315,8 +201,8 @@ def test_ticket47462(topology):
# Check that the MMR plugin was updated
#
try:
- entry = topology.master1.search_s(MMR_PLUGIN, ldap.SCOPE_BASE,
- 'objectclass=*')
+ entry = topology_m2.ms["master1"].search_s(MMR_PLUGIN, ldap.SCOPE_BASE,
+ 'objectclass=*')
if not entry[0].hasValue('nsslapd-plugin-depends-on-named', 'AES'):
log.fatal('The MMR Plugin was not correctly updated')
assert False
@@ -330,8 +216,8 @@ def test_ticket47462(topology):
# Check that the DES plugin was correctly updated
#
try:
- entry = topology.master1.search_s(DES_PLUGIN, ldap.SCOPE_BASE,
- 'objectclass=*')
+ entry = topology_m2.ms["master1"].search_s(DES_PLUGIN, ldap.SCOPE_BASE,
+ 'objectclass=*')
if not entry[0].hasValue('nsslapd-pluginPath', 'libpbe-plugin'):
log.fatal('The DES Plugin was not correctly updated')
assert False
@@ -345,16 +231,16 @@ def test_ticket47462(topology):
# Check replication one last time
#
try:
- topology.master1.add_s(Entry((USER_DN,
- {'objectclass': "top person".split(),
- 'sn': 'sn',
- 'cn': 'test_user'})))
+ topology_m2.ms["master1"].add_s(Entry((USER_DN,
+ {'objectclass': "top person".split(),
+ 'sn': 'sn',
+ 'cn': 'test_user'})))
loop = 0
ent = None
while loop <= 10:
try:
- ent = topology.master2.getEntry(USER_DN, ldap.SCOPE_BASE,
- "(objectclass=*)")
+ ent = topology_m2.ms["master2"].getEntry(USER_DN, ldap.SCOPE_BASE,
+ "(objectclass=*)")
break
except ldap.NO_SUCH_OBJECT:
time.sleep(1)
@@ -371,9 +257,9 @@ def test_ticket47462(topology):
# Check the entry
log.info('Entry before running task...')
try:
- entry = topology.master1.search_s(USER1_DN,
- ldap.SCOPE_BASE,
- 'objectclass=*')
+ entry = topology_m2.ms["master1"].search_s(USER1_DN,
+ ldap.SCOPE_BASE,
+ 'objectclass=*')
if entry:
print(str(entry))
else:
@@ -388,27 +274,27 @@ def test_ticket47462(topology):
# Test the DES2AES Task on USER1_DN
#
try:
- topology.master1.add_s(Entry((DES2AES_TASK_DN,
- {'objectclass': ['top',
- 'extensibleObject'],
- 'suffix': DEFAULT_SUFFIX,
- 'cn': 'convert'})))
+ topology_m2.ms["master1"].add_s(Entry((DES2AES_TASK_DN,
+ {'objectclass': ['top',
+ 'extensibleObject'],
+ 'suffix': DEFAULT_SUFFIX,
+ 'cn': 'convert'})))
except ldap.LDAPError as e:
log.fatal('Failed to add task entry: ' + e.message['desc'])
assert False
# Wait for task
task_entry = Entry(DES2AES_TASK_DN)
- (done, exitCode) = topology.master1.tasks.checkTask(task_entry, True)
+ (done, exitCode) = topology_m2.ms["master1"].tasks.checkTask(task_entry, True)
if exitCode:
log.fatal("Error: des2aes task exited with %d" % (exitCode))
assert False
# Check the entry
try:
- entry = topology.master1.search_s(USER1_DN,
- ldap.SCOPE_BASE,
- 'objectclass=*')
+ entry = topology_m2.ms["master1"].search_s(USER1_DN,
+ ldap.SCOPE_BASE,
+ 'objectclass=*')
if entry:
val = entry[0].getValue('description')
print(str(entry[0]))
@@ -426,6 +312,7 @@ def test_ticket47462(topology):
e.message['desc'])
assert False
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/dirsrvtests/tests/tickets/ticket47490_test.py b/dirsrvtests/tests/tickets/ticket47490_test.py
index 0da542e..ea6c2bd 100644
--- a/dirsrvtests/tests/tickets/ticket47490_test.py
+++ b/dirsrvtests/tests/tickets/ticket47490_test.py
@@ -11,16 +11,15 @@ Created on Nov 7, 2013
@author: tbordaz
'''
-import os
-import sys
-import ldap
-import time
import logging
-import pytest
import re
-from lib389 import DirSrv, Entry
+import time
+
+import ldap
+import pytest
+from lib389 import Entry
from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_m1c1
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
@@ -29,25 +28,16 @@ TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX
ENTRY_DN = "cn=test_entry, %s" % SUFFIX
MUST_OLD = "(postalAddress $ preferredLocale)"
MUST_NEW = "(postalAddress $ preferredLocale $ telexNumber)"
-MAY_OLD = "(postalCode $ street)"
-MAY_NEW = "(postalCode $ street $ postOfficeBox)"
-
-
-class TopologyMasterConsumer(object):
- def __init__(self, master, consumer):
- master.open()
- self.master = master
+MAY_OLD = "(postalCode $ street)"
+MAY_NEW = "(postalCode $ street $ postOfficeBox)"
- consumer.open()
- self.consumer = consumer
-
-def _header(topology, label):
- topology.master.log.info("\n\n###############################################")
- topology.master.log.info("#######")
- topology.master.log.info("####### %s" % label)
- topology.master.log.info("#######")
- topology.master.log.info("###################################################")
+def _header(topology_m1c1, label):
+ topology_m1c1.ms["master1"].log.info("\n\n###############################################")
+ topology_m1c1.ms["master1"].log.info("#######")
+ topology_m1c1.ms["master1"].log.info("####### %s" % label)
+ topology_m1c1.ms["master1"].log.info("#######")
+ topology_m1c1.ms["master1"].log.info("###################################################")
def pattern_errorlog(file, log_pattern):
@@ -75,9 +65,9 @@ def pattern_errorlog(file, log_pattern):
def _oc_definition(oid_ext, name, must=None, may=None):
- oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext
+ oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext
desc = 'To test ticket 47490'
- sup = 'person'
+ sup = 'person'
if not must:
must = MUST_OLD
if not may:
@@ -99,7 +89,7 @@ def mod_OC(instance, oid_ext, name, old_must=None, old_may=None, new_must=None,
instance.schema.add_schema('objectClasses', new_oc)
-def support_schema_learning(topology):
+def support_schema_learning(topology_m1c1):
"""
with https://fedorahosted.org/389/ticket/47721, the supplier and consumer can learn
schema definitions when a replication occurs.
@@ -112,7 +102,7 @@ def support_schema_learning(topology):
This function returns True if 47721 is fixed in the current release
False else
"""
- ent = topology.consumer.getEntry(DN_CONFIG, ldap.SCOPE_BASE, "(cn=config)", ['nsslapd-versionstring'])
+ ent = topology_m1c1.cs["consumer1"].getEntry(DN_CONFIG, ldap.SCOPE_BASE, "(cn=config)", ['nsslapd-versionstring'])
if ent.hasAttr('nsslapd-versionstring'):
val = ent.getValue('nsslapd-versionstring')
version = val.split('/')[1].split('.') # something like ['1', '3', '1', '23', 'final_fix']
@@ -130,7 +120,7 @@ def support_schema_learning(topology):
return False
-def trigger_update(topology):
+def trigger_update(topology_m1c1):
"""
It triggers an update on the supplier. This will start a replication
session and a schema push
@@ -140,13 +130,14 @@ def trigger_update(topology):
except AttributeError:
trigger_update.value = 1
replace = [(ldap.MOD_REPLACE, 'telephonenumber', str(trigger_update.value))]
- topology.master.modify_s(ENTRY_DN, replace)
+ topology_m1c1.ms["master1"].modify_s(ENTRY_DN, replace)
# wait 10 seconds that the update is replicated
loop = 0
while loop <= 10:
try:
- ent = topology.consumer.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber'])
+ ent = topology_m1c1.cs["consumer1"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)",
+ ['telephonenumber'])
val = ent.telephonenumber or "0"
if int(val) == trigger_update.value:
return
@@ -159,7 +150,7 @@ def trigger_update(topology):
loop += 1
-def trigger_schema_push(topology):
+def trigger_schema_push(topology_m1c1):
'''
Trigger update to create a replication session.
In case of 47721 is fixed and the replica needs to learn the missing definition, then
@@ -167,111 +158,35 @@ def trigger_schema_push(topology):
push the schema (and the schemaCSN.
This is why there is two updates and replica agreement is stopped/start (to create a second session)
'''
- agreements = topology.master.agreement.list(suffix=SUFFIX, consumer_host=topology.consumer.host, consumer_port=topology.consumer.port)
- assert(len(agreements) == 1)
+ agreements = topology_m1c1.ms["master1"].agreement.list(suffix=SUFFIX,
+ consumer_host=topology_m1c1.cs["consumer1"].host,
+ consumer_port=topology_m1c1.cs["consumer1"].port)
+ assert (len(agreements) == 1)
ra = agreements[0]
- trigger_update(topology)
- topology.master.agreement.pause(ra.dn)
- topology.master.agreement.resume(ra.dn)
- trigger_update(topology)
+ trigger_update(topology_m1c1)
+ topology_m1c1.ms["master1"].agreement.pause(ra.dn)
+ topology_m1c1.ms["master1"].agreement.resume(ra.dn)
+ trigger_update(topology_m1c1)
-(a)pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to create a replicated topology for the 'module'.
- The replicated topology is MASTER -> Consumer.
- '''
- master = DirSrv(verbose=False)
- consumer = DirSrv(verbose=False)
-
- # Args for the master instance
- args_instance[SER_HOST] = HOST_MASTER_1
- args_instance[SER_PORT] = PORT_MASTER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
- args_master = args_instance.copy()
- master.allocate(args_master)
-
- # Args for the consumer instance
- args_instance[SER_HOST] = HOST_CONSUMER_1
- args_instance[SER_PORT] = PORT_CONSUMER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
- args_consumer = args_instance.copy()
- consumer.allocate(args_consumer)
-
- # Get the status of the instance
- instance_master = master.exists()
- instance_consumer = consumer.exists()
-
- # Remove all the instances
- if instance_master:
- master.delete()
- if instance_consumer:
- consumer.delete()
-
- # Create the instances
- master.create()
- master.open()
- consumer.create()
- consumer.open()
-
- #
- # Now prepare the Master-Consumer topology
- #
- # First Enable replication
- master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
- consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER)
-
- # Initialize the supplier->consumer
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties)
-
- if not repl_agreement:
- log.fatal("Fail to create a replica agreement")
- sys.exit(1)
-
- log.debug("%s created" % repl_agreement)
- master.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
- master.waitForReplInit(repl_agreement)
-
- # Check replication is working fine
- if master.testReplication(DEFAULT_SUFFIX, consumer):
- log.info('Replication is working.')
- else:
- log.fatal('Replication is not working.')
- assert False
-
- def fin():
- master.delete()
- consumer.delete()
- request.addfinalizer(fin)
- #
- # Here we have two instances master and consumer
- # with replication working.
- return TopologyMasterConsumer(master, consumer)
-
-
-def test_ticket47490_init(topology):
+def test_ticket47490_init(topology_m1c1):
"""
Initialize the test environment
"""
- log.debug("test_ticket47490_init topology %r (master %r, consumer %r" % (topology, topology.master, topology.consumer))
+ log.debug("test_ticket47490_init topology_m1c1 %r (master %r, consumer %r" % (
+ topology_m1c1, topology_m1c1.ms["master1"], topology_m1c1.cs["consumer1"]))
# the test case will check if a warning message is logged in the
# error log of the supplier
- topology.master.errorlog_file = open(topology.master.errlog, "r")
+ topology_m1c1.ms["master1"].errorlog_file = open(topology_m1c1.ms["master1"].errlog, "r")
# This entry will be used to trigger attempt of schema push
- topology.master.add_s(Entry((ENTRY_DN, {
- 'objectclass': "top person".split(),
- 'sn': 'test_entry',
- 'cn': 'test_entry'})))
+ topology_m1c1.ms["master1"].add_s(Entry((ENTRY_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': 'test_entry',
+ 'cn': 'test_entry'})))
-def test_ticket47490_one(topology):
+def test_ticket47490_one(topology_m1c1):
"""
Summary: Extra OC Schema is pushed - no error
@@ -285,16 +200,17 @@ def test_ticket47490_one(topology):
- consumer +masterNewOCA
"""
- _header(topology, "Extra OC Schema is pushed - no error")
+ _header(topology_m1c1, "Extra OC Schema is pushed - no error")
- log.debug("test_ticket47490_one topology %r (master %r, consumer %r" % (topology, topology.master, topology.consumer))
+ log.debug("test_ticket47490_one topology_m1c1 %r (master %r, consumer %r" % (
+ topology_m1c1, topology_m1c1.ms["master1"], topology_m1c1.cs["consumer1"]))
# update the schema of the supplier so that it is a superset of
# consumer. Schema should be pushed
- add_OC(topology.master, 2, 'masterNewOCA')
+ add_OC(topology_m1c1.ms["master1"], 2, 'masterNewOCA')
- trigger_schema_push(topology)
- master_schema_csn = topology.master.schema.get_schema_csn()
- consumer_schema_csn = topology.consumer.schema.get_schema_csn()
+ trigger_schema_push(topology_m1c1)
+ master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn()
+ consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn()
# Check the schemaCSN was updated on the consumer
log.debug("test_ticket47490_one master_schema_csn=%s", master_schema_csn)
@@ -303,12 +219,12 @@ def test_ticket47490_one(topology):
# Check the error log of the supplier does not contain an error
regex = re.compile("must not be overwritten \(set replication log for additional info\)")
- res = pattern_errorlog(topology.master.errorlog_file, regex)
+ res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex)
if res is not None:
assert False
-def test_ticket47490_two(topology):
+def test_ticket47490_two(topology_m1c1):
"""
Summary: Extra OC Schema is pushed - (ticket 47721 allows to learn missing def)
@@ -323,25 +239,25 @@ def test_ticket47490_two(topology):
"""
- _header(topology, "Extra OC Schema is pushed - (ticket 47721 allows to learn missing def)")
+ _header(topology_m1c1, "Extra OC Schema is pushed - (ticket 47721 allows to learn missing def)")
# add this OC on consumer. Supplier will no push the schema
- add_OC(topology.consumer, 1, 'consumerNewOCA')
+ add_OC(topology_m1c1.cs["consumer1"], 1, 'consumerNewOCA')
# add a new OC on the supplier so that its nsSchemaCSN is larger than the consumer (wait 2s)
time.sleep(2)
- add_OC(topology.master, 3, 'masterNewOCB')
+ add_OC(topology_m1c1.ms["master1"], 3, 'masterNewOCB')
# now push the scheam
- trigger_schema_push(topology)
- master_schema_csn = topology.master.schema.get_schema_csn()
- consumer_schema_csn = topology.consumer.schema.get_schema_csn()
+ trigger_schema_push(topology_m1c1)
+ master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn()
+ consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn()
# Check the schemaCSN was NOT updated on the consumer
# with 47721, supplier learns the missing definition
log.debug("test_ticket47490_two master_schema_csn=%s", master_schema_csn)
log.debug("test_ticket47490_two consumer_schema_csn=%s", consumer_schema_csn)
- if support_schema_learning(topology):
+ if support_schema_learning(topology_m1c1):
assert master_schema_csn == consumer_schema_csn
else:
assert master_schema_csn != consumer_schema_csn
@@ -349,10 +265,10 @@ def test_ticket47490_two(topology):
# Check the error log of the supplier does not contain an error
# This message may happen during the learning phase
regex = re.compile("must not be overwritten \(set replication log for additional info\)")
- res = pattern_errorlog(topology.master.errorlog_file, regex)
+ res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex)
-def test_ticket47490_three(topology):
+def test_ticket47490_three(topology_m1c1):
"""
Summary: Extra OC Schema is pushed - no error
@@ -366,16 +282,16 @@ def test_ticket47490_three(topology):
- consumer +masterNewOCA +masterNewOCB +consumerNewOCA
"""
- _header(topology, "Extra OC Schema is pushed - no error")
+ _header(topology_m1c1, "Extra OC Schema is pushed - no error")
# Do an upate to trigger the schema push attempt
# add this OC on consumer. Supplier will no push the schema
- add_OC(topology.master, 1, 'consumerNewOCA')
+ add_OC(topology_m1c1.ms["master1"], 1, 'consumerNewOCA')
# now push the scheam
- trigger_schema_push(topology)
- master_schema_csn = topology.master.schema.get_schema_csn()
- consumer_schema_csn = topology.consumer.schema.get_schema_csn()
+ trigger_schema_push(topology_m1c1)
+ master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn()
+ consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn()
# Check the schemaCSN was NOT updated on the consumer
log.debug("test_ticket47490_three master_schema_csn=%s", master_schema_csn)
@@ -384,12 +300,12 @@ def test_ticket47490_three(topology):
# Check the error log of the supplier does not contain an error
regex = re.compile("must not be overwritten \(set replication log for additional info\)")
- res = pattern_errorlog(topology.master.errorlog_file, regex)
+ res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex)
if res is not None:
assert False
-def test_ticket47490_four(topology):
+def test_ticket47490_four(topology_m1c1):
"""
Summary: Same OC - extra MUST: Schema is pushed - no error
@@ -405,13 +321,14 @@ def test_ticket47490_four(topology):
+must=telexnumber
"""
- _header(topology, "Same OC - extra MUST: Schema is pushed - no error")
+ _header(topology_m1c1, "Same OC - extra MUST: Schema is pushed - no error")
- mod_OC(topology.master, 2, 'masterNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_OLD)
+ mod_OC(topology_m1c1.ms["master1"], 2, 'masterNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD,
+ new_may=MAY_OLD)
- trigger_schema_push(topology)
- master_schema_csn = topology.master.schema.get_schema_csn()
- consumer_schema_csn = topology.consumer.schema.get_schema_csn()
+ trigger_schema_push(topology_m1c1)
+ master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn()
+ consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn()
# Check the schemaCSN was updated on the consumer
log.debug("test_ticket47490_four master_schema_csn=%s", master_schema_csn)
@@ -420,12 +337,12 @@ def test_ticket47490_four(topology):
# Check the error log of the supplier does not contain an error
regex = re.compile("must not be overwritten \(set replication log for additional info\)")
- res = pattern_errorlog(topology.master.errorlog_file, regex)
+ res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex)
if res is not None:
assert False
-def test_ticket47490_five(topology):
+def test_ticket47490_five(topology_m1c1):
"""
Summary: Same OC - extra MUST: Schema is pushed - (fix for 47721)
@@ -444,26 +361,27 @@ def test_ticket47490_five(topology):
Note: replication log is enabled to get more details
"""
- _header(topology, "Same OC - extra MUST: Schema is pushed - (fix for 47721)")
+ _header(topology_m1c1, "Same OC - extra MUST: Schema is pushed - (fix for 47721)")
# get more detail why it fails
- topology.master.enableReplLogging()
+ topology_m1c1.ms["master1"].enableReplLogging()
# add telenumber to 'consumerNewOCA' on the consumer
- mod_OC(topology.consumer, 1, 'consumerNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_OLD)
+ mod_OC(topology_m1c1.cs["consumer1"], 1, 'consumerNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD,
+ new_may=MAY_OLD)
# add a new OC on the supplier so that its nsSchemaCSN is larger than the consumer (wait 2s)
time.sleep(2)
- add_OC(topology.master, 4, 'masterNewOCC')
+ add_OC(topology_m1c1.ms["master1"], 4, 'masterNewOCC')
- trigger_schema_push(topology)
- master_schema_csn = topology.master.schema.get_schema_csn()
- consumer_schema_csn = topology.consumer.schema.get_schema_csn()
+ trigger_schema_push(topology_m1c1)
+ master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn()
+ consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn()
# Check the schemaCSN was NOT updated on the consumer
# with 47721, supplier learns the missing definition
log.debug("test_ticket47490_five master_schema_csn=%s", master_schema_csn)
log.debug("ctest_ticket47490_five onsumer_schema_csn=%s", consumer_schema_csn)
- if support_schema_learning(topology):
+ if support_schema_learning(topology_m1c1):
assert master_schema_csn == consumer_schema_csn
else:
assert master_schema_csn != consumer_schema_csn
@@ -471,10 +389,10 @@ def test_ticket47490_five(topology):
# Check the error log of the supplier does not contain an error
# This message may happen during the learning phase
regex = re.compile("must not be overwritten \(set replication log for additional info\)")
- res = pattern_errorlog(topology.master.errorlog_file, regex)
+ res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex)
-def test_ticket47490_six(topology):
+def test_ticket47490_six(topology_m1c1):
"""
Summary: Same OC - extra MUST: Schema is pushed - no error
@@ -494,14 +412,15 @@ def test_ticket47490_six(topology):
Note: replication log is enabled to get more details
"""
- _header(topology, "Same OC - extra MUST: Schema is pushed - no error")
+ _header(topology_m1c1, "Same OC - extra MUST: Schema is pushed - no error")
# add telenumber to 'consumerNewOCA' on the consumer
- mod_OC(topology.master, 1, 'consumerNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_OLD)
+ mod_OC(topology_m1c1.ms["master1"], 1, 'consumerNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD,
+ new_may=MAY_OLD)
- trigger_schema_push(topology)
- master_schema_csn = topology.master.schema.get_schema_csn()
- consumer_schema_csn = topology.consumer.schema.get_schema_csn()
+ trigger_schema_push(topology_m1c1)
+ master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn()
+ consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn()
# Check the schemaCSN was NOT updated on the consumer
log.debug("test_ticket47490_six master_schema_csn=%s", master_schema_csn)
@@ -511,12 +430,12 @@ def test_ticket47490_six(topology):
# Check the error log of the supplier does not contain an error
# This message may happen during the learning phase
regex = re.compile("must not be overwritten \(set replication log for additional info\)")
- res = pattern_errorlog(topology.master.errorlog_file, regex)
+ res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex)
if res is not None:
assert False
-def test_ticket47490_seven(topology):
+def test_ticket47490_seven(topology_m1c1):
"""
Summary: Same OC - extra MAY: Schema is pushed - no error
@@ -535,13 +454,14 @@ def test_ticket47490_seven(topology):
+must=telexnumber +must=telexnumber
+may=postOfficeBox
"""
- _header(topology, "Same OC - extra MAY: Schema is pushed - no error")
+ _header(topology_m1c1, "Same OC - extra MAY: Schema is pushed - no error")
- mod_OC(topology.master, 2, 'masterNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_NEW)
+ mod_OC(topology_m1c1.ms["master1"], 2, 'masterNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD,
+ new_may=MAY_NEW)
- trigger_schema_push(topology)
- master_schema_csn = topology.master.schema.get_schema_csn()
- consumer_schema_csn = topology.consumer.schema.get_schema_csn()
+ trigger_schema_push(topology_m1c1)
+ master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn()
+ consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn()
# Check the schemaCSN was updated on the consumer
log.debug("test_ticket47490_seven master_schema_csn=%s", master_schema_csn)
@@ -550,12 +470,12 @@ def test_ticket47490_seven(topology):
# Check the error log of the supplier does not contain an error
regex = re.compile("must not be overwritten \(set replication log for additional info\)")
- res = pattern_errorlog(topology.master.errorlog_file, regex)
+ res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex)
if res is not None:
assert False
-def test_ticket47490_eight(topology):
+def test_ticket47490_eight(topology_m1c1):
"""
Summary: Same OC - extra MAY: Schema is pushed (fix for 47721)
@@ -576,23 +496,25 @@ def test_ticket47490_eight(topology):
+must=telexnumber +must=telexnumber
+may=postOfficeBox +may=postOfficeBox
"""
- _header(topology, "Same OC - extra MAY: Schema is pushed (fix for 47721)")
+ _header(topology_m1c1, "Same OC - extra MAY: Schema is pushed (fix for 47721)")
- mod_OC(topology.consumer, 1, 'consumerNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_NEW)
+ mod_OC(topology_m1c1.cs["consumer1"], 1, 'consumerNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD,
+ new_may=MAY_NEW)
# modify OC on the supplier so that its nsSchemaCSN is larger than the consumer (wait 2s)
time.sleep(2)
- mod_OC(topology.master, 4, 'masterNewOCC', old_must=MUST_OLD, new_must=MUST_OLD, old_may=MAY_OLD, new_may=MAY_NEW)
+ mod_OC(topology_m1c1.ms["master1"], 4, 'masterNewOCC', old_must=MUST_OLD, new_must=MUST_OLD, old_may=MAY_OLD,
+ new_may=MAY_NEW)
- trigger_schema_push(topology)
- master_schema_csn = topology.master.schema.get_schema_csn()
- consumer_schema_csn = topology.consumer.schema.get_schema_csn()
+ trigger_schema_push(topology_m1c1)
+ master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn()
+ consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn()
# Check the schemaCSN was not updated on the consumer
# with 47721, supplier learns the missing definition
log.debug("test_ticket47490_eight master_schema_csn=%s", master_schema_csn)
log.debug("ctest_ticket47490_eight onsumer_schema_csn=%s", consumer_schema_csn)
- if support_schema_learning(topology):
+ if support_schema_learning(topology_m1c1):
assert master_schema_csn == consumer_schema_csn
else:
assert master_schema_csn != consumer_schema_csn
@@ -600,10 +522,10 @@ def test_ticket47490_eight(topology):
# Check the error log of the supplier does not contain an error
# This message may happen during the learning phase
regex = re.compile("must not be overwritten \(set replication log for additional info\)")
- res = pattern_errorlog(topology.master.errorlog_file, regex)
+ res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex)
-def test_ticket47490_nine(topology):
+def test_ticket47490_nine(topology_m1c1):
"""
Summary: Same OC - extra MAY: Schema is pushed - no error
@@ -626,13 +548,14 @@ def test_ticket47490_nine(topology):
+must=telexnumber +must=telexnumber
+may=postOfficeBox +may=postOfficeBox +may=postOfficeBox
"""
- _header(topology, "Same OC - extra MAY: Schema is pushed - no error")
+ _header(topology_m1c1, "Same OC - extra MAY: Schema is pushed - no error")
- mod_OC(topology.master, 1, 'consumerNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_NEW)
+ mod_OC(topology_m1c1.ms["master1"], 1, 'consumerNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD,
+ new_may=MAY_NEW)
- trigger_schema_push(topology)
- master_schema_csn = topology.master.schema.get_schema_csn()
- consumer_schema_csn = topology.consumer.schema.get_schema_csn()
+ trigger_schema_push(topology_m1c1)
+ master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn()
+ consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn()
# Check the schemaCSN was updated on the consumer
log.debug("test_ticket47490_nine master_schema_csn=%s", master_schema_csn)
@@ -641,7 +564,7 @@ def test_ticket47490_nine(topology):
# Check the error log of the supplier does not contain an error
regex = re.compile("must not be overwritten \(set replication log for additional info\)")
- res = pattern_errorlog(topology.master.errorlog_file, regex)
+ res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex)
if res is not None:
assert False
@@ -653,4 +576,3 @@ if __name__ == '__main__':
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s %s" % CURRENT_FILE)
-
diff --git a/dirsrvtests/tests/tickets/ticket47536_test.py b/dirsrvtests/tests/tickets/ticket47536_test.py
index cf20746..e287e2e 100644
--- a/dirsrvtests/tests/tickets/ticket47536_test.py
+++ b/dirsrvtests/tests/tickets/ticket47536_test.py
@@ -6,21 +6,12 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import shlex
-import subprocess
-import ldap
-import logging
-import pytest
import base64
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
+
+import pytest
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_m2
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
@@ -35,108 +26,8 @@ M1SERVERCERT = 'Server-Cert1'
M2SERVERCERT = 'Server-Cert2'
M1LDAPSPORT = '41636'
M2LDAPSPORT = '42636'
-
-
-class TopologyReplication(object):
- def __init__(self, master1, master2):
- master1.open()
- self.master1 = master1
- master2.open()
- self.master2 = master2
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- # Creating master 1...
- master1 = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_MASTER_1
- args_instance[SER_PORT] = PORT_MASTER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master1.allocate(args_master)
- instance_master1 = master1.exists()
- if instance_master1:
- master1.delete()
- master1.create()
- master1.open()
- master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-
- # Creating master 2...
- master2 = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_MASTER_2
- args_instance[SER_PORT] = PORT_MASTER_2
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master2.allocate(args_master)
- instance_master2 = master2.exists()
- if instance_master2:
- master2.delete()
- master2.create()
- master2.open()
- master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
- # Delete each instance in the end
- def fin():
- master1.delete()
- master2.delete()
- request.addfinalizer(fin)
-
- #
- # Create all the agreements
- #
- # Creating agreement from master 1 to master 2
- properties = {RA_NAME: r'meTo_%s:%s' % (master2.host, master2.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- global m1_m2_agmt
- m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
- if not m1_m2_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m1_m2_agmt)
-
- # Creating agreement from master 2 to master 1
- properties = {RA_NAME: r'meTo_%s:%s' % (master1.host, master1.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- global m2_m1_agmt
- m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
- if not m2_m1_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m2_m1_agmt)
-
- # Allow the replicas to get situated with the new agreements...
- time.sleep(2)
-
- global M1SUBJECT
- M1SUBJECT = 'CN=%s,OU=389 Directory Server' % (master1.host)
- global M2SUBJECT
- M2SUBJECT = 'CN=%s,OU=390 Directory Server' % (master2.host)
-
- #
- # Initialize all the agreements
- #
- master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
- master1.waitForReplInit(m1_m2_agmt)
-
- # Check replication is working...
- if master1.testReplication(DEFAULT_SUFFIX, master2):
- log.info('Replication is working.')
- else:
- log.fatal('Replication is not working.')
- assert False
-
- return TopologyReplication(master1, master2)
-
-
-(a)pytest.fixture(scope="module")
+M1SUBJECT = 'CN={},OU=389 Directory Server'.format(HOST_MASTER_1)
+M2SUBJECT = 'CN={},OU=390 Directory Server'.format(HOST_MASTER_2)
def add_entry(server, name, rdntmpl, start, num):
@@ -232,16 +123,16 @@ def doAndPrintIt(cmdline):
assert False
-def create_keys_certs(topology):
+def create_keys_certs(topology_m2):
log.info("\n######################### Creating SSL Keys and Certs ######################\n")
global m1confdir
- m1confdir = topology.master1.confdir
+ m1confdir = topology_m2.ms["master1"].confdir
global m2confdir
- m2confdir = topology.master2.confdir
+ m2confdir = topology_m2.ms["master2"].confdir
log.info("##### shutdown master1")
- topology.master1.stop(timeout=10)
+ topology_m2.ms["master1"].stop(timeout=10)
log.info("##### Creating a password file")
pwdfile = '%s/pwdfile.txt' % (m1confdir)
@@ -275,51 +166,55 @@ def create_keys_certs(topology):
log.info("##### Create key3.db and cert8.db database (master1): %s" % cmdline)
doAndPrintIt(cmdline)
- cmdline = ['certutil', '-G', '-d', m1confdir, '-z', noisefile, '-f', pwdfile]
+ cmdline = ['certutil', '-G', '-d', m1confdir, '-z', noisefile, '-f', pwdfile]
log.info("##### Creating encryption key for CA (master1): %s" % cmdline)
- #os.system('certutil -G -d %s -z %s -f %s' % (m1confdir, noisefile, pwdfile))
+ # os.system('certutil -G -d %s -z %s -f %s' % (m1confdir, noisefile, pwdfile))
doAndPrintIt(cmdline)
time.sleep(2)
log.info("##### Creating self-signed CA certificate (master1) -- nickname %s" % CACERT)
- os.system('( echo y ; echo ; echo y ) | certutil -S -n "%s" -s "%s" -x -t "CT,," -m 1000 -v 120 -d %s -z %s -f %s -2' % (CACERT, ISSUER, m1confdir, noisefile, pwdfile))
+ os.system(
+ '( echo y ; echo ; echo y ) | certutil -S -n "%s" -s "%s" -x -t "CT,," -m 1000 -v 120 -d %s -z %s -f %s -2' % (
+ CACERT, ISSUER, m1confdir, noisefile, pwdfile))
global M1SUBJECT
- cmdline = ['certutil', '-S', '-n', M1SERVERCERT, '-s', M1SUBJECT, '-c', CACERT, '-t', ',,', '-m', '1001', '-v', '120', '-d', m1confdir, '-z', noisefile, '-f', pwdfile]
+ cmdline = ['certutil', '-S', '-n', M1SERVERCERT, '-s', M1SUBJECT, '-c', CACERT, '-t', ',,', '-m', '1001', '-v',
+ '120', '-d', m1confdir, '-z', noisefile, '-f', pwdfile]
log.info("##### Creating Server certificate -- nickname %s: %s" % (M1SERVERCERT, cmdline))
doAndPrintIt(cmdline)
time.sleep(2)
global M2SUBJECT
- cmdline = ['certutil', '-S', '-n', M2SERVERCERT, '-s', M2SUBJECT, '-c', CACERT, '-t', ',,', '-m', '1002', '-v', '120', '-d', m1confdir, '-z', noisefile, '-f', pwdfile]
+ cmdline = ['certutil', '-S', '-n', M2SERVERCERT, '-s', M2SUBJECT, '-c', CACERT, '-t', ',,', '-m', '1002', '-v',
+ '120', '-d', m1confdir, '-z', noisefile, '-f', pwdfile]
log.info("##### Creating Server certificate -- nickname %s: %s" % (M2SERVERCERT, cmdline))
doAndPrintIt(cmdline)
time.sleep(2)
log.info("##### start master1")
- topology.master1.start(timeout=10)
+ topology_m2.ms["master1"].start(timeout=10)
log.info("##### enable SSL in master1 with all ciphers")
- enable_ssl(topology.master1, M1LDAPSPORT, M1SERVERCERT)
+ enable_ssl(topology_m2.ms["master1"], M1LDAPSPORT, M1SERVERCERT)
cmdline = ['certutil', '-L', '-d', m1confdir]
log.info("##### Check the cert db: %s" % cmdline)
doAndPrintIt(cmdline)
log.info("##### restart master1")
- topology.master1.restart(timeout=10)
+ topology_m2.ms["master1"].restart(timeout=10)
log.info("##### Check PEM files of master1 (before setting nsslapd-extract-pemfiles")
check_pems(m1confdir, CACERT, M1SERVERCERT, M1SERVERCERT + '-Key', " not")
log.info("##### Set on to nsslapd-extract-pemfiles")
- topology.master1.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-extract-pemfiles', 'on')])
+ topology_m2.ms["master1"].modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-extract-pemfiles', 'on')])
log.info("##### restart master1")
- topology.master1.restart(timeout=10)
+ topology_m2.ms["master1"].restart(timeout=10)
log.info("##### Check PEM files of master1 (after setting nsslapd-extract-pemfiles")
check_pems(m1confdir, CACERT, M1SERVERCERT, M1SERVERCERT + '-Key', "")
@@ -339,7 +234,7 @@ def create_keys_certs(topology):
assert False
log.info("##### stop master2")
- topology.master2.stop(timeout=10)
+ topology_m2.ms["master2"].stop(timeout=10)
log.info("##### Initialize Cert DB for master2")
cmdline = ['certutil', '-N', '-d', m2confdir, '-f', pwdfile]
@@ -358,41 +253,40 @@ def create_keys_certs(topology):
os.system('chmod 400 %s' % m2pinfile)
log.info("##### start master2")
- topology.master2.start(timeout=10)
+ topology_m2.ms["master2"].start(timeout=10)
log.info("##### enable SSL in master2 with all ciphers")
- enable_ssl(topology.master2, M2LDAPSPORT, M2SERVERCERT)
+ enable_ssl(topology_m2.ms["master2"], M2LDAPSPORT, M2SERVERCERT)
log.info("##### restart master2")
- topology.master2.restart(timeout=10)
+ topology_m2.ms["master2"].restart(timeout=10)
log.info("##### Check PEM files of master2 (before setting nsslapd-extract-pemfiles")
check_pems(m2confdir, CACERT, M2SERVERCERT, M2SERVERCERT + '-Key', " not")
log.info("##### Set on to nsslapd-extract-pemfiles")
- topology.master2.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-extract-pemfiles', 'on')])
+ topology_m2.ms["master2"].modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-extract-pemfiles', 'on')])
log.info("##### restart master2")
- topology.master2.restart(timeout=10)
+ topology_m2.ms["master2"].restart(timeout=10)
log.info("##### Check PEM files of master2 (after setting nsslapd-extract-pemfiles")
check_pems(m2confdir, CACERT, M2SERVERCERT, M2SERVERCERT + '-Key', "")
log.info("##### restart master1")
- topology.master1.restart(timeout=10)
-
+ topology_m2.ms["master1"].restart(timeout=10)
log.info("\n######################### Creating SSL Keys and Certs Done ######################\n")
-def config_tls_agreements(topology):
+def config_tls_agreements(topology_m2):
log.info("######################### Configure SSL/TLS agreements ######################")
log.info("######################## master1 -- startTLS -> master2 #####################")
log.info("##################### master1 <- tls_clientAuth -- master2 ##################")
log.info("##### Update the agreement of master1")
- global m1_m2_agmt
- topology.master1.modify_s(m1_m2_agmt, [(ldap.MOD_REPLACE, 'nsDS5ReplicaTransportInfo', 'TLS')])
+ m1_m2_agmt = topology_m2.ms["master1_agmts"]["m1_m2"]
+ topology_m2.ms["master1"].modify_s(m1_m2_agmt, [(ldap.MOD_REPLACE, 'nsDS5ReplicaTransportInfo', 'TLS')])
log.info("##### Add the cert to the repl manager on master1")
global mytmp
@@ -402,8 +296,8 @@ def config_tls_agreements(topology):
m2servercertstr = ''
for l in m2sc.readlines():
if ((l == "") or l.startswith('This file is auto-generated') or
- l.startswith('Do not edit') or l.startswith('Issuer:') or
- l.startswith('Subject:') or l.startswith('-----')):
+ l.startswith('Do not edit') or l.startswith('Issuer:') or
+ l.startswith('Subject:') or l.startswith('-----')):
continue
m2servercertstr = "%s%s" % (m2servercertstr, l.rstrip())
m2sc.close()
@@ -411,17 +305,18 @@ def config_tls_agreements(topology):
log.info('##### master2 Server Cert in base64 format: %s' % m2servercertstr)
replmgr = defaultProperties[REPLICATION_BIND_DN]
- rentry = topology.master1.search_s(replmgr, ldap.SCOPE_BASE, 'objectclass=*')
+ rentry = topology_m2.ms["master1"].search_s(replmgr, ldap.SCOPE_BASE, 'objectclass=*')
log.info('##### Replication manager on master1: %s' % replmgr)
oc = 'ObjectClass'
log.info(' %s:' % oc)
if rentry:
for val in rentry[0].getValues(oc):
log.info(' : %s' % val)
- topology.master1.modify_s(replmgr, [(ldap.MOD_ADD, oc, 'extensibleObject')])
+ topology_m2.ms["master1"].modify_s(replmgr, [(ldap.MOD_ADD, oc, 'extensibleObject')])
global M2SUBJECT
- topology.master1.modify_s(replmgr, [(ldap.MOD_ADD, 'userCertificate;binary', base64.b64decode(m2servercertstr)),
+ topology_m2.ms["master1"].modify_s(replmgr,
+ [(ldap.MOD_ADD, 'userCertificate;binary', base64.b64decode(m2servercertstr)),
(ldap.MOD_ADD, 'description', M2SUBJECT)])
log.info("##### Modify the certmap.conf on master1")
@@ -437,32 +332,32 @@ def config_tls_agreements(topology):
os.system('chmod 440 %s' % m1certmap)
log.info("##### Update the agreement of master2")
- global m2_m1_agmt
- topology.master2.modify_s(m2_m1_agmt, [(ldap.MOD_REPLACE, 'nsDS5ReplicaTransportInfo', 'TLS'),
- (ldap.MOD_REPLACE, 'nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH')])
+ m2_m1_agmt = topology_m2.ms["master2_agmts"]["m2_m1"]
+ topology_m2.ms["master2"].modify_s(m2_m1_agmt, [(ldap.MOD_REPLACE, 'nsDS5ReplicaTransportInfo', 'TLS'),
+ (ldap.MOD_REPLACE, 'nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH')])
- topology.master1.stop(10)
- topology.master2.stop(10)
- topology.master1.start(10)
- topology.master2.start(10)
+ topology_m2.ms["master1"].stop(10)
+ topology_m2.ms["master2"].stop(10)
+ topology_m2.ms["master1"].start(10)
+ topology_m2.ms["master2"].start(10)
log.info("\n######################### Configure SSL/TLS agreements Done ######################\n")
-def relocate_pem_files(topology):
+def relocate_pem_files(topology_m2):
log.info("######################### Relocate PEM files on master1 ######################")
mycacert = 'MyCA'
- topology.master1.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'CACertExtractFile', mycacert)])
+ topology_m2.ms["master1"].modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'CACertExtractFile', mycacert)])
myservercert = 'MyServerCert1'
myserverkey = 'MyServerKey1'
- topology.master1.modify_s(RSA_DN, [(ldap.MOD_REPLACE, 'ServerCertExtractFile', myservercert),
- (ldap.MOD_REPLACE, 'ServerKeyExtractFile', myserverkey)])
+ topology_m2.ms["master1"].modify_s(RSA_DN, [(ldap.MOD_REPLACE, 'ServerCertExtractFile', myservercert),
+ (ldap.MOD_REPLACE, 'ServerKeyExtractFile', myserverkey)])
log.info("##### restart master1")
- topology.master1.restart(timeout=10)
+ topology_m2.ms["master1"].restart(timeout=10)
check_pems(m1confdir, mycacert, myservercert, myserverkey, "")
-def test_ticket47536(topology):
+def test_ticket47536(topology_m2):
"""
Set up 2way MMR:
master_1 ----- startTLS -----> master_2
@@ -477,44 +372,45 @@ def test_ticket47536(topology):
"""
log.info("Ticket 47536 - Allow usage of OpenLDAP libraries that don't use NSS for crypto")
- create_keys_certs(topology)
- config_tls_agreements(topology)
+ create_keys_certs(topology_m2)
+ config_tls_agreements(topology_m2)
- add_entry(topology.master1, 'master1', 'uid=m1user', 0, 5)
- add_entry(topology.master2, 'master2', 'uid=m2user', 0, 5)
+ add_entry(topology_m2.ms["master1"], 'master1', 'uid=m1user', 0, 5)
+ add_entry(topology_m2.ms["master2"], 'master2', 'uid=m2user', 0, 5)
time.sleep(1)
log.info('##### Searching for entries on master1...')
- entries = topology.master1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
+ entries = topology_m2.ms["master1"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
assert 10 == len(entries)
log.info('##### Searching for entries on master2...')
- entries = topology.master2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
+ entries = topology_m2.ms["master2"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
assert 10 == len(entries)
- relocate_pem_files(topology)
+ relocate_pem_files(topology_m2)
- add_entry(topology.master1, 'master1', 'uid=m1user', 10, 5)
- add_entry(topology.master2, 'master2', 'uid=m2user', 10, 5)
+ add_entry(topology_m2.ms["master1"], 'master1', 'uid=m1user', 10, 5)
+ add_entry(topology_m2.ms["master2"], 'master2', 'uid=m2user', 10, 5)
time.sleep(10)
log.info('##### Searching for entries on master1...')
- entries = topology.master1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
+ entries = topology_m2.ms["master1"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
assert 20 == len(entries)
log.info('##### Searching for entries on master2...')
- entries = topology.master2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
+ entries = topology_m2.ms["master2"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
assert 20 == len(entries)
- db2ldifpl = '%s/sbin/db2ldif.pl' % topology.master1.prefix
+ db2ldifpl = '%s/sbin/db2ldif.pl' % topology_m2.ms["master1"].prefix
cmdline = [db2ldifpl, '-n', 'userRoot', '-Z', SERVERID_MASTER_1, '-D', DN_DM, '-w', PASSWORD]
log.info("##### db2ldif.pl -- %s" % (cmdline))
doAndPrintIt(cmdline)
log.info("Ticket 47536 - PASSED")
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/dirsrvtests/tests/tickets/ticket47553_test.py b/dirsrvtests/tests/tickets/ticket47553_test.py
index 01cd08c..08454d4 100644
--- a/dirsrvtests/tests/tickets/ticket47553_test.py
+++ b/dirsrvtests/tests/tickets/ticket47553_test.py
@@ -6,24 +6,14 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
CONTAINER_1_OU = 'test_ou_1'
CONTAINER_2_OU = 'test_ou_2'
CONTAINER_1 = 'ou=%s,dc=example,dc=com' % CONTAINER_1_OU
@@ -33,69 +23,32 @@ USER_PWD = 'Secret123'
USER = 'cn=%s,%s' % (USER_CN, CONTAINER_1)
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- return TopologyStandalone(standalone)
-
-
@pytest.fixture(scope="module")
-def env_setup(topology):
+def env_setup(topology_st):
"""Adds two containers, one user and two ACI rules"""
try:
log.info("Add a container: %s" % CONTAINER_1)
- topology.standalone.add_s(Entry((CONTAINER_1,
- {'objectclass': 'top',
- 'objectclass': 'organizationalunit',
- 'ou': CONTAINER_1_OU,
- })))
+ topology_st.standalone.add_s(Entry((CONTAINER_1,
+ {'objectclass': 'top',
+ 'objectclass': 'organizationalunit',
+ 'ou': CONTAINER_1_OU,
+ })))
log.info("Add a container: %s" % CONTAINER_2)
- topology.standalone.add_s(Entry((CONTAINER_2,
- {'objectclass': 'top',
- 'objectclass': 'organizationalunit',
- 'ou': CONTAINER_2_OU,
- })))
+ topology_st.standalone.add_s(Entry((CONTAINER_2,
+ {'objectclass': 'top',
+ 'objectclass': 'organizationalunit',
+ 'ou': CONTAINER_2_OU,
+ })))
log.info("Add a user: %s" % USER)
- topology.standalone.add_s(Entry((USER,
- {'objectclass': 'top person'.split(),
- 'cn': USER_CN,
- 'sn': USER_CN,
- 'userpassword': USER_PWD
- })))
+ topology_st.standalone.add_s(Entry((USER,
+ {'objectclass': 'top person'.split(),
+ 'cn': USER_CN,
+ 'sn': USER_CN,
+ 'userpassword': USER_PWD
+ })))
except ldap.LDAPError as e:
log.error('Failed to add object to database: %s' % e.message['desc'])
assert False
@@ -109,17 +62,17 @@ def env_setup(topology):
try:
log.info("Add an ACI 'allow (all)' by %s to the %s" % (USER,
CONTAINER_1))
- topology.standalone.modify_s(CONTAINER_1, mod)
+ topology_st.standalone.modify_s(CONTAINER_1, mod)
log.info("Add an ACI 'allow (all)' by %s to the %s" % (USER,
CONTAINER_2))
- topology.standalone.modify_s(CONTAINER_2, mod)
+ topology_st.standalone.modify_s(CONTAINER_2, mod)
except ldap.LDAPError as e:
log.fatal('Failed to add ACI: error (%s)' % (e.message['desc']))
assert False
-def test_ticket47553(topology, env_setup):
+def test_ticket47553(topology_st, env_setup):
"""Tests, that MODRDN operation is allowed,
if user has ACI right '(all)' under superior entries,
but doesn't have '(modrdn)'
@@ -127,7 +80,7 @@ def test_ticket47553(topology, env_setup):
log.info("Bind as %s" % USER)
try:
- topology.standalone.simple_bind_s(USER, USER_PWD)
+ topology_st.standalone.simple_bind_s(USER, USER_PWD)
except ldap.LDAPError as e:
log.error('Bind failed for %s, error %s' % (USER, e.message['desc']))
assert False
@@ -135,23 +88,23 @@ def test_ticket47553(topology, env_setup):
log.info("User MODRDN operation from %s to %s" % (CONTAINER_1,
CONTAINER_2))
try:
- topology.standalone.rename_s(USER, "cn=%s" % USER_CN,
- newsuperior=CONTAINER_2, delold=1)
+ topology_st.standalone.rename_s(USER, "cn=%s" % USER_CN,
+ newsuperior=CONTAINER_2, delold=1)
except ldap.LDAPError as e:
log.error('MODRDN failed for %s, error %s' % (USER, e.message['desc']))
assert False
try:
log.info("Check there is no user in %s" % CONTAINER_1)
- entries = topology.standalone.search_s(CONTAINER_1,
- ldap.SCOPE_ONELEVEL,
- 'cn=%s' % USER_CN)
+ entries = topology_st.standalone.search_s(CONTAINER_1,
+ ldap.SCOPE_ONELEVEL,
+ 'cn=%s' % USER_CN)
assert not entries
log.info("Check there is our user in %s" % CONTAINER_2)
- entries = topology.standalone.search_s(CONTAINER_2,
- ldap.SCOPE_ONELEVEL,
- 'cn=%s' % USER_CN)
+ entries = topology_st.standalone.search_s(CONTAINER_2,
+ ldap.SCOPE_ONELEVEL,
+ 'cn=%s' % USER_CN)
assert entries
except ldap.LDAPError as e:
log.fatal('Search failed, error: ' + e.message['desc'])
diff --git a/dirsrvtests/tests/tickets/ticket47560_test.py b/dirsrvtests/tests/tickets/ticket47560_test.py
index f52926f..175b712 100644
--- a/dirsrvtests/tests/tickets/ticket47560_test.py
+++ b/dirsrvtests/tests/tickets/ticket47560_test.py
@@ -6,62 +6,20 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
+import logging
import time
+
import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
from lib389._constants import *
from lib389.properties import *
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-
-def test_ticket47560(topology):
+def test_ticket47560(topology_st):
"""
This test case does the following:
SETUP
@@ -90,19 +48,19 @@ def test_ticket47560(topology):
"""
# enable/disable the mbo plugin
if value == 'on':
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
else:
- topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
log.debug("-------------> _enable_disable_mbo(%s)" % value)
- topology.standalone.stop(timeout=120)
+ topology_st.standalone.stop(timeout=120)
time.sleep(1)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
time.sleep(3)
# need to reopen a connection toward the instance
- topology.standalone.open()
+ topology_st.standalone.open()
def _test_ticket47560_setup():
"""
@@ -123,7 +81,7 @@ def test_ticket47560(topology):
entry.setValues('objectclass', 'top', 'groupOfNames', 'inetUser')
entry.setValues('cn', 'group')
try:
- topology.standalone.add_s(entry)
+ topology_st.standalone.add_s(entry)
except ldap.ALREADY_EXISTS:
log.debug("Entry %s already exists" % (group_DN))
@@ -133,12 +91,12 @@ def test_ticket47560(topology):
entry.setValues('cn', 'member')
entry.setValues('sn', 'member')
try:
- topology.standalone.add_s(entry)
+ topology_st.standalone.add_s(entry)
except ldap.ALREADY_EXISTS:
log.debug("Entry %s already exists" % (member_DN))
replace = [(ldap.MOD_REPLACE, 'memberof', group_DN)]
- topology.standalone.modify_s(member_DN, replace)
+ topology_st.standalone.modify_s(member_DN, replace)
#
# enable the memberof plugin and restart the instance
@@ -149,12 +107,12 @@ def test_ticket47560(topology):
# check memberof attribute is still present
#
filt = 'uid=member'
- ents = topology.standalone.search_s(member_DN, ldap.SCOPE_BASE, filt)
+ ents = topology_st.standalone.search_s(member_DN, ldap.SCOPE_BASE, filt)
assert len(ents) == 1
ent = ents[0]
- #print ent
+ # print ent
value = ent.getValue('memberof')
- #print "memberof: %s" % (value)
+ # print "memberof: %s" % (value)
assert value == group_DN
def _test_ticket47560_teardown():
@@ -166,11 +124,11 @@ def test_ticket47560(topology):
log.debug("-------- > _test_ticket47560_teardown\n")
# remove the entries group_DN and member_DN
try:
- topology.standalone.delete_s(group_DN)
+ topology_st.standalone.delete_s(group_DN)
except:
log.warning("Entry %s fail to delete" % (group_DN))
try:
- topology.standalone.delete_s(member_DN)
+ topology_st.standalone.delete_s(member_DN)
except:
log.warning("Entry %s fail to delete" % (member_DN))
#
@@ -178,7 +136,7 @@ def test_ticket47560(topology):
#
_enable_disable_mbo('off')
- group_DN = "cn=group,%s" % (SUFFIX)
+ group_DN = "cn=group,%s" % (SUFFIX)
member_DN = "uid=member,%s" % (SUFFIX)
#
@@ -194,15 +152,15 @@ def test_ticket47560(topology):
log.debug("-------- > Start ticket tests\n")
filt = 'uid=member'
- ents = topology.standalone.search_s(member_DN, ldap.SCOPE_BASE, filt)
+ ents = topology_st.standalone.search_s(member_DN, ldap.SCOPE_BASE, filt)
assert len(ents) == 1
ent = ents[0]
log.debug("Unfixed entry %r\n" % ent)
# run the fixup task
- topology.standalone.tasks.fixupMemberOf(suffix=SUFFIX, args={TASK_WAIT: True})
+ topology_st.standalone.tasks.fixupMemberOf(suffix=SUFFIX, args={TASK_WAIT: True})
- ents = topology.standalone.search_s(member_DN, ldap.SCOPE_BASE, filt)
+ ents = topology_st.standalone.search_s(member_DN, ldap.SCOPE_BASE, filt)
assert len(ents) == 1
ent = ents[0]
log.debug("Fixed entry %r\n" % ent)
@@ -228,4 +186,3 @@ if __name__ == '__main__':
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s %s" % CURRENT_FILE)
-
diff --git a/dirsrvtests/tests/tickets/ticket47573_test.py b/dirsrvtests/tests/tickets/ticket47573_test.py
index e7e9641..1b443ef 100644
--- a/dirsrvtests/tests/tickets/ticket47573_test.py
+++ b/dirsrvtests/tests/tickets/ticket47573_test.py
@@ -11,17 +11,15 @@ Created on Nov 7, 2013
@author: tbordaz
'''
-import os
-import sys
+import logging
+import re
import time
+
import ldap
-import logging
import pytest
-import re
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_m1c1
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
@@ -30,19 +28,10 @@ TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX
ENTRY_DN = "cn=test_entry, %s" % SUFFIX
MUST_OLD = "(postalAddress $ preferredLocale $ telexNumber)"
-MAY_OLD = "(postalCode $ street)"
+MAY_OLD = "(postalCode $ street)"
MUST_NEW = "(postalAddress $ preferredLocale)"
-MAY_NEW = "(telexNumber $ postalCode $ street)"
-
-
-class TopologyMasterConsumer(object):
- def __init__(self, master, consumer):
- master.open()
- self.master = master
-
- consumer.open()
- self.consumer = consumer
+MAY_NEW = "(telexNumber $ postalCode $ street)"
def pattern_errorlog(file, log_pattern):
@@ -70,9 +59,9 @@ def pattern_errorlog(file, log_pattern):
def _oc_definition(oid_ext, name, must=None, may=None):
- oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext
+ oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext
desc = 'To test ticket 47573'
- sup = 'person'
+ sup = 'person'
if not must:
must = MUST_OLD
if not may:
@@ -94,7 +83,7 @@ def mod_OC(instance, oid_ext, name, old_must=None, old_may=None, new_must=None,
instance.schema.add_schema('objectClasses', new_oc)
-def trigger_schema_push(topology):
+def trigger_schema_push(topology_m1c1):
"""
It triggers an update on the supplier. This will start a replication
session and a schema push
@@ -104,13 +93,14 @@ def trigger_schema_push(topology):
except AttributeError:
trigger_schema_push.value = 1
replace = [(ldap.MOD_REPLACE, 'telephonenumber', str(trigger_schema_push.value))]
- topology.master.modify_s(ENTRY_DN, replace)
+ topology_m1c1.ms["master1"].modify_s(ENTRY_DN, replace)
# wait 10 seconds that the update is replicated
loop = 0
while loop <= 10:
try:
- ent = topology.consumer.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber'])
+ ent = topology_m1c1.cs["consumer1"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)",
+ ['telephonenumber'])
val = ent.telephonenumber or "0"
if int(val) == trigger_schema_push.value:
return
@@ -123,104 +113,24 @@ def trigger_schema_push(topology):
loop += 1
-@pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to create a replicated topology for the 'module'.
- The replicated topology is MASTER -> Consumer.
- '''
- master = DirSrv(verbose=False)
- consumer = DirSrv(verbose=False)
-
- # Args for the master instance
- args_instance[SER_HOST] = HOST_MASTER_1
- args_instance[SER_PORT] = PORT_MASTER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
- args_master = args_instance.copy()
- master.allocate(args_master)
-
- # Args for the consumer instance
- args_instance[SER_HOST] = HOST_CONSUMER_1
- args_instance[SER_PORT] = PORT_CONSUMER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
- args_consumer = args_instance.copy()
- consumer.allocate(args_consumer)
-
- # Get the status of the instance
- instance_master = master.exists()
- instance_consumer = consumer.exists()
-
- # Remove all the instances
- if instance_master:
- master.delete()
- if instance_consumer:
- consumer.delete()
-
- # Create the instances
- master.create()
- master.open()
- consumer.create()
- consumer.open()
-
- #
- # Now prepare the Master-Consumer topology
- #
- # First Enable replication
- master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
- consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER)
-
- # Initialize the supplier->consumer
-
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties)
-
- if not repl_agreement:
- log.fatal("Fail to create a replica agreement")
- sys.exit(1)
-
- log.debug("%s created" % repl_agreement)
- master.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
- master.waitForReplInit(repl_agreement)
-
- # Check replication is working fine
- if master.testReplication(DEFAULT_SUFFIX, consumer):
- log.info('Replication is working.')
- else:
- log.fatal('Replication is not working.')
- assert False
-
- def fin():
- master.delete()
- consumer.delete()
- request.addfinalizer(fin)
-
- # Here we have two instances master and consumer
- # with replication working.
- return TopologyMasterConsumer(master, consumer)
-
-
-def test_ticket47573_init(topology):
+def test_ticket47573_init(topology_m1c1):
"""
Initialize the test environment
"""
- log.debug("test_ticket47573_init topology %r (master %r, consumer %r" %
- (topology, topology.master, topology.consumer))
+ log.debug("test_ticket47573_init topology_m1c1 %r (master %r, consumer %r" %
+ (topology_m1c1, topology_m1c1.ms["master1"], topology_m1c1.cs["consumer1"]))
# the test case will check if a warning message is logged in the
# error log of the supplier
- topology.master.errorlog_file = open(topology.master.errlog, "r")
+ topology_m1c1.ms["master1"].errorlog_file = open(topology_m1c1.ms["master1"].errlog, "r")
# This entry will be used to trigger attempt of schema push
- topology.master.add_s(Entry((ENTRY_DN, {
- 'objectclass': "top person".split(),
- 'sn': 'test_entry',
- 'cn': 'test_entry'})))
+ topology_m1c1.ms["master1"].add_s(Entry((ENTRY_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': 'test_entry',
+ 'cn': 'test_entry'})))
-def test_ticket47573_one(topology):
+def test_ticket47573_one(topology_m1c1):
"""
Summary: Add a custom OC with MUST and MAY
MUST = postalAddress $ preferredLocale
@@ -231,17 +141,18 @@ def test_ticket47573_one(topology):
- consumer +OCwithMayAttr
"""
- log.debug("test_ticket47573_one topology %r (master %r, consumer %r" % (topology, topology.master, topology.consumer))
+ log.debug("test_ticket47573_one topology_m1c1 %r (master %r, consumer %r" % (
+ topology_m1c1, topology_m1c1.ms["master1"], topology_m1c1.cs["consumer1"]))
# update the schema of the supplier so that it is a superset of
# consumer. Schema should be pushed
new_oc = _oc_definition(2, 'OCwithMayAttr',
- must = MUST_OLD,
- may = MAY_OLD)
- topology.master.schema.add_schema('objectClasses', new_oc)
+ must=MUST_OLD,
+ may=MAY_OLD)
+ topology_m1c1.ms["master1"].schema.add_schema('objectClasses', new_oc)
- trigger_schema_push(topology)
- master_schema_csn = topology.master.schema.get_schema_csn()
- consumer_schema_csn = topology.consumer.schema.get_schema_csn()
+ trigger_schema_push(topology_m1c1)
+ master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn()
+ consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn()
# Check the schemaCSN was updated on the consumer
log.debug("test_ticket47573_one master_schema_csn=%s", master_schema_csn)
@@ -250,11 +161,11 @@ def test_ticket47573_one(topology):
# Check the error log of the supplier does not contain an error
regex = re.compile("must not be overwritten \(set replication log for additional info\)")
- res = pattern_errorlog(topology.master.errorlog_file, regex)
+ res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex)
assert res is None
-def test_ticket47573_two(topology):
+def test_ticket47573_two(topology_m1c1):
"""
Summary: Change OCwithMayAttr to move a MAY attribute to a MUST attribute
@@ -266,12 +177,13 @@ def test_ticket47573_two(topology):
"""
# Update the objectclass so that a MAY attribute is moved to MUST attribute
- mod_OC(topology.master, 2, 'OCwithMayAttr', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_NEW)
+ mod_OC(topology_m1c1.ms["master1"], 2, 'OCwithMayAttr', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD,
+ new_may=MAY_NEW)
# now push the scheam
- trigger_schema_push(topology)
- master_schema_csn = topology.master.schema.get_schema_csn()
- consumer_schema_csn = topology.consumer.schema.get_schema_csn()
+ trigger_schema_push(topology_m1c1)
+ master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn()
+ consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn()
# Check the schemaCSN was NOT updated on the consumer
log.debug("test_ticket47573_two master_schema_csn=%s", master_schema_csn)
@@ -280,29 +192,29 @@ def test_ticket47573_two(topology):
# Check the error log of the supplier does not contain an error
regex = re.compile("must not be overwritten \(set replication log for additional info\)")
- res = pattern_errorlog(topology.master.errorlog_file, regex)
+ res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex)
assert res is None
-def test_ticket47573_three(topology):
+def test_ticket47573_three(topology_m1c1):
'''
Create a entry with OCwithMayAttr OC
'''
# Check replication is working fine
dn = "cn=ticket47573, %s" % SUFFIX
- topology.master.add_s(Entry((dn,
- {'objectclass': "top person OCwithMayAttr".split(),
- 'sn': 'test_repl',
- 'cn': 'test_repl',
- 'postalAddress': 'here',
- 'preferredLocale': 'en',
- 'telexNumber': '12$us$21',
- 'postalCode': '54321'})))
+ topology_m1c1.ms["master1"].add_s(Entry((dn,
+ {'objectclass': "top person OCwithMayAttr".split(),
+ 'sn': 'test_repl',
+ 'cn': 'test_repl',
+ 'postalAddress': 'here',
+ 'preferredLocale': 'en',
+ 'telexNumber': '12$us$21',
+ 'postalCode': '54321'})))
loop = 0
ent = None
while loop <= 10:
try:
- ent = topology.consumer.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
+ ent = topology_m1c1.cs["consumer1"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
break
except ldap.NO_SUCH_OBJECT:
time.sleep(1)
@@ -318,4 +230,3 @@ if __name__ == '__main__':
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s %s" % CURRENT_FILE)
-
diff --git a/dirsrvtests/tests/tickets/ticket47619_test.py b/dirsrvtests/tests/tickets/ticket47619_test.py
index 988ea04..1bd613f 100644
--- a/dirsrvtests/tests/tickets/ticket47619_test.py
+++ b/dirsrvtests/tests/tickets/ticket47619_test.py
@@ -11,16 +11,15 @@ Created on Nov 7, 2013
@author: tbordaz
'''
-import os
-import sys
+import logging
import time
+
import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
from lib389._constants import *
from lib389.properties import *
+from lib389.topologies import topology_m1c1
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
@@ -34,148 +33,58 @@ MAX_OTHERS = 100
ATTRIBUTES = ['street', 'countryName', 'description', 'postalAddress', 'postalCode', 'title', 'l', 'roomNumber']
-class TopologyMasterConsumer(object):
- def __init__(self, master, consumer):
- master.open()
- self.master = master
-
- consumer.open()
- self.consumer = consumer
-
- def __repr__(self):
- return "Master[%s] -> Consumer[%s" % (self.master, self.consumer)
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to create a replicated topology for the 'module'.
- The replicated topology is MASTER -> Consumer.
- '''
- master = DirSrv(verbose=False)
- consumer = DirSrv(verbose=False)
-
- # Args for the master instance
- args_instance[SER_HOST] = HOST_MASTER_1
- args_instance[SER_PORT] = PORT_MASTER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
- args_master = args_instance.copy()
- master.allocate(args_master)
-
- # Args for the consumer instance
- args_instance[SER_HOST] = HOST_CONSUMER_1
- args_instance[SER_PORT] = PORT_CONSUMER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
- args_consumer = args_instance.copy()
- consumer.allocate(args_consumer)
-
- # Get the status of the instance
- instance_master = master.exists()
- instance_consumer = consumer.exists()
-
- # Remove all the instances
- if instance_master:
- master.delete()
- if instance_consumer:
- consumer.delete()
-
- # Create the instances
- master.create()
- master.open()
- consumer.create()
- consumer.open()
-
- #
- # Now prepare the Master-Consumer topology
- #
- # First Enable replication
- master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
- consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER)
-
- # Initialize the supplier->consumer
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties)
-
- if not repl_agreement:
- log.fatal("Fail to create a replica agreement")
- sys.exit(1)
-
- log.debug("%s created" % repl_agreement)
- master.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
- master.waitForReplInit(repl_agreement)
-
- # Check replication is working fine
- if master.testReplication(DEFAULT_SUFFIX, consumer):
- log.info('Replication is working.')
- else:
- log.fatal('Replication is not working.')
- assert False
-
- def fin():
- master.delete()
- consumer.delete()
- request.addfinalizer(fin)
-
- # Here we have two instances master and consumer
- # with replication working.
- return TopologyMasterConsumer(master, consumer)
-
-
-def test_ticket47619_init(topology):
+def test_ticket47619_init(topology_m1c1):
"""
Initialize the test environment
"""
- topology.master.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
- #topology.master.plugins.enable(name=PLUGIN_MEMBER_OF)
- #topology.master.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
- topology.master.stop(timeout=10)
- topology.master.start(timeout=10)
+ topology_m1c1.ms["master1"].plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
+ # topology_m1c1.ms["master1"].plugins.enable(name=PLUGIN_MEMBER_OF)
+ # topology_m1c1.ms["master1"].plugins.enable(name=PLUGIN_REFER_INTEGRITY)
+ topology_m1c1.ms["master1"].stop(timeout=10)
+ topology_m1c1.ms["master1"].start(timeout=10)
- topology.master.log.info("test_ticket47619_init topology %r" % (topology))
+ topology_m1c1.ms["master1"].log.info("test_ticket47619_init topology_m1c1 %r" % (topology_m1c1))
# the test case will check if a warning message is logged in the
# error log of the supplier
- topology.master.errorlog_file = open(topology.master.errlog, "r")
+ topology_m1c1.ms["master1"].errorlog_file = open(topology_m1c1.ms["master1"].errlog, "r")
# add dummy entries
for cpt in range(MAX_OTHERS):
name = "%s%d" % (OTHER_NAME, cpt)
- topology.master.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
- 'objectclass': "top person".split(),
- 'sn': name,
- 'cn': name})))
+ topology_m1c1.ms["master1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
+ 'objectclass': "top person".split(),
+ 'sn': name,
+ 'cn': name})))
- topology.master.log.info("test_ticket47619_init: %d entries ADDed %s[0..%d]" % (MAX_OTHERS, OTHER_NAME, MAX_OTHERS-1))
+ topology_m1c1.ms["master1"].log.info(
+ "test_ticket47619_init: %d entries ADDed %s[0..%d]" % (MAX_OTHERS, OTHER_NAME, MAX_OTHERS - 1))
# Check the number of entries in the retro changelog
time.sleep(2)
- ents = topology.master.search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)")
+ ents = topology_m1c1.ms["master1"].search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)")
assert len(ents) == MAX_OTHERS
-def test_ticket47619_create_index(topology):
+def test_ticket47619_create_index(topology_m1c1):
args = {INDEX_TYPE: 'eq'}
for attr in ATTRIBUTES:
- topology.master.index.create(suffix=RETROCL_SUFFIX, attr=attr, args=args)
- topology.master.restart(timeout=10)
+ topology_m1c1.ms["master1"].index.create(suffix=RETROCL_SUFFIX, attr=attr, args=args)
+ topology_m1c1.ms["master1"].restart(timeout=10)
-def test_ticket47619_reindex(topology):
+def test_ticket47619_reindex(topology_m1c1):
'''
Reindex all the attributes in ATTRIBUTES
'''
args = {TASK_WAIT: True}
for attr in ATTRIBUTES:
- rc = topology.master.tasks.reindex(suffix=RETROCL_SUFFIX, attrname=attr, args=args)
+ rc = topology_m1c1.ms["master1"].tasks.reindex(suffix=RETROCL_SUFFIX, attrname=attr, args=args)
assert rc == 0
-def test_ticket47619_check_indexed_search(topology):
+def test_ticket47619_check_indexed_search(topology_m1c1):
for attr in ATTRIBUTES:
- ents = topology.master.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, "(%s=hello)" % attr)
+ ents = topology_m1c1.ms["master1"].search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, "(%s=hello)" % attr)
assert len(ents) == 0
@@ -184,4 +93,3 @@ if __name__ == '__main__':
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s %s" % CURRENT_FILE)
-
diff --git a/dirsrvtests/tests/tickets/ticket47640_test.py b/dirsrvtests/tests/tickets/ticket47640_test.py
index 09ed691..aa11684 100644
--- a/dirsrvtests/tests/tickets/ticket47640_test.py
+++ b/dirsrvtests/tests/tickets/ticket47640_test.py
@@ -6,59 +6,16 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_ticket47640(topology):
+def test_ticket47640(topology_st):
'''
Linked Attrs Plugins - verify that if the plugin fails to update the link entry
that the entire operation is aborted
@@ -66,25 +23,25 @@ def test_ticket47640(topology):
# Enable Dynamic plugins, and the linked Attrs plugin
try:
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
+ topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
except ldap.LDAPError as e:
ldap.fatal('Failed to enable dynamic plugin!' + e.message['desc'])
assert False
try:
- topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
+ topology_st.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
except ValueError as e:
ldap.fatal('Failed to enable linked attributes plugin!' + e.message['desc'])
assert False
# Add the plugin config entry
try:
- topology.standalone.add_s(Entry(('cn=manager link,cn=Linked Attributes,cn=plugins,cn=config', {
- 'objectclass': 'top extensibleObject'.split(),
- 'cn': 'Manager Link',
- 'linkType': 'seeAlso',
- 'managedType': 'seeAlso'
- })))
+ topology_st.standalone.add_s(Entry(('cn=manager link,cn=Linked Attributes,cn=plugins,cn=config', {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'cn': 'Manager Link',
+ 'linkType': 'seeAlso',
+ 'managedType': 'seeAlso'
+ })))
except ldap.LDAPError as e:
log.fatal('Failed to add linked attr config entry: error ' + e.message['desc'])
assert False
@@ -92,11 +49,11 @@ def test_ticket47640(topology):
# Add an entry who has a link to an entry that does not exist
OP_REJECTED = False
try:
- topology.standalone.add_s(Entry(('uid=manager,' + DEFAULT_SUFFIX, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'manager',
- 'seeAlso': 'uid=user,dc=example,dc=com'
- })))
+ topology_st.standalone.add_s(Entry(('uid=manager,' + DEFAULT_SUFFIX, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'manager',
+ 'seeAlso': 'uid=user,dc=example,dc=com'
+ })))
except ldap.UNWILLING_TO_PERFORM:
# Success
log.info('Add operation correctly rejected.')
@@ -117,5 +74,3 @@ if __name__ == '__main__':
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s %s" % CURRENT_FILE)
-
-
diff --git a/dirsrvtests/tests/tickets/ticket47653MMR_test.py b/dirsrvtests/tests/tickets/ticket47653MMR_test.py
index 5cd7118..7fe07f5 100644
--- a/dirsrvtests/tests/tickets/ticket47653MMR_test.py
+++ b/dirsrvtests/tests/tickets/ticket47653MMR_test.py
@@ -11,47 +11,39 @@ Created on Nov 7, 2013
@author: tbordaz
'''
-import os
-import sys
+import logging
import time
+
import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_m2
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-#
-# important part. We can deploy Master1 and Master2 on different versions
-#
-installation1_prefix = None
-installation2_prefix = None
-
TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX
OC_NAME = 'OCticket47653'
MUST = "(postalAddress $ postalCode)"
-MAY = "(member $ street)"
+MAY = "(member $ street)"
OTHER_NAME = 'other_entry'
MAX_OTHERS = 10
-BIND_NAME = 'bind_entry'
-BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
-BIND_PW = 'password'
+BIND_NAME = 'bind_entry'
+BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
+BIND_PW = 'password'
ENTRY_NAME = 'test_entry'
-ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
-ENTRY_OC = "top person %s" % OC_NAME
+ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
+ENTRY_OC = "top person %s" % OC_NAME
def _oc_definition(oid_ext, name, must=None, may=None):
- oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext
+ oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext
desc = 'To test ticket 47490'
- sup = 'person'
+ sup = 'person'
if not must:
must = MUST
if not may:
@@ -61,114 +53,7 @@ def _oc_definition(oid_ext, name, must=None, may=None):
return new_oc
-class TopologyMaster1Master2(object):
- def __init__(self, master1, master2):
- master1.open()
- self.master1 = master1
-
- master2.open()
- self.master2 = master2
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to create a replicated topology for the 'module'.
- The replicated topology is MASTER1 <-> Master2.
- '''
- global installation1_prefix
- global installation2_prefix
-
- # allocate master1 on a given deployement
- master1 = DirSrv(verbose=False)
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Args for the master1 instance
- args_instance[SER_HOST] = HOST_MASTER_1
- args_instance[SER_PORT] = PORT_MASTER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
- args_master = args_instance.copy()
- master1.allocate(args_master)
-
- # allocate master1 on a given deployement
- master2 = DirSrv(verbose=False)
- if installation2_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation2_prefix
-
- # Args for the consumer instance
- args_instance[SER_HOST] = HOST_MASTER_2
- args_instance[SER_PORT] = PORT_MASTER_2
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
- args_master = args_instance.copy()
- master2.allocate(args_master)
-
- # Get the status of the instance and restart it if it exists
- instance_master1 = master1.exists()
- instance_master2 = master2.exists()
-
- # Remove all the instances
- if instance_master1:
- master1.delete()
- if instance_master2:
- master2.delete()
-
- # Create the instances
- master1.create()
- master1.open()
- master2.create()
- master2.open()
-
- #
- # Now prepare the Master-Consumer topology
- #
- # First Enable replication
- master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
- master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
- # Initialize the supplier->consumer
-
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
-
- if not repl_agreement:
- log.fatal("Fail to create a replica agreement")
- sys.exit(1)
-
- log.debug("%s created" % repl_agreement)
-
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
-
- master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
- master1.waitForReplInit(repl_agreement)
-
- # Check replication is working fine
- if master1.testReplication(DEFAULT_SUFFIX, master2):
- log.info('Replication is working.')
- else:
- log.fatal('Replication is not working.')
- assert False
-
- def fin():
- master1.delete()
- master2.delete()
- request.addfinalizer(fin)
-
- # Here we have two instances master and consumer
- # with replication working.
- return TopologyMaster1Master2(master1, master2)
-
-
-def test_ticket47653_init(topology):
+def test_ticket47653_init(topology_m2):
"""
It adds
- Objectclass with MAY 'member'
@@ -177,38 +62,38 @@ def test_ticket47653_init(topology):
"""
- topology.master1.log.info("Add %s that allows 'member' attribute" % OC_NAME)
+ topology_m2.ms["master1"].log.info("Add %s that allows 'member' attribute" % OC_NAME)
new_oc = _oc_definition(2, OC_NAME, must=MUST, may=MAY)
- topology.master1.schema.add_schema('objectClasses', new_oc)
+ topology_m2.ms["master1"].schema.add_schema('objectClasses', new_oc)
# entry used to bind with
- topology.master1.log.info("Add %s" % BIND_DN)
- topology.master1.add_s(Entry((BIND_DN, {
- 'objectclass': "top person".split(),
- 'sn': BIND_NAME,
- 'cn': BIND_NAME,
- 'userpassword': BIND_PW})))
+ topology_m2.ms["master1"].log.info("Add %s" % BIND_DN)
+ topology_m2.ms["master1"].add_s(Entry((BIND_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': BIND_NAME,
+ 'cn': BIND_NAME,
+ 'userpassword': BIND_PW})))
# enable acl error logging
mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(128 + 8192))] # ACL + REPL
- topology.master1.modify_s(DN_CONFIG, mod)
- topology.master2.modify_s(DN_CONFIG, mod)
+ topology_m2.ms["master1"].modify_s(DN_CONFIG, mod)
+ topology_m2.ms["master2"].modify_s(DN_CONFIG, mod)
# remove all aci's and start with a clean slate
mod = [(ldap.MOD_DELETE, 'aci', None)]
- topology.master1.modify_s(SUFFIX, mod)
- topology.master2.modify_s(SUFFIX, mod)
+ topology_m2.ms["master1"].modify_s(SUFFIX, mod)
+ topology_m2.ms["master2"].modify_s(SUFFIX, mod)
# add dummy entries
for cpt in range(MAX_OTHERS):
name = "%s%d" % (OTHER_NAME, cpt)
- topology.master1.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
- 'objectclass': "top person".split(),
- 'sn': name,
- 'cn': name})))
+ topology_m2.ms["master1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
+ 'objectclass': "top person".split(),
+ 'sn': name,
+ 'cn': name})))
-def test_ticket47653_add(topology):
+def test_ticket47653_add(topology_m2):
'''
This test ADD an entry on MASTER1 where 47653 is fixed. Then it checks that entry is replicated
on MASTER2 (even if on MASTER2 47653 is NOT fixed). Then update on MASTER2 and check the update on MASTER1
@@ -218,11 +103,11 @@ def test_ticket47653_add(topology):
- with the proper ACI we can not ADD with 'member' attribute
- with the proper ACI and 'member' it succeeds to ADD
'''
- topology.master1.log.info("\n\n######################### ADD ######################\n")
+ topology_m2.ms["master1"].log.info("\n\n######################### ADD ######################\n")
# bind as bind_entry
- topology.master1.log.info("Bind as %s" % BIND_DN)
- topology.master1.simple_bind_s(BIND_DN, BIND_PW)
+ topology_m2.ms["master1"].log.info("Bind as %s" % BIND_DN)
+ topology_m2.ms["master1"].simple_bind_s(BIND_DN, BIND_PW)
# Prepare the entry with multivalued members
entry_with_members = Entry(ENTRY_DN)
@@ -251,68 +136,68 @@ def test_ticket47653_add(topology):
# entry to add WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS
try:
- topology.master1.log.info("Try to add Add %s (aci is missing): %r" % (ENTRY_DN, entry_with_member))
+ topology_m2.ms["master1"].log.info("Try to add Add %s (aci is missing): %r" % (ENTRY_DN, entry_with_member))
- topology.master1.add_s(entry_with_member)
+ topology_m2.ms["master1"].add_s(entry_with_member)
except Exception as e:
- topology.master1.log.info("Exception (expected): %s" % type(e).__name__)
+ topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__)
assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
# Ok Now add the proper ACI
- topology.master1.log.info("Bind as %s and add the ADD SELFDN aci" % DN_DM)
- topology.master1.simple_bind_s(DN_DM, PASSWORD)
+ topology_m2.ms["master1"].log.info("Bind as %s and add the ADD SELFDN aci" % DN_DM)
+ topology_m2.ms["master1"].simple_bind_s(DN_DM, PASSWORD)
- ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX
+ ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX
ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME
- ACI_ALLOW = "(version 3.0; acl \"SelfDN add\"; allow (add)"
- ACI_SUBJECT = " userattr = \"member#selfDN\";)"
- ACI_BODY = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT
+ ACI_ALLOW = "(version 3.0; acl \"SelfDN add\"; allow (add)"
+ ACI_SUBJECT = " userattr = \"member#selfDN\";)"
+ ACI_BODY = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT
mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)]
- topology.master1.modify_s(SUFFIX, mod)
+ topology_m2.ms["master1"].modify_s(SUFFIX, mod)
time.sleep(1)
# bind as bind_entry
- topology.master1.log.info("Bind as %s" % BIND_DN)
- topology.master1.simple_bind_s(BIND_DN, BIND_PW)
+ topology_m2.ms["master1"].log.info("Bind as %s" % BIND_DN)
+ topology_m2.ms["master1"].simple_bind_s(BIND_DN, BIND_PW)
# entry to add WITHOUT member and WITH the ACI -> ldap.INSUFFICIENT_ACCESS
try:
- topology.master1.log.info("Try to add Add %s (member is missing)" % ENTRY_DN)
- topology.master1.add_s(Entry((ENTRY_DN, {
- 'objectclass': ENTRY_OC.split(),
- 'sn': ENTRY_NAME,
- 'cn': ENTRY_NAME,
- 'postalAddress': 'here',
- 'postalCode': '1234'})))
+ topology_m2.ms["master1"].log.info("Try to add Add %s (member is missing)" % ENTRY_DN)
+ topology_m2.ms["master1"].add_s(Entry((ENTRY_DN, {
+ 'objectclass': ENTRY_OC.split(),
+ 'sn': ENTRY_NAME,
+ 'cn': ENTRY_NAME,
+ 'postalAddress': 'here',
+ 'postalCode': '1234'})))
except Exception as e:
- topology.master1.log.info("Exception (expected): %s" % type(e).__name__)
+ topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__)
assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
# entry to add WITH memberS and WITH the ACI -> ldap.INSUFFICIENT_ACCESS
# member should contain only one value
try:
- topology.master1.log.info("Try to add Add %s (with several member values)" % ENTRY_DN)
- topology.master1.add_s(entry_with_members)
+ topology_m2.ms["master1"].log.info("Try to add Add %s (with several member values)" % ENTRY_DN)
+ topology_m2.ms["master1"].add_s(entry_with_members)
except Exception as e:
- topology.master1.log.info("Exception (expected): %s" % type(e).__name__)
+ topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__)
assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
- topology.master1.log.info("Try to add Add %s should be successful" % ENTRY_DN)
+ topology_m2.ms["master1"].log.info("Try to add Add %s should be successful" % ENTRY_DN)
try:
- topology.master1.add_s(entry_with_member)
+ topology_m2.ms["master1"].add_s(entry_with_member)
except ldap.LDAPError as e:
- topology.master1.log.info("Failed to add entry, error: " + e.message['desc'])
+ topology_m2.ms["master1"].log.info("Failed to add entry, error: " + e.message['desc'])
assert False
#
# Now check the entry as been replicated
#
- topology.master2.simple_bind_s(DN_DM, PASSWORD)
- topology.master1.log.info("Try to retrieve %s from Master2" % ENTRY_DN)
+ topology_m2.ms["master2"].simple_bind_s(DN_DM, PASSWORD)
+ topology_m2.ms["master1"].log.info("Try to retrieve %s from Master2" % ENTRY_DN)
loop = 0
while loop <= 10:
try:
- ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ ent = topology_m2.ms["master2"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
break
except ldap.NO_SUCH_OBJECT:
time.sleep(1)
@@ -320,15 +205,15 @@ def test_ticket47653_add(topology):
assert loop <= 10
# Now update the entry on Master2 (as DM because 47653 is possibly not fixed on M2)
- topology.master1.log.info("Update %s on M2" % ENTRY_DN)
+ topology_m2.ms["master1"].log.info("Update %s on M2" % ENTRY_DN)
mod = [(ldap.MOD_REPLACE, 'description', 'test_add')]
- topology.master2.modify_s(ENTRY_DN, mod)
+ topology_m2.ms["master2"].modify_s(ENTRY_DN, mod)
- topology.master1.simple_bind_s(DN_DM, PASSWORD)
+ topology_m2.ms["master1"].simple_bind_s(DN_DM, PASSWORD)
loop = 0
while loop <= 10:
try:
- ent = topology.master1.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ ent = topology_m2.ms["master1"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
if ent.hasAttr('description') and (ent.getValue('description') == 'test_add'):
break
except ldap.NO_SUCH_OBJECT:
@@ -338,7 +223,7 @@ def test_ticket47653_add(topology):
assert ent.getValue('description') == 'test_add'
-def test_ticket47653_modify(topology):
+def test_ticket47653_modify(topology_m2):
'''
This test MOD an entry on MASTER1 where 47653 is fixed. Then it checks that update is replicated
on MASTER2 (even if on MASTER2 47653 is NOT fixed). Then update on MASTER2 (bound as BIND_DN).
@@ -349,59 +234,59 @@ def test_ticket47653_modify(topology):
- adding the ACI, we can modify the entry
'''
# bind as bind_entry
- topology.master1.log.info("Bind as %s" % BIND_DN)
- topology.master1.simple_bind_s(BIND_DN, BIND_PW)
+ topology_m2.ms["master1"].log.info("Bind as %s" % BIND_DN)
+ topology_m2.ms["master1"].simple_bind_s(BIND_DN, BIND_PW)
- topology.master1.log.info("\n\n######################### MODIFY ######################\n")
+ topology_m2.ms["master1"].log.info("\n\n######################### MODIFY ######################\n")
# entry to modify WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS
try:
- topology.master1.log.info("Try to modify %s (aci is missing)" % ENTRY_DN)
+ topology_m2.ms["master1"].log.info("Try to modify %s (aci is missing)" % ENTRY_DN)
mod = [(ldap.MOD_REPLACE, 'postalCode', '9876')]
- topology.master1.modify_s(ENTRY_DN, mod)
+ topology_m2.ms["master1"].modify_s(ENTRY_DN, mod)
except Exception as e:
- topology.master1.log.info("Exception (expected): %s" % type(e).__name__)
+ topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__)
assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
# Ok Now add the proper ACI
- topology.master1.log.info("Bind as %s and add the WRITE SELFDN aci" % DN_DM)
- topology.master1.simple_bind_s(DN_DM, PASSWORD)
+ topology_m2.ms["master1"].log.info("Bind as %s and add the WRITE SELFDN aci" % DN_DM)
+ topology_m2.ms["master1"].simple_bind_s(DN_DM, PASSWORD)
- ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX
- ACI_TARGETATTR = "(targetattr = *)"
+ ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX
+ ACI_TARGETATTR = "(targetattr = *)"
ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME
- ACI_ALLOW = "(version 3.0; acl \"SelfDN write\"; allow (write)"
- ACI_SUBJECT = " userattr = \"member#selfDN\";)"
- ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT
+ ACI_ALLOW = "(version 3.0; acl \"SelfDN write\"; allow (write)"
+ ACI_SUBJECT = " userattr = \"member#selfDN\";)"
+ ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT
mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)]
- topology.master1.modify_s(SUFFIX, mod)
+ topology_m2.ms["master1"].modify_s(SUFFIX, mod)
time.sleep(1)
# bind as bind_entry
- topology.master1.log.info("M1: Bind as %s" % BIND_DN)
- topology.master1.simple_bind_s(BIND_DN, BIND_PW)
+ topology_m2.ms["master1"].log.info("M1: Bind as %s" % BIND_DN)
+ topology_m2.ms["master1"].simple_bind_s(BIND_DN, BIND_PW)
# modify the entry and checks the value
- topology.master1.log.info("M1: Try to modify %s. It should succeeds" % ENTRY_DN)
+ topology_m2.ms["master1"].log.info("M1: Try to modify %s. It should succeeds" % ENTRY_DN)
mod = [(ldap.MOD_REPLACE, 'postalCode', '1928')]
- topology.master1.modify_s(ENTRY_DN, mod)
+ topology_m2.ms["master1"].modify_s(ENTRY_DN, mod)
- topology.master1.log.info("M1: Bind as %s" % DN_DM)
- topology.master1.simple_bind_s(DN_DM, PASSWORD)
+ topology_m2.ms["master1"].log.info("M1: Bind as %s" % DN_DM)
+ topology_m2.ms["master1"].simple_bind_s(DN_DM, PASSWORD)
- topology.master1.log.info("M1: Check the update of %s" % ENTRY_DN)
- ents = topology.master1.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*')
+ topology_m2.ms["master1"].log.info("M1: Check the update of %s" % ENTRY_DN)
+ ents = topology_m2.ms["master1"].search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*')
assert len(ents) == 1
assert ents[0].postalCode == '1928'
# Now check the update has been replicated on M2
- topology.master1.log.info("M2: Bind as %s" % DN_DM)
- topology.master2.simple_bind_s(DN_DM, PASSWORD)
- topology.master1.log.info("M2: Try to retrieve %s" % ENTRY_DN)
+ topology_m2.ms["master1"].log.info("M2: Bind as %s" % DN_DM)
+ topology_m2.ms["master2"].simple_bind_s(DN_DM, PASSWORD)
+ topology_m2.ms["master1"].log.info("M2: Try to retrieve %s" % ENTRY_DN)
loop = 0
while loop <= 10:
try:
- ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ ent = topology_m2.ms["master2"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
if ent.hasAttr('postalCode') and (ent.getValue('postalCode') == '1928'):
break
except ldap.NO_SUCH_OBJECT:
@@ -411,29 +296,30 @@ def test_ticket47653_modify(topology):
assert ent.getValue('postalCode') == '1928'
# Now update the entry on Master2 bound as BIND_DN (update may fail if 47653 is not fixed on M2)
- topology.master1.log.info("M2: Update %s (bound as %s)" % (ENTRY_DN, BIND_DN))
- topology.master2.simple_bind_s(BIND_DN, PASSWORD)
+ topology_m2.ms["master1"].log.info("M2: Update %s (bound as %s)" % (ENTRY_DN, BIND_DN))
+ topology_m2.ms["master2"].simple_bind_s(BIND_DN, PASSWORD)
fail = False
try:
mod = [(ldap.MOD_REPLACE, 'postalCode', '1929')]
- topology.master2.modify_s(ENTRY_DN, mod)
+ topology_m2.ms["master2"].modify_s(ENTRY_DN, mod)
fail = False
except ldap.INSUFFICIENT_ACCESS:
- topology.master1.log.info("M2: Exception (INSUFFICIENT_ACCESS): that is fine the bug is possibly not fixed on M2")
+ topology_m2.ms["master1"].log.info(
+ "M2: Exception (INSUFFICIENT_ACCESS): that is fine the bug is possibly not fixed on M2")
fail = True
except Exception as e:
- topology.master1.log.info("M2: Exception (not expected): %s" % type(e).__name__)
+ topology_m2.ms["master1"].log.info("M2: Exception (not expected): %s" % type(e).__name__)
assert 0
if not fail:
# Check the update has been replicaed on M1
- topology.master1.log.info("M1: Bind as %s" % DN_DM)
- topology.master1.simple_bind_s(DN_DM, PASSWORD)
- topology.master1.log.info("M1: Check %s.postalCode=1929)" % (ENTRY_DN))
+ topology_m2.ms["master1"].log.info("M1: Bind as %s" % DN_DM)
+ topology_m2.ms["master1"].simple_bind_s(DN_DM, PASSWORD)
+ topology_m2.ms["master1"].log.info("M1: Check %s.postalCode=1929)" % (ENTRY_DN))
loop = 0
while loop <= 10:
try:
- ent = topology.master1.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ ent = topology_m2.ms["master1"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
if ent.hasAttr('postalCode') and (ent.getValue('postalCode') == '1929'):
break
except ldap.NO_SUCH_OBJECT:
diff --git a/dirsrvtests/tests/tickets/ticket47653_test.py b/dirsrvtests/tests/tickets/ticket47653_test.py
index 0eda94b..7ff1b87 100644
--- a/dirsrvtests/tests/tickets/ticket47653_test.py
+++ b/dirsrvtests/tests/tickets/ticket47653_test.py
@@ -6,39 +6,36 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
import logging
+
+import ldap
import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
OC_NAME = 'OCticket47653'
MUST = "(postalAddress $ postalCode)"
-MAY = "(member $ street)"
+MAY = "(member $ street)"
OTHER_NAME = 'other_entry'
MAX_OTHERS = 10
-BIND_NAME = 'bind_entry'
-BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
-BIND_PW = 'password'
+BIND_NAME = 'bind_entry'
+BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
+BIND_PW = 'password'
ENTRY_NAME = 'test_entry'
-ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
-ENTRY_OC = "top person %s" % OC_NAME
+ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
+ENTRY_OC = "top person %s" % OC_NAME
def _oc_definition(oid_ext, name, must=None, may=None):
- oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext
+ oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext
desc = 'To test ticket 47490'
- sup = 'person'
+ sup = 'person'
if not must:
must = MUST
if not may:
@@ -48,45 +45,7 @@ def _oc_definition(oid_ext, name, must=None, may=None):
return new_oc
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-
-def test_ticket47653_init(topology):
+def test_ticket47653_init(topology_st):
"""
It adds
- Objectclass with MAY 'member'
@@ -95,47 +54,47 @@ def test_ticket47653_init(topology):
"""
- topology.standalone.log.info("Add %s that allows 'member' attribute" % OC_NAME)
+ topology_st.standalone.log.info("Add %s that allows 'member' attribute" % OC_NAME)
new_oc = _oc_definition(2, OC_NAME, must=MUST, may=MAY)
- topology.standalone.schema.add_schema('objectClasses', new_oc)
+ topology_st.standalone.schema.add_schema('objectClasses', new_oc)
# entry used to bind with
- topology.standalone.log.info("Add %s" % BIND_DN)
- topology.standalone.add_s(Entry((BIND_DN, {
- 'objectclass': "top person".split(),
- 'sn': BIND_NAME,
- 'cn': BIND_NAME,
- 'userpassword': BIND_PW})))
+ topology_st.standalone.log.info("Add %s" % BIND_DN)
+ topology_st.standalone.add_s(Entry((BIND_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': BIND_NAME,
+ 'cn': BIND_NAME,
+ 'userpassword': BIND_PW})))
# enable acl error logging
mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '128')]
- topology.standalone.modify_s(DN_CONFIG, mod)
+ topology_st.standalone.modify_s(DN_CONFIG, mod)
# Remove aci's to start with a clean slate
mod = [(ldap.MOD_DELETE, 'aci', None)]
- topology.standalone.modify_s(SUFFIX, mod)
+ topology_st.standalone.modify_s(SUFFIX, mod)
# add dummy entries
for cpt in range(MAX_OTHERS):
name = "%s%d" % (OTHER_NAME, cpt)
- topology.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
- 'objectclass': "top person".split(),
- 'sn': name,
- 'cn': name})))
+ topology_st.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
+ 'objectclass': "top person".split(),
+ 'sn': name,
+ 'cn': name})))
-def test_ticket47653_add(topology):
+def test_ticket47653_add(topology_st):
'''
It checks that, bound as bind_entry,
- we can not ADD an entry without the proper SELFDN aci.
- with the proper ACI we can not ADD with 'member' attribute
- with the proper ACI and 'member' it succeeds to ADD
'''
- topology.standalone.log.info("\n\n######################### ADD ######################\n")
+ topology_st.standalone.log.info("\n\n######################### ADD ######################\n")
# bind as bind_entry
- topology.standalone.log.info("Bind as %s" % BIND_DN)
- topology.standalone.simple_bind_s(BIND_DN, BIND_PW)
+ topology_st.standalone.log.info("Bind as %s" % BIND_DN)
+ topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW)
# Prepare the entry with multivalued members
entry_with_members = Entry(ENTRY_DN)
@@ -164,182 +123,181 @@ def test_ticket47653_add(topology):
# entry to add WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS
try:
- topology.standalone.log.info("Try to add Add %s (aci is missing): %r" % (ENTRY_DN, entry_with_member))
+ topology_st.standalone.log.info("Try to add Add %s (aci is missing): %r" % (ENTRY_DN, entry_with_member))
- topology.standalone.add_s(entry_with_member)
+ topology_st.standalone.add_s(entry_with_member)
except Exception as e:
- topology.standalone.log.info("Exception (expected): %s" % type(e).__name__)
+ topology_st.standalone.log.info("Exception (expected): %s" % type(e).__name__)
assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
# Ok Now add the proper ACI
- topology.standalone.log.info("Bind as %s and add the ADD SELFDN aci" % DN_DM)
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.log.info("Bind as %s and add the ADD SELFDN aci" % DN_DM)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
- ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX
+ ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX
ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME
- ACI_ALLOW = "(version 3.0; acl \"SelfDN add\"; allow (add)"
- ACI_SUBJECT = " userattr = \"member#selfDN\";)"
- ACI_BODY = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT
+ ACI_ALLOW = "(version 3.0; acl \"SelfDN add\"; allow (add)"
+ ACI_SUBJECT = " userattr = \"member#selfDN\";)"
+ ACI_BODY = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT
mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)]
- topology.standalone.modify_s(SUFFIX, mod)
+ topology_st.standalone.modify_s(SUFFIX, mod)
# bind as bind_entry
- topology.standalone.log.info("Bind as %s" % BIND_DN)
- topology.standalone.simple_bind_s(BIND_DN, BIND_PW)
+ topology_st.standalone.log.info("Bind as %s" % BIND_DN)
+ topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW)
# entry to add WITHOUT member and WITH the ACI -> ldap.INSUFFICIENT_ACCESS
try:
- topology.standalone.log.info("Try to add Add %s (member is missing)" % ENTRY_DN)
- topology.standalone.add_s(Entry((ENTRY_DN, {
- 'objectclass': ENTRY_OC.split(),
- 'sn': ENTRY_NAME,
- 'cn': ENTRY_NAME,
- 'postalAddress': 'here',
- 'postalCode': '1234'})))
+ topology_st.standalone.log.info("Try to add Add %s (member is missing)" % ENTRY_DN)
+ topology_st.standalone.add_s(Entry((ENTRY_DN, {
+ 'objectclass': ENTRY_OC.split(),
+ 'sn': ENTRY_NAME,
+ 'cn': ENTRY_NAME,
+ 'postalAddress': 'here',
+ 'postalCode': '1234'})))
except Exception as e:
- topology.standalone.log.info("Exception (expected): %s" % type(e).__name__)
+ topology_st.standalone.log.info("Exception (expected): %s" % type(e).__name__)
assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
# entry to add WITH memberS and WITH the ACI -> ldap.INSUFFICIENT_ACCESS
# member should contain only one value
try:
- topology.standalone.log.info("Try to add Add %s (with several member values)" % ENTRY_DN)
- topology.standalone.add_s(entry_with_members)
+ topology_st.standalone.log.info("Try to add Add %s (with several member values)" % ENTRY_DN)
+ topology_st.standalone.add_s(entry_with_members)
except Exception as e:
- topology.standalone.log.info("Exception (expected): %s" % type(e).__name__)
+ topology_st.standalone.log.info("Exception (expected): %s" % type(e).__name__)
assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
- topology.standalone.log.info("Try to add Add %s should be successful" % ENTRY_DN)
- topology.standalone.add_s(entry_with_member)
+ topology_st.standalone.log.info("Try to add Add %s should be successful" % ENTRY_DN)
+ topology_st.standalone.add_s(entry_with_member)
-def test_ticket47653_search(topology):
+def test_ticket47653_search(topology_st):
'''
It checks that, bound as bind_entry,
- we can not search an entry without the proper SELFDN aci.
- adding the ACI, we can search the entry
'''
- topology.standalone.log.info("\n\n######################### SEARCH ######################\n")
+ topology_st.standalone.log.info("\n\n######################### SEARCH ######################\n")
# bind as bind_entry
- topology.standalone.log.info("Bind as %s" % BIND_DN)
- topology.standalone.simple_bind_s(BIND_DN, BIND_PW)
+ topology_st.standalone.log.info("Bind as %s" % BIND_DN)
+ topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW)
# entry to search WITH member being BIND_DN but WITHOUT the ACI -> no entry returned
- topology.standalone.log.info("Try to search %s (aci is missing)" % ENTRY_DN)
- ents = topology.standalone.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*')
+ topology_st.standalone.log.info("Try to search %s (aci is missing)" % ENTRY_DN)
+ ents = topology_st.standalone.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*')
assert len(ents) == 0
# Ok Now add the proper ACI
- topology.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM)
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
- ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX
- ACI_TARGETATTR = "(targetattr = *)"
+ ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX
+ ACI_TARGETATTR = "(targetattr = *)"
ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME
- ACI_ALLOW = "(version 3.0; acl \"SelfDN search-read\"; allow (read, search, compare)"
- ACI_SUBJECT = " userattr = \"member#selfDN\";)"
- ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT
+ ACI_ALLOW = "(version 3.0; acl \"SelfDN search-read\"; allow (read, search, compare)"
+ ACI_SUBJECT = " userattr = \"member#selfDN\";)"
+ ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT
mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)]
- topology.standalone.modify_s(SUFFIX, mod)
+ topology_st.standalone.modify_s(SUFFIX, mod)
# bind as bind_entry
- topology.standalone.log.info("Bind as %s" % BIND_DN)
- topology.standalone.simple_bind_s(BIND_DN, BIND_PW)
+ topology_st.standalone.log.info("Bind as %s" % BIND_DN)
+ topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW)
# entry to search with the proper aci
- topology.standalone.log.info("Try to search %s should be successful" % ENTRY_DN)
- ents = topology.standalone.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*')
+ topology_st.standalone.log.info("Try to search %s should be successful" % ENTRY_DN)
+ ents = topology_st.standalone.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*')
assert len(ents) == 1
-def test_ticket47653_modify(topology):
+def test_ticket47653_modify(topology_st):
'''
It checks that, bound as bind_entry,
- we can not modify an entry without the proper SELFDN aci.
- adding the ACI, we can modify the entry
'''
# bind as bind_entry
- topology.standalone.log.info("Bind as %s" % BIND_DN)
- topology.standalone.simple_bind_s(BIND_DN, BIND_PW)
+ topology_st.standalone.log.info("Bind as %s" % BIND_DN)
+ topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW)
- topology.standalone.log.info("\n\n######################### MODIFY ######################\n")
+ topology_st.standalone.log.info("\n\n######################### MODIFY ######################\n")
# entry to modify WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS
try:
- topology.standalone.log.info("Try to modify %s (aci is missing)" % ENTRY_DN)
+ topology_st.standalone.log.info("Try to modify %s (aci is missing)" % ENTRY_DN)
mod = [(ldap.MOD_REPLACE, 'postalCode', '9876')]
- topology.standalone.modify_s(ENTRY_DN, mod)
+ topology_st.standalone.modify_s(ENTRY_DN, mod)
except Exception as e:
- topology.standalone.log.info("Exception (expected): %s" % type(e).__name__)
+ topology_st.standalone.log.info("Exception (expected): %s" % type(e).__name__)
assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
-
# Ok Now add the proper ACI
- topology.standalone.log.info("Bind as %s and add the WRITE SELFDN aci" % DN_DM)
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.log.info("Bind as %s and add the WRITE SELFDN aci" % DN_DM)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
- ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX
- ACI_TARGETATTR = "(targetattr = *)"
+ ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX
+ ACI_TARGETATTR = "(targetattr = *)"
ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME
- ACI_ALLOW = "(version 3.0; acl \"SelfDN write\"; allow (write)"
- ACI_SUBJECT = " userattr = \"member#selfDN\";)"
- ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT
+ ACI_ALLOW = "(version 3.0; acl \"SelfDN write\"; allow (write)"
+ ACI_SUBJECT = " userattr = \"member#selfDN\";)"
+ ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT
mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)]
- topology.standalone.modify_s(SUFFIX, mod)
+ topology_st.standalone.modify_s(SUFFIX, mod)
# bind as bind_entry
- topology.standalone.log.info("Bind as %s" % BIND_DN)
- topology.standalone.simple_bind_s(BIND_DN, BIND_PW)
+ topology_st.standalone.log.info("Bind as %s" % BIND_DN)
+ topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW)
# modify the entry and checks the value
- topology.standalone.log.info("Try to modify %s. It should succeeds" % ENTRY_DN)
+ topology_st.standalone.log.info("Try to modify %s. It should succeeds" % ENTRY_DN)
mod = [(ldap.MOD_REPLACE, 'postalCode', '1928')]
- topology.standalone.modify_s(ENTRY_DN, mod)
+ topology_st.standalone.modify_s(ENTRY_DN, mod)
- ents = topology.standalone.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*')
+ ents = topology_st.standalone.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*')
assert len(ents) == 1
assert ents[0].postalCode == '1928'
-def test_ticket47653_delete(topology):
+def test_ticket47653_delete(topology_st):
'''
It checks that, bound as bind_entry,
- we can not delete an entry without the proper SELFDN aci.
- adding the ACI, we can delete the entry
'''
- topology.standalone.log.info("\n\n######################### DELETE ######################\n")
+ topology_st.standalone.log.info("\n\n######################### DELETE ######################\n")
# bind as bind_entry
- topology.standalone.log.info("Bind as %s" % BIND_DN)
- topology.standalone.simple_bind_s(BIND_DN, BIND_PW)
+ topology_st.standalone.log.info("Bind as %s" % BIND_DN)
+ topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW)
# entry to delete WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS
try:
- topology.standalone.log.info("Try to delete %s (aci is missing)" % ENTRY_DN)
- topology.standalone.delete_s(ENTRY_DN)
+ topology_st.standalone.log.info("Try to delete %s (aci is missing)" % ENTRY_DN)
+ topology_st.standalone.delete_s(ENTRY_DN)
except Exception as e:
- topology.standalone.log.info("Exception (expected): %s" % type(e).__name__)
+ topology_st.standalone.log.info("Exception (expected): %s" % type(e).__name__)
assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
# Ok Now add the proper ACI
- topology.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM)
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
- ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX
+ ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX
ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME
- ACI_ALLOW = "(version 3.0; acl \"SelfDN delete\"; allow (delete)"
- ACI_SUBJECT = " userattr = \"member#selfDN\";)"
- ACI_BODY = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT
+ ACI_ALLOW = "(version 3.0; acl \"SelfDN delete\"; allow (delete)"
+ ACI_SUBJECT = " userattr = \"member#selfDN\";)"
+ ACI_BODY = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT
mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)]
- topology.standalone.modify_s(SUFFIX, mod)
+ topology_st.standalone.modify_s(SUFFIX, mod)
# bind as bind_entry
- topology.standalone.log.info("Bind as %s" % BIND_DN)
- topology.standalone.simple_bind_s(BIND_DN, BIND_PW)
+ topology_st.standalone.log.info("Bind as %s" % BIND_DN)
+ topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW)
# entry to search with the proper aci
- topology.standalone.log.info("Try to delete %s should be successful" % ENTRY_DN)
- topology.standalone.delete_s(ENTRY_DN)
+ topology_st.standalone.log.info("Try to delete %s should be successful" % ENTRY_DN)
+ topology_st.standalone.delete_s(ENTRY_DN)
if __name__ == '__main__':
diff --git a/dirsrvtests/tests/tickets/ticket47669_test.py b/dirsrvtests/tests/tickets/ticket47669_test.py
index e26fa05..03fb722 100644
--- a/dirsrvtests/tests/tickets/ticket47669_test.py
+++ b/dirsrvtests/tests/tickets/ticket47669_test.py
@@ -6,19 +6,11 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
import logging
+
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
-from ldap.controls import SimplePagedResultsControl
-from ldap.controls.simple import GetEffectiveRightsControl
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
@@ -32,48 +24,7 @@ COMPACTDBINTERVAL = 'nsslapd-changelogcompactdb-interval'
FILTER = '(cn=*)'
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-
-def test_ticket47669_init(topology):
+def test_ticket47669_init(topology_st):
"""
Add cn=changelog5,cn=config
Enable cn=Retro Changelog Plugin,cn=plugins,cn=config
@@ -81,36 +32,36 @@ def test_ticket47669_init(topology):
log.info('Testing Ticket 47669 - Test duration syntax in the changelogs')
# bind as directory manager
- topology.standalone.log.info("Bind as %s" % DN_DM)
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.log.info("Bind as %s" % DN_DM)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
try:
- changelogdir = os.path.join(os.path.dirname(topology.standalone.dbdir), 'changelog')
- topology.standalone.add_s(Entry((CHANGELOG,
- {'objectclass': 'top extensibleObject'.split(),
- 'nsslapd-changelogdir': changelogdir})))
+ changelogdir = os.path.join(os.path.dirname(topology_st.standalone.dbdir), 'changelog')
+ topology_st.standalone.add_s(Entry((CHANGELOG,
+ {'objectclass': 'top extensibleObject'.split(),
+ 'nsslapd-changelogdir': changelogdir})))
except ldap.LDAPError as e:
log.error('Failed to add ' + CHANGELOG + ': error ' + e.message['desc'])
assert False
try:
- topology.standalone.modify_s(RETROCHANGELOG, [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', 'on')])
+ topology_st.standalone.modify_s(RETROCHANGELOG, [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', 'on')])
except ldap.LDAPError as e:
log.error('Failed to enable ' + RETROCHANGELOG + ': error ' + e.message['desc'])
assert False
# restart the server
- topology.standalone.restart(timeout=10)
+ topology_st.standalone.restart(timeout=10)
-def add_and_check(topology, plugin, attr, val, isvalid):
+def add_and_check(topology_st, plugin, attr, val, isvalid):
"""
Helper function to add/replace attr: val and check the added value
"""
if isvalid:
log.info('Test %s: %s -- valid' % (attr, val))
try:
- topology.standalone.modify_s(plugin, [(ldap.MOD_REPLACE, attr, val)])
+ topology_st.standalone.modify_s(plugin, [(ldap.MOD_REPLACE, attr, val)])
except ldap.LDAPError as e:
log.error('Failed to add ' + attr + ': ' + val + ' to ' + plugin + ': error ' + e.message['desc'])
assert False
@@ -118,18 +69,18 @@ def add_and_check(topology, plugin, attr, val, isvalid):
log.info('Test %s: %s -- invalid' % (attr, val))
if plugin == CHANGELOG:
try:
- topology.standalone.modify_s(plugin, [(ldap.MOD_REPLACE, attr, val)])
+ topology_st.standalone.modify_s(plugin, [(ldap.MOD_REPLACE, attr, val)])
except ldap.LDAPError as e:
log.error('Expectedly failed to add ' + attr + ': ' + val +
' to ' + plugin + ': error ' + e.message['desc'])
else:
try:
- topology.standalone.modify_s(plugin, [(ldap.MOD_REPLACE, attr, val)])
+ topology_st.standalone.modify_s(plugin, [(ldap.MOD_REPLACE, attr, val)])
except ldap.LDAPError as e:
log.error('Failed to add ' + attr + ': ' + val + ' to ' + plugin + ': error ' + e.message['desc'])
try:
- entries = topology.standalone.search_s(plugin, ldap.SCOPE_BASE, FILTER, [attr])
+ entries = topology_st.standalone.search_s(plugin, ldap.SCOPE_BASE, FILTER, [attr])
if isvalid:
if not entries[0].hasValue(attr, val):
log.fatal('%s does not have expected (%s: %s)' % (plugin, attr, val))
@@ -148,86 +99,86 @@ def add_and_check(topology, plugin, attr, val, isvalid):
assert False
-def test_ticket47669_changelog_maxage(topology):
+def test_ticket47669_changelog_maxage(topology_st):
"""
Test nsslapd-changelogmaxage in cn=changelog5,cn=config
"""
log.info('1. Test nsslapd-changelogmaxage in cn=changelog5,cn=config')
# bind as directory manager
- topology.standalone.log.info("Bind as %s" % DN_DM)
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.log.info("Bind as %s" % DN_DM)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
- add_and_check(topology, CHANGELOG, MAXAGE, '12345', True)
- add_and_check(topology, CHANGELOG, MAXAGE, '10s', True)
- add_and_check(topology, CHANGELOG, MAXAGE, '30M', True)
- add_and_check(topology, CHANGELOG, MAXAGE, '12h', True)
- add_and_check(topology, CHANGELOG, MAXAGE, '2D', True)
- add_and_check(topology, CHANGELOG, MAXAGE, '4w', True)
- add_and_check(topology, CHANGELOG, MAXAGE, '-123', False)
- add_and_check(topology, CHANGELOG, MAXAGE, 'xyz', False)
+ add_and_check(topology_st, CHANGELOG, MAXAGE, '12345', True)
+ add_and_check(topology_st, CHANGELOG, MAXAGE, '10s', True)
+ add_and_check(topology_st, CHANGELOG, MAXAGE, '30M', True)
+ add_and_check(topology_st, CHANGELOG, MAXAGE, '12h', True)
+ add_and_check(topology_st, CHANGELOG, MAXAGE, '2D', True)
+ add_and_check(topology_st, CHANGELOG, MAXAGE, '4w', True)
+ add_and_check(topology_st, CHANGELOG, MAXAGE, '-123', False)
+ add_and_check(topology_st, CHANGELOG, MAXAGE, 'xyz', False)
-def test_ticket47669_changelog_triminterval(topology):
+def test_ticket47669_changelog_triminterval(topology_st):
"""
Test nsslapd-changelogtrim-interval in cn=changelog5,cn=config
"""
log.info('2. Test nsslapd-changelogtrim-interval in cn=changelog5,cn=config')
# bind as directory manager
- topology.standalone.log.info("Bind as %s" % DN_DM)
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.log.info("Bind as %s" % DN_DM)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
- add_and_check(topology, CHANGELOG, TRIMINTERVAL, '12345', True)
- add_and_check(topology, CHANGELOG, TRIMINTERVAL, '10s', True)
- add_and_check(topology, CHANGELOG, TRIMINTERVAL, '30M', True)
- add_and_check(topology, CHANGELOG, TRIMINTERVAL, '12h', True)
- add_and_check(topology, CHANGELOG, TRIMINTERVAL, '2D', True)
- add_and_check(topology, CHANGELOG, TRIMINTERVAL, '4w', True)
- add_and_check(topology, CHANGELOG, TRIMINTERVAL, '-123', False)
- add_and_check(topology, CHANGELOG, TRIMINTERVAL, 'xyz', False)
+ add_and_check(topology_st, CHANGELOG, TRIMINTERVAL, '12345', True)
+ add_and_check(topology_st, CHANGELOG, TRIMINTERVAL, '10s', True)
+ add_and_check(topology_st, CHANGELOG, TRIMINTERVAL, '30M', True)
+ add_and_check(topology_st, CHANGELOG, TRIMINTERVAL, '12h', True)
+ add_and_check(topology_st, CHANGELOG, TRIMINTERVAL, '2D', True)
+ add_and_check(topology_st, CHANGELOG, TRIMINTERVAL, '4w', True)
+ add_and_check(topology_st, CHANGELOG, TRIMINTERVAL, '-123', False)
+ add_and_check(topology_st, CHANGELOG, TRIMINTERVAL, 'xyz', False)
-def test_ticket47669_changelog_compactdbinterval(topology):
+def test_ticket47669_changelog_compactdbinterval(topology_st):
"""
Test nsslapd-changelogcompactdb-interval in cn=changelog5,cn=config
"""
log.info('3. Test nsslapd-changelogcompactdb-interval in cn=changelog5,cn=config')
# bind as directory manager
- topology.standalone.log.info("Bind as %s" % DN_DM)
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.log.info("Bind as %s" % DN_DM)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
- add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, '12345', True)
- add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, '10s', True)
- add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, '30M', True)
- add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, '12h', True)
- add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, '2D', True)
- add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, '4w', True)
- add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, '-123', False)
- add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, 'xyz', False)
+ add_and_check(topology_st, CHANGELOG, COMPACTDBINTERVAL, '12345', True)
+ add_and_check(topology_st, CHANGELOG, COMPACTDBINTERVAL, '10s', True)
+ add_and_check(topology_st, CHANGELOG, COMPACTDBINTERVAL, '30M', True)
+ add_and_check(topology_st, CHANGELOG, COMPACTDBINTERVAL, '12h', True)
+ add_and_check(topology_st, CHANGELOG, COMPACTDBINTERVAL, '2D', True)
+ add_and_check(topology_st, CHANGELOG, COMPACTDBINTERVAL, '4w', True)
+ add_and_check(topology_st, CHANGELOG, COMPACTDBINTERVAL, '-123', False)
+ add_and_check(topology_st, CHANGELOG, COMPACTDBINTERVAL, 'xyz', False)
-def test_ticket47669_retrochangelog_maxage(topology):
+def test_ticket47669_retrochangelog_maxage(topology_st):
"""
Test nsslapd-changelogmaxage in cn=Retro Changelog Plugin,cn=plugins,cn=config
"""
log.info('4. Test nsslapd-changelogmaxage in cn=Retro Changelog Plugin,cn=plugins,cn=config')
# bind as directory manager
- topology.standalone.log.info("Bind as %s" % DN_DM)
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
-
- add_and_check(topology, RETROCHANGELOG, MAXAGE, '12345', True)
- add_and_check(topology, RETROCHANGELOG, MAXAGE, '10s', True)
- add_and_check(topology, RETROCHANGELOG, MAXAGE, '30M', True)
- add_and_check(topology, RETROCHANGELOG, MAXAGE, '12h', True)
- add_and_check(topology, RETROCHANGELOG, MAXAGE, '2D', True)
- add_and_check(topology, RETROCHANGELOG, MAXAGE, '4w', True)
- add_and_check(topology, RETROCHANGELOG, MAXAGE, '-123', False)
- add_and_check(topology, RETROCHANGELOG, MAXAGE, 'xyz', False)
-
- topology.standalone.log.info("ticket47669 was successfully verified.")
+ topology_st.standalone.log.info("Bind as %s" % DN_DM)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+
+ add_and_check(topology_st, RETROCHANGELOG, MAXAGE, '12345', True)
+ add_and_check(topology_st, RETROCHANGELOG, MAXAGE, '10s', True)
+ add_and_check(topology_st, RETROCHANGELOG, MAXAGE, '30M', True)
+ add_and_check(topology_st, RETROCHANGELOG, MAXAGE, '12h', True)
+ add_and_check(topology_st, RETROCHANGELOG, MAXAGE, '2D', True)
+ add_and_check(topology_st, RETROCHANGELOG, MAXAGE, '4w', True)
+ add_and_check(topology_st, RETROCHANGELOG, MAXAGE, '-123', False)
+ add_and_check(topology_st, RETROCHANGELOG, MAXAGE, 'xyz', False)
+
+ topology_st.standalone.log.info("ticket47669 was successfully verified.")
if __name__ == '__main__':
diff --git a/dirsrvtests/tests/tickets/ticket47676_test.py b/dirsrvtests/tests/tickets/ticket47676_test.py
index 3ba29c5..a7a12f6 100644
--- a/dirsrvtests/tests/tickets/ticket47676_test.py
+++ b/dirsrvtests/tests/tickets/ticket47676_test.py
@@ -11,37 +11,29 @@ Created on Nov 7, 2013
@author: tbordaz
'''
-import os
-import sys
+import logging
import time
+
import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_m2
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-#
-# important part. We can deploy Master1 and Master2 on different versions
-#
-installation1_prefix = None
-installation2_prefix = None
-
-SCHEMA_DN = "cn=schema"
+SCHEMA_DN = "cn=schema"
TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX
-OC_NAME = 'OCticket47676'
-OC_OID_EXT = 2
+OC_NAME = 'OCticket47676'
+OC_OID_EXT = 2
MUST = "(postalAddress $ postalCode)"
-MAY = "(member $ street)"
+MAY = "(member $ street)"
-OC2_NAME = 'OC2ticket47676'
+OC2_NAME = 'OC2ticket47676'
OC2_OID_EXT = 3
MUST_2 = "(postalAddress $ postalCode)"
-MAY_2 = "(member $ street)"
+MAY_2 = "(member $ street)"
REPL_SCHEMA_POLICY_CONSUMER = "cn=consumerUpdatePolicy,cn=replSchema,cn=config"
REPL_SCHEMA_POLICY_SUPPLIER = "cn=supplierUpdatePolicy,cn=replSchema,cn=config"
@@ -49,21 +41,21 @@ REPL_SCHEMA_POLICY_SUPPLIER = "cn=supplierUpdatePolicy,cn=replSchema,cn=config"
OTHER_NAME = 'other_entry'
MAX_OTHERS = 10
-BIND_NAME = 'bind_entry'
-BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
-BIND_PW = 'password'
+BIND_NAME = 'bind_entry'
+BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
+BIND_PW = 'password'
ENTRY_NAME = 'test_entry'
-ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
-ENTRY_OC = "top person %s" % OC_NAME
+ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
+ENTRY_OC = "top person %s" % OC_NAME
BASE_OID = "1.2.3.4.5.6.7.8.9.10"
def _oc_definition(oid_ext, name, must=None, may=None):
- oid = "%s.%d" % (BASE_OID, oid_ext)
+ oid = "%s.%d" % (BASE_OID, oid_ext)
desc = 'To test ticket 47490'
- sup = 'person'
+ sup = 'person'
if not must:
must = MUST
if not may:
@@ -73,114 +65,7 @@ def _oc_definition(oid_ext, name, must=None, may=None):
return new_oc
-class TopologyMaster1Master2(object):
- def __init__(self, master1, master2):
- master1.open()
- self.master1 = master1
-
- master2.open()
- self.master2 = master2
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to create a replicated topology for the 'module'.
- The replicated topology is MASTER1 <-> Master2.
- '''
- global installation1_prefix
- global installation2_prefix
-
- # allocate master1 on a given deployement
- master1 = DirSrv(verbose=False)
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Args for the master1 instance
- args_instance[SER_HOST] = HOST_MASTER_1
- args_instance[SER_PORT] = PORT_MASTER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
- args_master = args_instance.copy()
- master1.allocate(args_master)
-
- # allocate master1 on a given deployement
- master2 = DirSrv(verbose=False)
- if installation2_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation2_prefix
-
- # Args for the consumer instance
- args_instance[SER_HOST] = HOST_MASTER_2
- args_instance[SER_PORT] = PORT_MASTER_2
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
- args_master = args_instance.copy()
- master2.allocate(args_master)
-
- # Get the status of the instance and restart it if it exists
- instance_master1 = master1.exists()
- instance_master2 = master2.exists()
-
- # Remove all the instances
- if instance_master1:
- master1.delete()
- if instance_master2:
- master2.delete()
-
- # Create the instances
- master1.create()
- master1.open()
- master2.create()
- master2.open()
-
- #
- # Now prepare the Master-Consumer topology
- #
- # First Enable replication
- master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
- master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
- # Initialize the supplier->consumer
-
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
-
- if not repl_agreement:
- log.fatal("Fail to create a replica agreement")
- sys.exit(1)
-
- log.debug("%s created" % repl_agreement)
-
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
-
- master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
- master1.waitForReplInit(repl_agreement)
-
- # Check replication is working fine
- if master1.testReplication(DEFAULT_SUFFIX, master2):
- log.info('Replication is working.')
- else:
- log.fatal('Replication is not working.')
- assert False
-
- def fin():
- master1.delete()
- master2.delete()
- request.addfinalizer(fin)
-
- # Here we have two instances master and consumer
- # with replication working.
- return TopologyMaster1Master2(master1, master2)
-
-
-def test_ticket47676_init(topology):
+def test_ticket47676_init(topology_m2):
"""
It adds
- Objectclass with MAY 'member'
@@ -189,43 +74,43 @@ def test_ticket47676_init(topology):
"""
- topology.master1.log.info("Add %s that allows 'member' attribute" % OC_NAME)
- new_oc = _oc_definition(OC_OID_EXT, OC_NAME, must = MUST, may = MAY)
- topology.master1.schema.add_schema('objectClasses', new_oc)
+ topology_m2.ms["master1"].log.info("Add %s that allows 'member' attribute" % OC_NAME)
+ new_oc = _oc_definition(OC_OID_EXT, OC_NAME, must=MUST, may=MAY)
+ topology_m2.ms["master1"].schema.add_schema('objectClasses', new_oc)
# entry used to bind with
- topology.master1.log.info("Add %s" % BIND_DN)
- topology.master1.add_s(Entry((BIND_DN, {
- 'objectclass': "top person".split(),
- 'sn': BIND_NAME,
- 'cn': BIND_NAME,
- 'userpassword': BIND_PW})))
+ topology_m2.ms["master1"].log.info("Add %s" % BIND_DN)
+ topology_m2.ms["master1"].add_s(Entry((BIND_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': BIND_NAME,
+ 'cn': BIND_NAME,
+ 'userpassword': BIND_PW})))
# enable acl error logging
mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(128 + 8192))] # ACL + REPL
- topology.master1.modify_s(DN_CONFIG, mod)
- topology.master2.modify_s(DN_CONFIG, mod)
+ topology_m2.ms["master1"].modify_s(DN_CONFIG, mod)
+ topology_m2.ms["master2"].modify_s(DN_CONFIG, mod)
# add dummy entries
for cpt in range(MAX_OTHERS):
name = "%s%d" % (OTHER_NAME, cpt)
- topology.master1.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
- 'objectclass': "top person".split(),
- 'sn': name,
- 'cn': name})))
+ topology_m2.ms["master1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
+ 'objectclass': "top person".split(),
+ 'sn': name,
+ 'cn': name})))
-def test_ticket47676_skip_oc_at(topology):
+def test_ticket47676_skip_oc_at(topology_m2):
'''
This test ADD an entry on MASTER1 where 47676 is fixed. Then it checks that entry is replicated
on MASTER2 (even if on MASTER2 47676 is NOT fixed). Then update on MASTER2.
If the schema has successfully been pushed, updating Master2 should succeed
'''
- topology.master1.log.info("\n\n######################### ADD ######################\n")
+ topology_m2.ms["master1"].log.info("\n\n######################### ADD ######################\n")
# bind as 'cn=Directory manager'
- topology.master1.log.info("Bind as %s and add the add the entry with specific oc" % DN_DM)
- topology.master1.simple_bind_s(DN_DM, PASSWORD)
+    topology_m2.ms["master1"].log.info("Bind as %s and add the entry with specific oc" % DN_DM)
+ topology_m2.ms["master1"].simple_bind_s(DN_DM, PASSWORD)
# Prepare the entry with multivalued members
entry = Entry(ENTRY_DN)
@@ -241,18 +126,18 @@ def test_ticket47676_skip_oc_at(topology):
members.append(BIND_DN)
entry.setValues('member', members)
- topology.master1.log.info("Try to add Add %s should be successful" % ENTRY_DN)
- topology.master1.add_s(entry)
+    topology_m2.ms["master1"].log.info("Try to add %s, should be successful" % ENTRY_DN)
+ topology_m2.ms["master1"].add_s(entry)
#
# Now check the entry as been replicated
#
- topology.master2.simple_bind_s(DN_DM, PASSWORD)
- topology.master1.log.info("Try to retrieve %s from Master2" % ENTRY_DN)
+ topology_m2.ms["master2"].simple_bind_s(DN_DM, PASSWORD)
+ topology_m2.ms["master1"].log.info("Try to retrieve %s from Master2" % ENTRY_DN)
loop = 0
while loop <= 10:
try:
- ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ ent = topology_m2.ms["master2"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
break
except ldap.NO_SUCH_OBJECT:
time.sleep(2)
@@ -260,14 +145,14 @@ def test_ticket47676_skip_oc_at(topology):
assert loop <= 10
# Now update the entry on Master2 (as DM because 47676 is possibly not fixed on M2)
- topology.master1.log.info("Update %s on M2" % ENTRY_DN)
+ topology_m2.ms["master1"].log.info("Update %s on M2" % ENTRY_DN)
mod = [(ldap.MOD_REPLACE, 'description', 'test_add')]
- topology.master2.modify_s(ENTRY_DN, mod)
+ topology_m2.ms["master2"].modify_s(ENTRY_DN, mod)
- topology.master1.simple_bind_s(DN_DM, PASSWORD)
+ topology_m2.ms["master1"].simple_bind_s(DN_DM, PASSWORD)
loop = 0
while loop <= 10:
- ent = topology.master1.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ ent = topology_m2.ms["master1"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
if ent.hasAttr('description') and (ent.getValue('description') == 'test_add'):
break
time.sleep(1)
@@ -276,29 +161,28 @@ def test_ticket47676_skip_oc_at(topology):
assert ent.getValue('description') == 'test_add'
-def test_ticket47676_reject_action(topology):
-
- topology.master1.log.info("\n\n######################### REJECT ACTION ######################\n")
+def test_ticket47676_reject_action(topology_m2):
+ topology_m2.ms["master1"].log.info("\n\n######################### REJECT ACTION ######################\n")
- topology.master1.simple_bind_s(DN_DM, PASSWORD)
- topology.master2.simple_bind_s(DN_DM, PASSWORD)
+ topology_m2.ms["master1"].simple_bind_s(DN_DM, PASSWORD)
+ topology_m2.ms["master2"].simple_bind_s(DN_DM, PASSWORD)
# make master1 to refuse to push the schema if OC_NAME is present in consumer schema
mod = [(ldap.MOD_ADD, 'schemaUpdateObjectclassReject', '%s' % (OC_NAME))] # ACL + REPL
- topology.master1.modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod)
+ topology_m2.ms["master1"].modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod)
# Restart is required to take into account that policy
- topology.master1.stop(timeout=10)
- topology.master1.start(timeout=10)
+ topology_m2.ms["master1"].stop(timeout=10)
+ topology_m2.ms["master1"].start(timeout=10)
# Add a new OC on M1 so that schema CSN will change and M1 will try to push the schema
- topology.master1.log.info("Add %s on M1" % OC2_NAME)
+ topology_m2.ms["master1"].log.info("Add %s on M1" % OC2_NAME)
new_oc = _oc_definition(OC2_OID_EXT, OC2_NAME, must=MUST, may=MAY)
- topology.master1.schema.add_schema('objectClasses', new_oc)
+ topology_m2.ms["master1"].schema.add_schema('objectClasses', new_oc)
# Safety checking that the schema has been updated on M1
- topology.master1.log.info("Check %s is in M1" % OC2_NAME)
- ent = topology.master1.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"])
+ topology_m2.ms["master1"].log.info("Check %s is in M1" % OC2_NAME)
+ ent = topology_m2.ms["master1"].getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"])
assert ent.hasAttr('objectclasses')
found = False
for objectclass in ent.getValues('objectclasses'):
@@ -308,15 +192,15 @@ def test_ticket47676_reject_action(topology):
assert found
# Do an update of M1 so that M1 will try to push the schema
- topology.master1.log.info("Update %s on M1" % ENTRY_DN)
+ topology_m2.ms["master1"].log.info("Update %s on M1" % ENTRY_DN)
mod = [(ldap.MOD_REPLACE, 'description', 'test_reject')]
- topology.master1.modify_s(ENTRY_DN, mod)
+ topology_m2.ms["master1"].modify_s(ENTRY_DN, mod)
# Check the replication occured and so also M1 attempted to push the schema
- topology.master1.log.info("Check updated %s on M2" % ENTRY_DN)
+ topology_m2.ms["master1"].log.info("Check updated %s on M2" % ENTRY_DN)
loop = 0
while loop <= 10:
- ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
+ ent = topology_m2.ms["master2"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
if ent.hasAttr('description') and ent.getValue('description') == 'test_reject':
# update was replicated
break
@@ -325,8 +209,8 @@ def test_ticket47676_reject_action(topology):
assert loop <= 10
# Check that the schema has not been pushed
- topology.master1.log.info("Check %s is not in M2" % OC2_NAME)
- ent = topology.master2.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"])
+ topology_m2.ms["master1"].log.info("Check %s is not in M2" % OC2_NAME)
+ ent = topology_m2.ms["master2"].getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"])
assert ent.hasAttr('objectclasses')
found = False
for objectclass in ent.getValues('objectclasses'):
@@ -335,26 +219,26 @@ def test_ticket47676_reject_action(topology):
break
assert not found
- topology.master1.log.info("\n\n######################### NO MORE REJECT ACTION ######################\n")
+ topology_m2.ms["master1"].log.info("\n\n######################### NO MORE REJECT ACTION ######################\n")
# make master1 to do no specific action on OC_NAME
mod = [(ldap.MOD_DELETE, 'schemaUpdateObjectclassReject', '%s' % (OC_NAME))] # ACL + REPL
- topology.master1.modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod)
+ topology_m2.ms["master1"].modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod)
# Restart is required to take into account that policy
- topology.master1.stop(timeout=10)
- topology.master1.start(timeout=10)
+ topology_m2.ms["master1"].stop(timeout=10)
+ topology_m2.ms["master1"].start(timeout=10)
# Do an update of M1 so that M1 will try to push the schema
- topology.master1.log.info("Update %s on M1" % ENTRY_DN)
+ topology_m2.ms["master1"].log.info("Update %s on M1" % ENTRY_DN)
mod = [(ldap.MOD_REPLACE, 'description', 'test_no_more_reject')]
- topology.master1.modify_s(ENTRY_DN, mod)
+ topology_m2.ms["master1"].modify_s(ENTRY_DN, mod)
# Check the replication occured and so also M1 attempted to push the schema
- topology.master1.log.info("Check updated %s on M2" % ENTRY_DN)
+ topology_m2.ms["master1"].log.info("Check updated %s on M2" % ENTRY_DN)
loop = 0
while loop <= 10:
- ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
+ ent = topology_m2.ms["master2"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
if ent.hasAttr('description') and ent.getValue('description') == 'test_no_more_reject':
# update was replicated
break
@@ -363,15 +247,15 @@ def test_ticket47676_reject_action(topology):
assert loop <= 10
# Check that the schema has been pushed
- topology.master1.log.info("Check %s is in M2" % OC2_NAME)
- ent = topology.master2.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"])
+ topology_m2.ms["master1"].log.info("Check %s is in M2" % OC2_NAME)
+ ent = topology_m2.ms["master2"].getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"])
assert ent.hasAttr('objectclasses')
found = False
for objectclass in ent.getValues('objectclasses'):
if str(objectclass).find(OC2_NAME) >= 0:
found = True
break
- assert found
+ assert found
if __name__ == '__main__':
diff --git a/dirsrvtests/tests/tickets/ticket47714_test.py b/dirsrvtests/tests/tickets/ticket47714_test.py
index 08ca98a..49d671f 100644
--- a/dirsrvtests/tests/tickets/ticket47714_test.py
+++ b/dirsrvtests/tests/tickets/ticket47714_test.py
@@ -6,17 +6,14 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
+import logging
import time
+
import ldap
-import logging
import pytest
-import shutil
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
@@ -31,78 +28,40 @@ TEST_USER_DN = 'uid=%s,%s' % (TEST_USER, SUFFIX)
TEST_USER_PW = '%s' % TEST_USER
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-
-def _header(topology, label):
- topology.standalone.log.info("\n\n###############################################")
- topology.standalone.log.info("#######")
- topology.standalone.log.info("####### %s" % label)
- topology.standalone.log.info("#######")
- topology.standalone.log.info("###############################################")
+def _header(topology_st, label):
+ topology_st.standalone.log.info("\n\n###############################################")
+ topology_st.standalone.log.info("#######")
+ topology_st.standalone.log.info("####### %s" % label)
+ topology_st.standalone.log.info("#######")
+ topology_st.standalone.log.info("###############################################")
-def test_ticket47714_init(topology):
+def test_ticket47714_init(topology_st):
"""
1. Add account policy entry to the DB
2. Add a test user to the DB
"""
- _header(topology, 'Testing Ticket 47714 - [RFE] Update lastLoginTime also in Account Policy plugin if account lockout is based on passwordExpirationTime.')
+ _header(topology_st,
+ 'Testing Ticket 47714 - [RFE] Update lastLoginTime also in Account Policy plugin if account lockout is based on passwordExpirationTime.')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
log.info("\n######################### Adding Account Policy entry: %s ######################\n" % ACCT_POLICY_DN)
- topology.standalone.add_s(Entry((ACCT_POLICY_DN, {'objectclass': "top ldapsubentry extensibleObject accountpolicy".split(),
- 'accountInactivityLimit': INACTIVITY_LIMIT})))
+ topology_st.standalone.add_s(
+ Entry((ACCT_POLICY_DN, {'objectclass': "top ldapsubentry extensibleObject accountpolicy".split(),
+ 'accountInactivityLimit': INACTIVITY_LIMIT})))
log.info("\n######################### Adding Test User entry: %s ######################\n" % TEST_USER_DN)
- topology.standalone.add_s(Entry((TEST_USER_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': TEST_USER,
- 'sn': TEST_USER,
- 'givenname': TEST_USER,
- 'userPassword': TEST_USER_PW,
- 'acctPolicySubentry': ACCT_POLICY_DN})))
+ topology_st.standalone.add_s(
+ Entry((TEST_USER_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': TEST_USER,
+ 'sn': TEST_USER,
+ 'givenname': TEST_USER,
+ 'userPassword': TEST_USER_PW,
+ 'acctPolicySubentry': ACCT_POLICY_DN})))
-def test_ticket47714_run_0(topology):
+def test_ticket47714_run_0(topology_st):
"""
Check this change has no inpact to the existing functionality.
1. Set account policy config without the new attr alwaysRecordLoginAttr
@@ -111,67 +70,68 @@ def test_ticket47714_run_0(topology):
4. Waint longer than the accountInactivityLimit time and bind as the test user,
which should fail with CONSTANT_VIOLATION.
"""
- _header(topology, 'Account Policy - No new attr alwaysRecordLoginAttr in config')
+ _header(topology_st, 'Account Policy - No new attr alwaysRecordLoginAttr in config')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
# Modify Account Policy config entry
- topology.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes'),
- (ldap.MOD_REPLACE, 'stateattrname', 'lastLoginTime'),
- (ldap.MOD_REPLACE, 'altstateattrname', 'createTimestamp'),
- (ldap.MOD_REPLACE, 'specattrname', 'acctPolicySubentry'),
- (ldap.MOD_REPLACE, 'limitattrname', 'accountInactivityLimit')])
+ topology_st.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes'),
+ (ldap.MOD_REPLACE, 'stateattrname', 'lastLoginTime'),
+ (ldap.MOD_REPLACE, 'altstateattrname', 'createTimestamp'),
+ (ldap.MOD_REPLACE, 'specattrname', 'acctPolicySubentry'),
+ (ldap.MOD_REPLACE, 'limitattrname',
+ 'accountInactivityLimit')])
# Enable the plugins
- topology.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY)
+ topology_st.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY)
- topology.standalone.restart(timeout=120)
+ topology_st.standalone.restart(timeout=120)
log.info("\n######################### Bind as %s ######################\n" % TEST_USER_DN)
try:
- topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
except ldap.CONSTRAINT_VIOLATION as e:
log.error('CONSTRAINT VIOLATION ' + e.message['desc'])
time.sleep(2)
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- entry = topology.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime'])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ entry = topology_st.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime'])
lastLoginTime0 = entry[0].lastLoginTime
log.info("\n######################### Bind as %s again ######################\n" % TEST_USER_DN)
try:
- topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
except ldap.CONSTRAINT_VIOLATION as e:
log.error('CONSTRAINT VIOLATION ' + e.message['desc'])
time.sleep(2)
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- entry = topology.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime'])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ entry = topology_st.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime'])
lastLoginTime1 = entry[0].lastLoginTime
log.info("First lastLoginTime: %s, Second lastLoginTime: %s" % (lastLoginTime0, lastLoginTime1))
assert lastLoginTime0 < lastLoginTime1
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- entry = topology.standalone.search_s(ACCT_POLICY_DN, ldap.SCOPE_BASE, SEARCHFILTER)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ entry = topology_st.standalone.search_s(ACCT_POLICY_DN, ldap.SCOPE_BASE, SEARCHFILTER)
log.info("\n######################### %s ######################\n" % ACCT_POLICY_CONFIG_DN)
log.info("accountInactivityLimit: %s" % entry[0].accountInactivityLimit)
log.info("\n######################### %s DONE ######################\n" % ACCT_POLICY_CONFIG_DN)
log.info("\n######################### Bind as %s again to fail ######################\n" % TEST_USER_DN)
try:
- topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
except ldap.CONSTRAINT_VIOLATION as e:
log.info('CONSTRAINT VIOLATION ' + e.message['desc'])
log.info("%s was successfully inactivated." % TEST_USER_DN)
pass
-def test_ticket47714_run_1(topology):
+def test_ticket47714_run_1(topology_st):
"""
Verify a new config attr alwaysRecordLoginAttr
1. Set account policy config with the new attr alwaysRecordLoginAttr: lastLoginTime
@@ -180,52 +140,54 @@ def test_ticket47714_run_1(topology):
2. Bind as a test user
3. Bind as the test user again and check the alwaysRecordLoginAttr: lastLoginTime is updated
"""
- _header(topology, 'Account Policy - With new attr alwaysRecordLoginAttr in config')
+ _header(topology_st, 'Account Policy - With new attr alwaysRecordLoginAttr in config')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(TEST_USER_DN, [(ldap.MOD_DELETE, 'lastLoginTime', None)])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(TEST_USER_DN, [(ldap.MOD_DELETE, 'lastLoginTime', None)])
# Modify Account Policy config entry
- topology.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes'),
- (ldap.MOD_REPLACE, 'stateattrname', 'bogus'),
- (ldap.MOD_REPLACE, 'altstateattrname', 'modifyTimestamp'),
- (ldap.MOD_REPLACE, 'alwaysRecordLoginAttr', 'lastLoginTime'),
- (ldap.MOD_REPLACE, 'specattrname', 'acctPolicySubentry'),
- (ldap.MOD_REPLACE, 'limitattrname', 'accountInactivityLimit')])
+ topology_st.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes'),
+ (ldap.MOD_REPLACE, 'stateattrname', 'bogus'),
+ (ldap.MOD_REPLACE, 'altstateattrname', 'modifyTimestamp'),
+ (
+ ldap.MOD_REPLACE, 'alwaysRecordLoginAttr', 'lastLoginTime'),
+ (ldap.MOD_REPLACE, 'specattrname', 'acctPolicySubentry'),
+ (ldap.MOD_REPLACE, 'limitattrname',
+ 'accountInactivityLimit')])
# Enable the plugins
- topology.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY)
+ topology_st.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY)
- topology.standalone.restart(timeout=120)
+ topology_st.standalone.restart(timeout=120)
log.info("\n######################### Bind as %s ######################\n" % TEST_USER_DN)
try:
- topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
except ldap.CONSTRAINT_VIOLATION as e:
log.error('CONSTRAINT VIOLATION ' + e.message['desc'])
time.sleep(1)
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- entry = topology.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime'])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ entry = topology_st.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime'])
lastLoginTime0 = entry[0].lastLoginTime
log.info("\n######################### Bind as %s again ######################\n" % TEST_USER_DN)
try:
- topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
except ldap.CONSTRAINT_VIOLATION as e:
log.error('CONSTRAINT VIOLATION ' + e.message['desc'])
time.sleep(1)
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- entry = topology.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime'])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ entry = topology_st.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime'])
lastLoginTime1 = entry[0].lastLoginTime
log.info("First lastLoginTime: %s, Second lastLoginTime: %s" % (lastLoginTime0, lastLoginTime1))
assert lastLoginTime0 < lastLoginTime1
- topology.standalone.log.info("ticket47714 was successfully verified.")
+ topology_st.standalone.log.info("ticket47714 was successfully verified.")
if __name__ == '__main__':
diff --git a/dirsrvtests/tests/tickets/ticket47721_test.py b/dirsrvtests/tests/tickets/ticket47721_test.py
index b1606bb..495606e 100644
--- a/dirsrvtests/tests/tickets/ticket47721_test.py
+++ b/dirsrvtests/tests/tickets/ticket47721_test.py
@@ -11,38 +11,29 @@ Created on Nov 7, 2013
@author: tbordaz
'''
-import os
-import sys
+import logging
import time
+
import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
from lib389._constants import *
-from lib389.properties import *
-from lib389._constants import REPLICAROLE_MASTER
+from lib389.topologies import topology_m2
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-#
-# important part. We can deploy Master1 and Master2 on different versions
-#
-installation1_prefix = None
-installation2_prefix = None
-
-SCHEMA_DN = "cn=schema"
+SCHEMA_DN = "cn=schema"
TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX
-OC_NAME = 'OCticket47721'
-OC_OID_EXT = 2
+OC_NAME = 'OCticket47721'
+OC_OID_EXT = 2
MUST = "(postalAddress $ postalCode)"
-MAY = "(member $ street)"
+MAY = "(member $ street)"
-OC2_NAME = 'OC2ticket47721'
+OC2_NAME = 'OC2ticket47721'
OC2_OID_EXT = 3
MUST_2 = "(postalAddress $ postalCode)"
-MAY_2 = "(member $ street)"
+MAY_2 = "(member $ street)"
REPL_SCHEMA_POLICY_CONSUMER = "cn=consumerUpdatePolicy,cn=replSchema,cn=config"
REPL_SCHEMA_POLICY_SUPPLIER = "cn=supplierUpdatePolicy,cn=replSchema,cn=config"
@@ -50,20 +41,22 @@ REPL_SCHEMA_POLICY_SUPPLIER = "cn=supplierUpdatePolicy,cn=replSchema,cn=config"
OTHER_NAME = 'other_entry'
MAX_OTHERS = 10
-BIND_NAME = 'bind_entry'
-BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
-BIND_PW = 'password'
+BIND_NAME = 'bind_entry'
+BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
+BIND_PW = 'password'
ENTRY_NAME = 'test_entry'
-ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
-ENTRY_OC = "top person %s" % OC_NAME
+ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
+ENTRY_OC = "top person %s" % OC_NAME
BASE_OID = "1.2.3.4.5.6.7.8.9.10"
SLEEP_INTERVAL = 60
+
def _add_custom_at_definition(name='ATticket47721'):
- new_at = "( %s-oid NAME '%s' DESC 'test AT ticket 47721' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN ( 'Test 47721' 'user defined' ) )" % (name, name)
+ new_at = "( %s-oid NAME '%s' DESC 'test AT ticket 47721' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN ( 'Test 47721' 'user defined' ) )" % (
+ name, name)
return new_at
@@ -73,7 +66,8 @@ def _chg_std_at_defintion():
def _add_custom_oc_defintion(name='OCticket47721'):
- new_oc = "( %s-oid NAME '%s' DESC 'An group of related automount objects' SUP top STRUCTURAL MUST ou X-ORIGIN 'draft-howard-rfc2307bis' )" % (name, name)
+ new_oc = "( %s-oid NAME '%s' DESC 'An group of related automount objects' SUP top STRUCTURAL MUST ou X-ORIGIN 'draft-howard-rfc2307bis' )" % (
+ name, name)
return new_oc
@@ -82,116 +76,7 @@ def _chg_std_oc_defintion():
return new_oc
-class TopologyMaster1Master2(object):
- def __init__(self, master1, master2):
- master1.open()
- self.master1 = master1
-
- master2.open()
- self.master2 = master2
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to create a replicated topology for the 'module'.
- The replicated topology is MASTER1 <-> Master2.
- '''
- global installation1_prefix
- global installation2_prefix
-
- # allocate master1 on a given deployement
- master1 = DirSrv(verbose=False)
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Args for the master1 instance
- args_instance[SER_HOST] = HOST_MASTER_1
- args_instance[SER_PORT] = PORT_MASTER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
- args_master = args_instance.copy()
- master1.allocate(args_master)
-
- # allocate master1 on a given deployement
- master2 = DirSrv(verbose=False)
- if installation2_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation2_prefix
-
- # Args for the consumer instance
- args_instance[SER_HOST] = HOST_MASTER_2
- args_instance[SER_PORT] = PORT_MASTER_2
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
- args_master = args_instance.copy()
- master2.allocate(args_master)
-
- # Get the status of the instance and restart it if it exists
- instance_master1 = master1.exists()
- instance_master2 = master2.exists()
-
- # Remove all the instances
- if instance_master1:
- master1.delete()
- if instance_master2:
- master2.delete()
-
- # Create the instances
- master1.create()
- master1.open()
- master2.create()
- master2.open()
-
- #
- # Now prepare the Master-Consumer topology
- #
- # First Enable replication
- master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
- master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
- # Initialize the supplier->consumer
-
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
-
- if not repl_agreement:
- log.fatal("Fail to create a replica agreement")
- sys.exit(1)
-
- log.debug("%s created" % repl_agreement)
-
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
-
- master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
- master1.waitForReplInit(repl_agreement)
-
- # Check replication is working fine
- if master1.testReplication(DEFAULT_SUFFIX, master2):
- log.info('Replication is working.')
- else:
- log.fatal('Replication is not working.')
- assert False
-
- def fin():
- master1.delete()
- master2.delete()
- request.addfinalizer(fin)
- #
- # Here we have two instances master and consumer
- # with replication working. Either coming from a backup recovery
- # or from a fresh (re)init
- # Time to return the topology
- return TopologyMaster1Master2(master1, master2)
-
-
-def test_ticket47721_init(topology):
+def test_ticket47721_init(topology_m2):
"""
It adds
- Objectclass with MAY 'member'
@@ -201,34 +86,34 @@ def test_ticket47721_init(topology):
"""
# entry used to bind with
- topology.master1.log.info("Add %s" % BIND_DN)
- topology.master1.add_s(Entry((BIND_DN, {
- 'objectclass': "top person".split(),
- 'sn': BIND_NAME,
- 'cn': BIND_NAME,
- 'userpassword': BIND_PW})))
+ topology_m2.ms["master1"].log.info("Add %s" % BIND_DN)
+ topology_m2.ms["master1"].add_s(Entry((BIND_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': BIND_NAME,
+ 'cn': BIND_NAME,
+ 'userpassword': BIND_PW})))
# enable repl error logging
mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(8192))] # REPL logging
- topology.master1.modify_s(DN_CONFIG, mod)
- topology.master2.modify_s(DN_CONFIG, mod)
+ topology_m2.ms["master1"].modify_s(DN_CONFIG, mod)
+ topology_m2.ms["master2"].modify_s(DN_CONFIG, mod)
# add dummy entries
for cpt in range(MAX_OTHERS):
name = "%s%d" % (OTHER_NAME, cpt)
- topology.master1.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
- 'objectclass': "top person".split(),
- 'sn': name,
- 'cn': name})))
+ topology_m2.ms["master1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
+ 'objectclass': "top person".split(),
+ 'sn': name,
+ 'cn': name})))
-def test_ticket47721_0(topology):
+def test_ticket47721_0(topology_m2):
dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX)
loop = 0
ent = None
while loop <= 10:
try:
- ent = topology.master2.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
+ ent = topology_m2.ms["master2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
break
except ldap.NO_SUCH_OBJECT:
time.sleep(1)
@@ -237,35 +122,35 @@ def test_ticket47721_0(topology):
assert False
-def test_ticket47721_1(topology):
+def test_ticket47721_1(topology_m2):
log.info('Running test 1...')
- #topology.master1.log.info("Attach debugger\n\n")
- #time.sleep(30)
+ # topology_m2.ms["master1"].log.info("Attach debugger\n\n")
+ # time.sleep(30)
new = _add_custom_at_definition()
- topology.master1.log.info("Add (M2) %s " % new)
- topology.master2.schema.add_schema('attributetypes', new)
+ topology_m2.ms["master1"].log.info("Add (M2) %s " % new)
+ topology_m2.ms["master2"].schema.add_schema('attributetypes', new)
new = _chg_std_at_defintion()
- topology.master1.log.info("Chg (M2) %s " % new)
- topology.master2.schema.add_schema('attributetypes', new)
+ topology_m2.ms["master1"].log.info("Chg (M2) %s " % new)
+ topology_m2.ms["master2"].schema.add_schema('attributetypes', new)
new = _add_custom_oc_defintion()
- topology.master1.log.info("Add (M2) %s " % new)
- topology.master2.schema.add_schema('objectClasses', new)
+ topology_m2.ms["master1"].log.info("Add (M2) %s " % new)
+ topology_m2.ms["master2"].schema.add_schema('objectClasses', new)
new = _chg_std_oc_defintion()
- topology.master1.log.info("Chg (M2) %s " % new)
- topology.master2.schema.add_schema('objectClasses', new)
+ topology_m2.ms["master1"].log.info("Chg (M2) %s " % new)
+ topology_m2.ms["master2"].schema.add_schema('objectClasses', new)
mod = [(ldap.MOD_REPLACE, 'description', 'Hello world 1')]
dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX)
- topology.master2.modify_s(dn, mod)
+ topology_m2.ms["master2"].modify_s(dn, mod)
loop = 0
while loop <= 10:
try:
- ent = topology.master1.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
+ ent = topology_m2.ms["master1"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
if ent.hasAttr('description') and (ent.getValue('description') == 'Hello world 1'):
break
except ldap.NO_SUCH_OBJECT:
@@ -274,23 +159,23 @@ def test_ticket47721_1(topology):
assert loop <= 10
time.sleep(2)
- schema_csn_master1 = topology.master1.schema.get_schema_csn()
- schema_csn_master2 = topology.master2.schema.get_schema_csn()
+ schema_csn_master1 = topology_m2.ms["master1"].schema.get_schema_csn()
+ schema_csn_master2 = topology_m2.ms["master2"].schema.get_schema_csn()
log.debug('Master 1 schemaCSN: %s' % schema_csn_master1)
log.debug('Master 2 schemaCSN: %s' % schema_csn_master2)
-def test_ticket47721_2(topology):
+def test_ticket47721_2(topology_m2):
log.info('Running test 2...')
mod = [(ldap.MOD_REPLACE, 'description', 'Hello world 2')]
dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX)
- topology.master1.modify_s(dn, mod)
+ topology_m2.ms["master1"].modify_s(dn, mod)
loop = 0
while loop <= 10:
try:
- ent = topology.master2.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
+ ent = topology_m2.ms["master2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
if ent.hasAttr('description') and (ent.getValue('description') == 'Hello world 2'):
break
except ldap.NO_SUCH_OBJECT:
@@ -299,23 +184,23 @@ def test_ticket47721_2(topology):
assert loop <= 10
time.sleep(2)
- schema_csn_master1 = topology.master1.schema.get_schema_csn()
- schema_csn_master2 = topology.master2.schema.get_schema_csn()
+ schema_csn_master1 = topology_m2.ms["master1"].schema.get_schema_csn()
+ schema_csn_master2 = topology_m2.ms["master2"].schema.get_schema_csn()
log.debug('Master 1 schemaCSN: %s' % schema_csn_master1)
log.debug('Master 2 schemaCSN: %s' % schema_csn_master2)
if schema_csn_master1 != schema_csn_master2:
# We need to give the server a little more time, then check it again
log.info('Schema CSNs are not in sync yet: m1 (%s) vs m2 (%s), wait a little...'
- % (schema_csn_master1, schema_csn_master2))
+ % (schema_csn_master1, schema_csn_master2))
time.sleep(SLEEP_INTERVAL)
- schema_csn_master1 = topology.master1.schema.get_schema_csn()
- schema_csn_master2 = topology.master2.schema.get_schema_csn()
+ schema_csn_master1 = topology_m2.ms["master1"].schema.get_schema_csn()
+ schema_csn_master2 = topology_m2.ms["master2"].schema.get_schema_csn()
assert schema_csn_master1 is not None
assert schema_csn_master1 == schema_csn_master2
-def test_ticket47721_3(topology):
+def test_ticket47721_3(topology_m2):
'''
Check that the supplier can update its schema from consumer schema
Update M2 schema, then trigger a replication M1->M2
@@ -323,26 +208,26 @@ def test_ticket47721_3(topology):
log.info('Running test 3...')
# stop RA M2->M1, so that M1 can only learn being a supplier
- ents = topology.master2.agreement.list(suffix=SUFFIX)
+ ents = topology_m2.ms["master2"].agreement.list(suffix=SUFFIX)
assert len(ents) == 1
- topology.master2.agreement.pause(ents[0].dn)
+ topology_m2.ms["master2"].agreement.pause(ents[0].dn)
new = _add_custom_at_definition('ATtest3')
- topology.master1.log.info("Update schema (M2) %s " % new)
- topology.master2.schema.add_schema('attributetypes', new)
+ topology_m2.ms["master1"].log.info("Update schema (M2) %s " % new)
+ topology_m2.ms["master2"].schema.add_schema('attributetypes', new)
new = _add_custom_oc_defintion('OCtest3')
- topology.master1.log.info("Update schema (M2) %s " % new)
- topology.master2.schema.add_schema('objectClasses', new)
+ topology_m2.ms["master1"].log.info("Update schema (M2) %s " % new)
+ topology_m2.ms["master2"].schema.add_schema('objectClasses', new)
mod = [(ldap.MOD_REPLACE, 'description', 'Hello world 3')]
dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX)
- topology.master1.modify_s(dn, mod)
+ topology_m2.ms["master1"].modify_s(dn, mod)
loop = 0
while loop <= 10:
try:
- ent = topology.master2.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
+ ent = topology_m2.ms["master2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
if ent.hasAttr('description') and (ent.getValue('description') == 'Hello world 3'):
break
except ldap.NO_SUCH_OBJECT:
@@ -351,24 +236,24 @@ def test_ticket47721_3(topology):
assert loop <= 10
time.sleep(2)
- schema_csn_master1 = topology.master1.schema.get_schema_csn()
- schema_csn_master2 = topology.master2.schema.get_schema_csn()
+ schema_csn_master1 = topology_m2.ms["master1"].schema.get_schema_csn()
+ schema_csn_master2 = topology_m2.ms["master2"].schema.get_schema_csn()
log.debug('Master 1 schemaCSN: %s' % schema_csn_master1)
log.debug('Master 2 schemaCSN: %s' % schema_csn_master2)
if schema_csn_master1 == schema_csn_master2:
# We need to give the server a little more time, then check it again
log.info('Schema CSNs are not in sync yet: m1 (%s) vs m2 (%s), wait a little...'
- % (schema_csn_master1, schema_csn_master2))
+ % (schema_csn_master1, schema_csn_master2))
time.sleep(SLEEP_INTERVAL)
- schema_csn_master1 = topology.master1.schema.get_schema_csn()
- schema_csn_master2 = topology.master2.schema.get_schema_csn()
+ schema_csn_master1 = topology_m2.ms["master1"].schema.get_schema_csn()
+ schema_csn_master2 = topology_m2.ms["master2"].schema.get_schema_csn()
assert schema_csn_master1 is not None
# schema csn on M2 is larger that on M1. M1 only took the new definitions
assert schema_csn_master1 != schema_csn_master2
-def test_ticket47721_4(topology):
+def test_ticket47721_4(topology_m2):
'''
Here M2->M1 agreement is disabled.
with test_ticket47721_3, M1 schema and M2 should be identical BUT
@@ -378,22 +263,22 @@ def test_ticket47721_4(topology):
log.info('Running test 4...')
new = _add_custom_at_definition('ATtest4')
- topology.master1.log.info("Update schema (M1) %s " % new)
- topology.master1.schema.add_schema('attributetypes', new)
+ topology_m2.ms["master1"].log.info("Update schema (M1) %s " % new)
+ topology_m2.ms["master1"].schema.add_schema('attributetypes', new)
new = _add_custom_oc_defintion('OCtest4')
- topology.master1.log.info("Update schema (M1) %s " % new)
- topology.master1.schema.add_schema('objectClasses', new)
+ topology_m2.ms["master1"].log.info("Update schema (M1) %s " % new)
+ topology_m2.ms["master1"].schema.add_schema('objectClasses', new)
- topology.master1.log.info("trigger replication M1->M2: to update the schema")
+ topology_m2.ms["master1"].log.info("trigger replication M1->M2: to update the schema")
mod = [(ldap.MOD_REPLACE, 'description', 'Hello world 4')]
dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX)
- topology.master1.modify_s(dn, mod)
+ topology_m2.ms["master1"].modify_s(dn, mod)
loop = 0
while loop <= 10:
try:
- ent = topology.master2.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
+ ent = topology_m2.ms["master2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
if ent.hasAttr('description') and (ent.getValue('description') == 'Hello world 4'):
break
except ldap.NO_SUCH_OBJECT:
@@ -401,15 +286,15 @@ def test_ticket47721_4(topology):
time.sleep(1)
assert loop <= 10
- topology.master1.log.info("trigger replication M1->M2: to push the schema")
+ topology_m2.ms["master1"].log.info("trigger replication M1->M2: to push the schema")
mod = [(ldap.MOD_REPLACE, 'description', 'Hello world 5')]
dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX)
- topology.master1.modify_s(dn, mod)
+ topology_m2.ms["master1"].modify_s(dn, mod)
loop = 0
while loop <= 10:
try:
- ent = topology.master2.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
+ ent = topology_m2.ms["master2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
if ent.hasAttr('description') and (ent.getValue('description') == 'Hello world 5'):
break
except ldap.NO_SUCH_OBJECT:
@@ -418,16 +303,16 @@ def test_ticket47721_4(topology):
assert loop <= 10
time.sleep(2)
- schema_csn_master1 = topology.master1.schema.get_schema_csn()
- schema_csn_master2 = topology.master2.schema.get_schema_csn()
+ schema_csn_master1 = topology_m2.ms["master1"].schema.get_schema_csn()
+ schema_csn_master2 = topology_m2.ms["master2"].schema.get_schema_csn()
log.debug('Master 1 schemaCSN: %s' % schema_csn_master1)
log.debug('Master 2 schemaCSN: %s' % schema_csn_master2)
if schema_csn_master1 != schema_csn_master2:
# We need to give the server a little more time, then check it again
log.info('Schema CSNs are incorrectly in sync, wait a little...')
time.sleep(SLEEP_INTERVAL)
- schema_csn_master1 = topology.master1.schema.get_schema_csn()
- schema_csn_master2 = topology.master2.schema.get_schema_csn()
+ schema_csn_master1 = topology_m2.ms["master1"].schema.get_schema_csn()
+ schema_csn_master2 = topology_m2.ms["master2"].schema.get_schema_csn()
assert schema_csn_master1 is not None
assert schema_csn_master1 == schema_csn_master2
diff --git a/dirsrvtests/tests/tickets/ticket47781_test.py b/dirsrvtests/tests/tickets/ticket47781_test.py
index 40de5bb..c38b3f2 100644
--- a/dirsrvtests/tests/tickets/ticket47781_test.py
+++ b/dirsrvtests/tests/tickets/ticket47781_test.py
@@ -6,63 +6,16 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
import logging
+
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-
-def test_ticket47781(topology):
+def test_ticket47781(topology_st):
"""
Testing for a deadlock after doing an online import of an LDIF with
replication data. The replication agreement should be invalid.
@@ -74,38 +27,38 @@ def test_ticket47781(topology):
# Setup Replication
#
log.info('Setting up replication...')
- topology.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER,
- replicaId=REPLICAID_MASTER_1)
+ topology_st.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER,
+ replicaId=REPLICAID_MASTER_1)
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ properties = {RA_NAME: r'meTo_$host:$port',
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
# The agreement should point to a server that does NOT exist (invalid port)
- repl_agreement = topology.standalone.agreement.create(suffix=DEFAULT_SUFFIX,
- host=topology.standalone.host,
- port=5555,
- properties=properties)
+ repl_agreement = topology_st.standalone.agreement.create(suffix=DEFAULT_SUFFIX,
+ host=topology_st.standalone.host,
+ port=5555,
+ properties=properties)
#
# add two entries
#
log.info('Adding two entries...')
try:
- topology.standalone.add_s(Entry(('cn=entry1,dc=example,dc=com', {
- 'objectclass': 'top person'.split(),
- 'sn': 'user',
- 'cn': 'entry1'})))
+ topology_st.standalone.add_s(Entry(('cn=entry1,dc=example,dc=com', {
+ 'objectclass': 'top person'.split(),
+ 'sn': 'user',
+ 'cn': 'entry1'})))
except ldap.LDAPError as e:
log.error('Failed to add entry 1: ' + e.message['desc'])
assert False
try:
- topology.standalone.add_s(Entry(('cn=entry2,dc=example,dc=com', {
- 'objectclass': 'top person'.split(),
- 'sn': 'user',
- 'cn': 'entry2'})))
+ topology_st.standalone.add_s(Entry(('cn=entry2,dc=example,dc=com', {
+ 'objectclass': 'top person'.split(),
+ 'sn': 'user',
+ 'cn': 'entry2'})))
except ldap.LDAPError as e:
log.error('Failed to add entry 2: ' + e.message['desc'])
assert False
@@ -115,7 +68,7 @@ def test_ticket47781(topology):
#
log.info('Exporting replication ldif...')
args = {EXPORT_REPL_INFO: True}
- exportTask = Tasks(topology.standalone)
+ exportTask = Tasks(topology_st.standalone)
try:
exportTask.exportLDIF(DEFAULT_SUFFIX, None, "/tmp/export.ldif", args)
except ValueError:
@@ -125,14 +78,14 @@ def test_ticket47781(topology):
# Restart the server
#
log.info('Restarting server...')
- topology.standalone.stop(timeout=5)
- topology.standalone.start(timeout=5)
+ topology_st.standalone.stop(timeout=5)
+ topology_st.standalone.start(timeout=5)
#
# Import the ldif
#
log.info('Import replication LDIF file...')
- importTask = Tasks(topology.standalone)
+ importTask = Tasks(topology_st.standalone)
args = {TASK_WAIT: True}
try:
importTask.importLDIF(DEFAULT_SUFFIX, None, "/tmp/export.ldif", args)
@@ -145,10 +98,10 @@ def test_ticket47781(topology):
# Search for tombstones - we should not hang/timeout
#
log.info('Search for tombstone entries(should find one and not hang)...')
- topology.standalone.set_option(ldap.OPT_NETWORK_TIMEOUT, 5)
- topology.standalone.set_option(ldap.OPT_TIMEOUT, 5)
+ topology_st.standalone.set_option(ldap.OPT_NETWORK_TIMEOUT, 5)
+ topology_st.standalone.set_option(ldap.OPT_TIMEOUT, 5)
try:
- entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=nsTombstone')
+ entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=nsTombstone')
if not entries:
log.fatal('Search failed to find any entries.')
assert PR_False
diff --git a/dirsrvtests/tests/tickets/ticket47787_test.py b/dirsrvtests/tests/tickets/ticket47787_test.py
index 53e5b00..3e10dc9 100644
--- a/dirsrvtests/tests/tickets/ticket47787_test.py
+++ b/dirsrvtests/tests/tickets/ticket47787_test.py
@@ -11,163 +11,47 @@ Created on April 14, 2014
@author: tbordaz
'''
-import os
-import sys
+import logging
+import re
import time
+
import ldap
-import logging
import pytest
-import re
-from lib389 import DirSrv, Entry, tools, NoSuchEntryError
-from lib389.tools import DirSrvTools
+from lib389 import Entry
from lib389._constants import *
-from lib389.properties import *
-from lib389._constants import REPLICAROLE_MASTER
+from lib389.topologies import topology_m2
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-#
-# important part. We can deploy Master1 and Master2 on different versions
-#
-installation1_prefix = None
-installation2_prefix = None
-
# set this flag to False so that it will assert on failure _status_entry_both_server
DEBUG_FLAG = False
TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX
-STAGING_CN = "staged user"
-PRODUCTION_CN = "accounts"
-EXCEPT_CN = "excepts"
+STAGING_CN = "staged user"
+PRODUCTION_CN = "accounts"
+EXCEPT_CN = "excepts"
-STAGING_DN = "cn=%s,%s" % (STAGING_CN, SUFFIX)
+STAGING_DN = "cn=%s,%s" % (STAGING_CN, SUFFIX)
PRODUCTION_DN = "cn=%s,%s" % (PRODUCTION_CN, SUFFIX)
PROD_EXCEPT_DN = "cn=%s,%s" % (EXCEPT_CN, PRODUCTION_DN)
-STAGING_PATTERN = "cn=%s*,%s" % (STAGING_CN[:2], SUFFIX)
+STAGING_PATTERN = "cn=%s*,%s" % (STAGING_CN[:2], SUFFIX)
PRODUCTION_PATTERN = "cn=%s*,%s" % (PRODUCTION_CN[:2], SUFFIX)
-BAD_STAGING_PATTERN = "cn=bad*,%s" % (SUFFIX)
+BAD_STAGING_PATTERN = "cn=bad*,%s" % (SUFFIX)
BAD_PRODUCTION_PATTERN = "cn=bad*,%s" % (SUFFIX)
-BIND_CN = "bind_entry"
-BIND_DN = "cn=%s,%s" % (BIND_CN, SUFFIX)
-BIND_PW = "password"
+BIND_CN = "bind_entry"
+BIND_DN = "cn=%s,%s" % (BIND_CN, SUFFIX)
+BIND_PW = "password"
-NEW_ACCOUNT = "new_account"
-MAX_ACCOUNTS = 20
+NEW_ACCOUNT = "new_account"
+MAX_ACCOUNTS = 20
CONFIG_MODDN_ACI_ATTR = "nsslapd-moddn-aci"
-class TopologyMaster1Master2(object):
- def __init__(self, master1, master2):
- master1.open()
- self.master1 = master1
-
- master2.open()
- self.master2 = master2
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to create a replicated topology for the 'module'.
- The replicated topology is MASTER1 <-> Master2.
- '''
- global installation1_prefix
- global installation2_prefix
-
- # allocate master1 on a given deployement
- master1 = DirSrv(verbose=False)
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Args for the master1 instance
- args_instance[SER_HOST] = HOST_MASTER_1
- args_instance[SER_PORT] = PORT_MASTER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
- args_master = args_instance.copy()
- master1.allocate(args_master)
-
- # allocate master1 on a given deployement
- master2 = DirSrv(verbose=False)
- if installation2_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation2_prefix
-
- # Args for the consumer instance
- args_instance[SER_HOST] = HOST_MASTER_2
- args_instance[SER_PORT] = PORT_MASTER_2
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
- args_master = args_instance.copy()
- master2.allocate(args_master)
-
- # Get the status of the instance and restart it if it exists
- instance_master1 = master1.exists()
- instance_master2 = master2.exists()
-
- # Remove all the instances
- if instance_master1:
- master1.delete()
- if instance_master2:
- master2.delete()
-
- # Create the instances
- master1.create()
- master1.open()
- master2.create()
- master2.open()
-
- #
- # Now prepare the Master-Consumer topology
- #
- # First Enable replication
- master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
- master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
- # Initialize the supplier->consumer
-
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
-
- if not repl_agreement:
- log.fatal("Fail to create a replica agreement")
- sys.exit(1)
-
- log.debug("%s created" % repl_agreement)
-
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
-
- master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
- master1.waitForReplInit(repl_agreement)
-
- # Check replication is working fine
- if master1.testReplication(DEFAULT_SUFFIX, master2):
- log.info('Replication is working.')
- else:
- log.fatal('Replication is not working.')
- assert False
-
- def fin():
- master1.delete()
- master2.delete()
- request.addfinalizer(fin)
-
- # Here we have two instances master and consumer
- # with replication working.
- return TopologyMaster1Master2(master1, master2)
-
-
def _bind_manager(server):
server.log.info("Bind as %s " % DN_DM)
server.simple_bind_s(DN_DM, PASSWORD)
@@ -178,23 +62,23 @@ def _bind_normal(server):
server.simple_bind_s(BIND_DN, BIND_PW)
-def _header(topology, label):
- topology.master1.log.info("\n\n###############################################")
- topology.master1.log.info("#######")
- topology.master1.log.info("####### %s" % label)
- topology.master1.log.info("#######")
- topology.master1.log.info("###############################################")
+def _header(topology_m2, label):
+ topology_m2.ms["master1"].log.info("\n\n###############################################")
+ topology_m2.ms["master1"].log.info("#######")
+ topology_m2.ms["master1"].log.info("####### %s" % label)
+ topology_m2.ms["master1"].log.info("#######")
+ topology_m2.ms["master1"].log.info("###############################################")
-def _status_entry_both_server(topology, name=None, desc=None, debug=True):
+def _status_entry_both_server(topology_m2, name=None, desc=None, debug=True):
if not name:
return
- topology.master1.log.info("\n\n######################### Tombstone on M1 ######################\n")
+ topology_m2.ms["master1"].log.info("\n\n######################### Tombstone on M1 ######################\n")
attr = 'description'
found = False
attempt = 0
while not found and attempt < 10:
- ent_m1 = _find_tombstone(topology.master1, SUFFIX, 'sn', name)
+ ent_m1 = _find_tombstone(topology_m2.ms["master1"], SUFFIX, 'sn', name)
if attr in ent_m1.getAttrs():
found = True
else:
@@ -202,40 +86,40 @@ def _status_entry_both_server(topology, name=None, desc=None, debug=True):
attempt = attempt + 1
assert ent_m1
- topology.master1.log.info("\n\n######################### Tombstone on M2 ######################\n")
- ent_m2 = _find_tombstone(topology.master2, SUFFIX, 'sn', name)
+ topology_m2.ms["master1"].log.info("\n\n######################### Tombstone on M2 ######################\n")
+ ent_m2 = _find_tombstone(topology_m2.ms["master2"], SUFFIX, 'sn', name)
assert ent_m2
- topology.master1.log.info("\n\n######################### Description ######################\n%s\n" % desc)
- topology.master1.log.info("M1 only\n")
+ topology_m2.ms["master1"].log.info("\n\n######################### Description ######################\n%s\n" % desc)
+ topology_m2.ms["master1"].log.info("M1 only\n")
for attr in ent_m1.getAttrs():
if not debug:
assert attr in ent_m2.getAttrs()
if not attr in ent_m2.getAttrs():
- topology.master1.log.info(" %s" % attr)
+ topology_m2.ms["master1"].log.info(" %s" % attr)
for val in ent_m1.getValues(attr):
- topology.master1.log.info(" %s" % val)
+ topology_m2.ms["master1"].log.info(" %s" % val)
- topology.master1.log.info("M2 only\n")
+ topology_m2.ms["master1"].log.info("M2 only\n")
for attr in ent_m2.getAttrs():
if not debug:
assert attr in ent_m1.getAttrs()
if not attr in ent_m1.getAttrs():
- topology.master1.log.info(" %s" % attr)
+ topology_m2.ms["master1"].log.info(" %s" % attr)
for val in ent_m2.getValues(attr):
- topology.master1.log.info(" %s" % val)
+ topology_m2.ms["master1"].log.info(" %s" % val)
- topology.master1.log.info("M1 differs M2\n")
+ topology_m2.ms["master1"].log.info("M1 differs M2\n")
if not debug:
assert ent_m1.dn == ent_m2.dn
if ent_m1.dn != ent_m2.dn:
- topology.master1.log.info(" M1[dn] = %s\n M2[dn] = %s" % (ent_m1.dn, ent_m2.dn))
+ topology_m2.ms["master1"].log.info(" M1[dn] = %s\n M2[dn] = %s" % (ent_m1.dn, ent_m2.dn))
for attr1 in ent_m1.getAttrs():
if attr1 in ent_m2.getAttrs():
@@ -250,7 +134,7 @@ def _status_entry_both_server(topology, name=None, desc=None, debug=True):
assert found
if not found:
- topology.master1.log.info(" M1[%s] = %s" % (attr1, val1))
+ topology_m2.ms["master1"].log.info(" M1[%s] = %s" % (attr1, val1))
for attr2 in ent_m2.getAttrs():
if attr2 in ent_m1.getAttrs():
@@ -265,29 +149,29 @@ def _status_entry_both_server(topology, name=None, desc=None, debug=True):
assert found
if not found:
- topology.master1.log.info(" M2[%s] = %s" % (attr2, val2))
+ topology_m2.ms["master1"].log.info(" M2[%s] = %s" % (attr2, val2))
-def _pause_RAs(topology):
- topology.master1.log.info("\n\n######################### Pause RA M1<->M2 ######################\n")
- ents = topology.master1.agreement.list(suffix=SUFFIX)
+def _pause_RAs(topology_m2):
+ topology_m2.ms["master1"].log.info("\n\n######################### Pause RA M1<->M2 ######################\n")
+ ents = topology_m2.ms["master1"].agreement.list(suffix=SUFFIX)
assert len(ents) == 1
- topology.master1.agreement.pause(ents[0].dn)
+ topology_m2.ms["master1"].agreement.pause(ents[0].dn)
- ents = topology.master2.agreement.list(suffix=SUFFIX)
+ ents = topology_m2.ms["master2"].agreement.list(suffix=SUFFIX)
assert len(ents) == 1
- topology.master2.agreement.pause(ents[0].dn)
+ topology_m2.ms["master2"].agreement.pause(ents[0].dn)
-def _resume_RAs(topology):
- topology.master1.log.info("\n\n######################### resume RA M1<->M2 ######################\n")
- ents = topology.master1.agreement.list(suffix=SUFFIX)
+def _resume_RAs(topology_m2):
+ topology_m2.ms["master1"].log.info("\n\n######################### resume RA M1<->M2 ######################\n")
+ ents = topology_m2.ms["master1"].agreement.list(suffix=SUFFIX)
assert len(ents) == 1
- topology.master1.agreement.resume(ents[0].dn)
+ topology_m2.ms["master1"].agreement.resume(ents[0].dn)
- ents = topology.master2.agreement.list(suffix=SUFFIX)
+ ents = topology_m2.ms["master2"].agreement.list(suffix=SUFFIX)
assert len(ents) == 1
- topology.master2.agreement.resume(ents[0].dn)
+ topology_m2.ms["master2"].agreement.resume(ents[0].dn)
def _find_tombstone(instance, base, attr, value):
@@ -299,7 +183,7 @@ def _find_tombstone(instance, base, attr, value):
#
filt = '(objectclass=%s)' % REPLICA_OC_TOMBSTONE
ents = instance.search_s(base, ldap.SCOPE_SUBTREE, filt)
- #found = False
+ # found = False
for ent in ents:
if ent.hasAttr(attr):
for val in ent.getValues(attr):
@@ -357,43 +241,44 @@ def _check_entry_exists(instance, entry_dn):
def _check_mod_received(instance, base, filt, attr, value):
- instance.log.info("\n\n######################### Check MOD replicated on %s ######################\n" % instance.serverid)
+ instance.log.info(
+ "\n\n######################### Check MOD replicated on %s ######################\n" % instance.serverid)
loop = 0
while loop <= 10:
ent = instance.getEntry(base, ldap.SCOPE_SUBTREE, filt)
if ent.hasAttr(attr) and ent.getValue(attr) == value:
- break
+ break
time.sleep(1)
loop += 1
assert loop <= 10
-def _check_replication(topology, entry_dn):
+def _check_replication(topology_m2, entry_dn):
# prepare the filter to retrieve the entry
filt = entry_dn.split(',')[0]
- topology.master1.log.info("\n######################### Check replicat M1->M2 ######################\n")
+ topology_m2.ms["master1"].log.info("\n######################### Check replicat M1->M2 ######################\n")
loop = 0
while loop <= 10:
attr = 'description'
value = 'test_value_%d' % loop
mod = [(ldap.MOD_REPLACE, attr, value)]
- topology.master1.modify_s(entry_dn, mod)
- _check_mod_received(topology.master2, SUFFIX, filt, attr, value)
+ topology_m2.ms["master1"].modify_s(entry_dn, mod)
+ _check_mod_received(topology_m2.ms["master2"], SUFFIX, filt, attr, value)
loop += 1
- topology.master1.log.info("\n######################### Check replicat M2->M1 ######################\n")
+ topology_m2.ms["master1"].log.info("\n######################### Check replicat M2->M1 ######################\n")
loop = 0
while loop <= 10:
attr = 'description'
value = 'test_value_%d' % loop
mod = [(ldap.MOD_REPLACE, attr, value)]
- topology.master2.modify_s(entry_dn, mod)
- _check_mod_received(topology.master1, SUFFIX, filt, attr, value)
+ topology_m2.ms["master2"].modify_s(entry_dn, mod)
+ _check_mod_received(topology_m2.ms["master1"], SUFFIX, filt, attr, value)
loop += 1
-def test_ticket47787_init(topology):
+def test_ticket47787_init(topology_m2):
"""
Creates
- a staging DIT
@@ -402,45 +287,45 @@ def test_ticket47787_init(topology):
"""
- topology.master1.log.info("\n\n######################### INITIALIZATION ######################\n")
+ topology_m2.ms["master1"].log.info("\n\n######################### INITIALIZATION ######################\n")
# entry used to bind with
- topology.master1.log.info("Add %s" % BIND_DN)
- topology.master1.add_s(Entry((BIND_DN, {
- 'objectclass': "top person".split(),
- 'sn': BIND_CN,
- 'cn': BIND_CN,
- 'userpassword': BIND_PW})))
+ topology_m2.ms["master1"].log.info("Add %s" % BIND_DN)
+ topology_m2.ms["master1"].add_s(Entry((BIND_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': BIND_CN,
+ 'cn': BIND_CN,
+ 'userpassword': BIND_PW})))
# DIT for staging
- topology.master1.log.info("Add %s" % STAGING_DN)
- topology.master1.add_s(Entry((STAGING_DN, {
- 'objectclass': "top organizationalRole".split(),
- 'cn': STAGING_CN,
- 'description': "staging DIT"})))
+ topology_m2.ms["master1"].log.info("Add %s" % STAGING_DN)
+ topology_m2.ms["master1"].add_s(Entry((STAGING_DN, {
+ 'objectclass': "top organizationalRole".split(),
+ 'cn': STAGING_CN,
+ 'description': "staging DIT"})))
# DIT for production
- topology.master1.log.info("Add %s" % PRODUCTION_DN)
- topology.master1.add_s(Entry((PRODUCTION_DN, {
- 'objectclass': "top organizationalRole".split(),
- 'cn': PRODUCTION_CN,
- 'description': "production DIT"})))
+ topology_m2.ms["master1"].log.info("Add %s" % PRODUCTION_DN)
+ topology_m2.ms["master1"].add_s(Entry((PRODUCTION_DN, {
+ 'objectclass': "top organizationalRole".split(),
+ 'cn': PRODUCTION_CN,
+ 'description': "production DIT"})))
# enable replication error logging
mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '8192')]
- topology.master1.modify_s(DN_CONFIG, mod)
- topology.master2.modify_s(DN_CONFIG, mod)
+ topology_m2.ms["master1"].modify_s(DN_CONFIG, mod)
+ topology_m2.ms["master2"].modify_s(DN_CONFIG, mod)
# add dummy entries in the staging DIT
for cpt in range(MAX_ACCOUNTS):
name = "%s%d" % (NEW_ACCOUNT, cpt)
- topology.master1.add_s(Entry(("cn=%s,%s" % (name, STAGING_DN), {
- 'objectclass': "top person".split(),
- 'sn': name,
- 'cn': name})))
+ topology_m2.ms["master1"].add_s(Entry(("cn=%s,%s" % (name, STAGING_DN), {
+ 'objectclass': "top person".split(),
+ 'sn': name,
+ 'cn': name})))
-def test_ticket47787_2(topology):
+def test_ticket47787_2(topology_m2):
'''
Disable replication so that updates are not replicated
Delete an entry on M1. Modrdn it on M2 (chg rdn + delold=0 + same superior).
@@ -450,11 +335,11 @@ def test_ticket47787_2(topology):
checks that test entry was replicated on M1 (replication M2->M1 not broken by modrdn)
'''
- _header(topology, "test_ticket47787_2")
- _bind_manager(topology.master1)
- _bind_manager(topology.master2)
+ _header(topology_m2, "test_ticket47787_2")
+ _bind_manager(topology_m2.ms["master1"])
+ _bind_manager(topology_m2.ms["master2"])
- #entry to test the replication is still working
+ # entry to test the replication is still working
name = "%s%d" % (NEW_ACCOUNT, MAX_ACCOUNTS - 1)
test_rdn = "cn=%s" % (name)
testentry_dn = "%s,%s" % (test_rdn, STAGING_DN)
@@ -473,33 +358,34 @@ def test_ticket47787_2(topology):
entry_dn = "%s,%s" % (rdn, STAGING_DN)
# created on M1, wait the entry exists on M2
- _check_entry_exists(topology.master2, entry_dn)
- _check_entry_exists(topology.master2, testentry_dn)
+ _check_entry_exists(topology_m2.ms["master2"], entry_dn)
+ _check_entry_exists(topology_m2.ms["master2"], testentry_dn)
- _pause_RAs(topology)
+ _pause_RAs(topology_m2)
# Delete 'entry_dn' on M1.
# dummy update is only have a first CSN before the DEL
# else the DEL will be in min_csn RUV and make diagnostic a bit more complex
- _mod_entry(topology.master1, testentry2_dn, attr, 'dummy')
- _delete_entry(topology.master1, entry_dn, name)
- _mod_entry(topology.master1, testentry2_dn, attr, value)
+ _mod_entry(topology_m2.ms["master1"], testentry2_dn, attr, 'dummy')
+ _delete_entry(topology_m2.ms["master1"], entry_dn, name)
+ _mod_entry(topology_m2.ms["master1"], testentry2_dn, attr, value)
time.sleep(1) # important to have MOD.csn != DEL.csn
# MOD 'entry_dn' on M1.
# dummy update is only have a first CSN before the MOD entry_dn
# else the DEL will be in min_csn RUV and make diagnostic a bit more complex
- _mod_entry(topology.master2, testentry_dn, attr, 'dummy')
- _mod_entry(topology.master2, entry_dn, attr, value)
- _mod_entry(topology.master2, testentry_dn, attr, value)
+ _mod_entry(topology_m2.ms["master2"], testentry_dn, attr, 'dummy')
+ _mod_entry(topology_m2.ms["master2"], entry_dn, attr, value)
+ _mod_entry(topology_m2.ms["master2"], testentry_dn, attr, value)
- _resume_RAs(topology)
+ _resume_RAs(topology_m2)
- topology.master1.log.info("\n\n######################### Check DEL replicated on M2 ######################\n")
+ topology_m2.ms["master1"].log.info(
+ "\n\n######################### Check DEL replicated on M2 ######################\n")
loop = 0
while loop <= 10:
- ent = _find_tombstone(topology.master2, SUFFIX, 'sn', name)
+ ent = _find_tombstone(topology_m2.ms["master2"], SUFFIX, 'sn', name)
if ent:
break
time.sleep(1)
@@ -509,17 +395,18 @@ def test_ticket47787_2(topology):
# the following checks are not necessary
# as this bug is only for failing replicated MOD (entry_dn) on M1
- #_check_mod_received(topology.master1, SUFFIX, "(%s)" % (test_rdn), attr, value)
- #_check_mod_received(topology.master2, SUFFIX, "(%s)" % (test2_rdn), attr, value)
+ # _check_mod_received(topology_m2.ms["master1"], SUFFIX, "(%s)" % (test_rdn), attr, value)
+ # _check_mod_received(topology_m2.ms["master2"], SUFFIX, "(%s)" % (test2_rdn), attr, value)
#
- #_check_replication(topology, testentry_dn)
+ # _check_replication(topology_m2, testentry_dn)
- _status_entry_both_server(topology, name=name, desc="DEL M1 - MOD M2", debug=DEBUG_FLAG)
+ _status_entry_both_server(topology_m2, name=name, desc="DEL M1 - MOD M2", debug=DEBUG_FLAG)
- topology.master1.log.info("\n\n######################### Check MOD replicated on M1 ######################\n")
+ topology_m2.ms["master1"].log.info(
+ "\n\n######################### Check MOD replicated on M1 ######################\n")
loop = 0
while loop <= 10:
- ent = _find_tombstone(topology.master1, SUFFIX, 'sn', name)
+ ent = _find_tombstone(topology_m2.ms["master1"], SUFFIX, 'sn', name)
if ent:
break
time.sleep(1)
diff --git a/dirsrvtests/tests/tickets/ticket47808_test.py b/dirsrvtests/tests/tickets/ticket47808_test.py
index a92059e..d2c7425 100644
--- a/dirsrvtests/tests/tickets/ticket47808_test.py
+++ b/dirsrvtests/tests/tickets/ticket47808_test.py
@@ -6,16 +6,13 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
import logging
+
+import ldap
import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
@@ -23,48 +20,7 @@ ATTRIBUTE_UNIQUENESS_PLUGIN = 'cn=attribute uniqueness,cn=plugins,cn=config'
ENTRY_NAME = 'test_entry'
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-
-def test_ticket47808_run(topology):
+def test_ticket47808_run(topology_st):
"""
It enables attribute uniqueness plugin with sn as a unique attribute
Add an entry 1 with sn = ENTRY_NAME
@@ -74,16 +30,17 @@ def test_ticket47808_run(topology):
"""
# bind as directory manager
- topology.standalone.log.info("Bind as %s" % DN_DM)
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.log.info("Bind as %s" % DN_DM)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.log.info("\n\n######################### SETUP ATTR UNIQ PLUGIN ######################\n")
+ topology_st.standalone.log.info("\n\n######################### SETUP ATTR UNIQ PLUGIN ######################\n")
# enable attribute uniqueness plugin
- mod = [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', 'on'), (ldap.MOD_REPLACE, 'nsslapd-pluginarg0', 'sn'), (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', SUFFIX)]
- topology.standalone.modify_s(ATTRIBUTE_UNIQUENESS_PLUGIN, mod)
+ mod = [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', 'on'), (ldap.MOD_REPLACE, 'nsslapd-pluginarg0', 'sn'),
+ (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', SUFFIX)]
+ topology_st.standalone.modify_s(ATTRIBUTE_UNIQUENESS_PLUGIN, mod)
- topology.standalone.log.info("\n\n######################### ADD USER 1 ######################\n")
+ topology_st.standalone.log.info("\n\n######################### ADD USER 1 ######################\n")
# Prepare entry 1
entry_name = '%s 1' % (ENTRY_NAME)
@@ -92,14 +49,14 @@ def test_ticket47808_run(topology):
entry_1.setValues('objectclass', 'top', 'person')
entry_1.setValues('sn', ENTRY_NAME)
entry_1.setValues('cn', entry_name)
- topology.standalone.log.info("Try to add Add %s: %r" % (entry_1, entry_1))
- topology.standalone.add_s(entry_1)
+ topology_st.standalone.log.info("Try to add Add %s: %r" % (entry_1, entry_1))
+ topology_st.standalone.add_s(entry_1)
- topology.standalone.log.info("\n\n######################### Restart Server ######################\n")
- topology.standalone.stop(timeout=10)
- topology.standalone.start(timeout=10)
+ topology_st.standalone.log.info("\n\n######################### Restart Server ######################\n")
+ topology_st.standalone.stop(timeout=10)
+ topology_st.standalone.start(timeout=10)
- topology.standalone.log.info("\n\n######################### ADD USER 2 ######################\n")
+ topology_st.standalone.log.info("\n\n######################### ADD USER 2 ######################\n")
# Prepare entry 2 having the same sn, which crashes the server
entry_name = '%s 2' % (ENTRY_NAME)
@@ -108,29 +65,29 @@ def test_ticket47808_run(topology):
entry_2.setValues('objectclass', 'top', 'person')
entry_2.setValues('sn', ENTRY_NAME)
entry_2.setValues('cn', entry_name)
- topology.standalone.log.info("Try to add Add %s: %r" % (entry_2, entry_2))
+ topology_st.standalone.log.info("Try to add Add %s: %r" % (entry_2, entry_2))
try:
- topology.standalone.add_s(entry_2)
+ topology_st.standalone.add_s(entry_2)
except:
- topology.standalone.log.warn("Adding %s failed" % entry_dn_2)
+ topology_st.standalone.log.warn("Adding %s failed" % entry_dn_2)
pass
- topology.standalone.log.info("\n\n######################### IS SERVER UP? ######################\n")
- ents = topology.standalone.search_s(entry_dn_1, ldap.SCOPE_BASE, '(objectclass=*)')
+ topology_st.standalone.log.info("\n\n######################### IS SERVER UP? ######################\n")
+ ents = topology_st.standalone.search_s(entry_dn_1, ldap.SCOPE_BASE, '(objectclass=*)')
assert len(ents) == 1
- topology.standalone.log.info("Yes, it's up.")
+ topology_st.standalone.log.info("Yes, it's up.")
- topology.standalone.log.info("\n\n######################### CHECK USER 2 NOT ADDED ######################\n")
- topology.standalone.log.info("Try to search %s" % entry_dn_2)
+ topology_st.standalone.log.info("\n\n######################### CHECK USER 2 NOT ADDED ######################\n")
+ topology_st.standalone.log.info("Try to search %s" % entry_dn_2)
try:
- ents = topology.standalone.search_s(entry_dn_2, ldap.SCOPE_BASE, '(objectclass=*)')
+ ents = topology_st.standalone.search_s(entry_dn_2, ldap.SCOPE_BASE, '(objectclass=*)')
except ldap.NO_SUCH_OBJECT:
- topology.standalone.log.info("Found none")
+ topology_st.standalone.log.info("Found none")
- topology.standalone.log.info("\n\n######################### DELETE USER 1 ######################\n")
+ topology_st.standalone.log.info("\n\n######################### DELETE USER 1 ######################\n")
- topology.standalone.log.info("Try to delete %s " % entry_dn_1)
- topology.standalone.delete_s(entry_dn_1)
+ topology_st.standalone.log.info("Try to delete %s " % entry_dn_1)
+ topology_st.standalone.delete_s(entry_dn_1)
log.info('Testcase PASSED')
diff --git a/dirsrvtests/tests/tickets/ticket47815_test.py b/dirsrvtests/tests/tickets/ticket47815_test.py
index b00f5e8..0b68ca4 100644
--- a/dirsrvtests/tests/tickets/ticket47815_test.py
+++ b/dirsrvtests/tests/tickets/ticket47815_test.py
@@ -6,62 +6,19 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
+import logging
import time
+
import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-
-def test_ticket47815(topology):
+def test_ticket47815(topology_st):
"""
Test betxn plugins reject an invalid option, and make sure that the rejected entry
is not in the entry cache.
@@ -75,41 +32,42 @@ def test_ticket47815(topology):
result = 0
result2 = 0
- log.info('Testing Ticket 47815 - Add entries that should be rejected by the betxn plugins, and are not left in the entry cache')
+ log.info(
+ 'Testing Ticket 47815 - Add entries that should be rejected by the betxn plugins, and are not left in the entry cache')
# Enabled the plugins
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
- topology.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER)
- topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
+ topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER)
+ topology_st.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
# configure automember config entry
log.info('Adding automember config')
try:
- topology.standalone.add_s(Entry(('cn=group cfg,cn=Auto Membership Plugin,cn=plugins,cn=config', {
- 'objectclass': 'top autoMemberDefinition'.split(),
- 'autoMemberScope': 'dc=example,dc=com',
- 'autoMemberFilter': 'cn=user',
- 'autoMemberDefaultGroup': 'cn=group,dc=example,dc=com',
- 'autoMemberGroupingAttr': 'member:dn',
- 'cn': 'group cfg'})))
+ topology_st.standalone.add_s(Entry(('cn=group cfg,cn=Auto Membership Plugin,cn=plugins,cn=config', {
+ 'objectclass': 'top autoMemberDefinition'.split(),
+ 'autoMemberScope': 'dc=example,dc=com',
+ 'autoMemberFilter': 'cn=user',
+ 'autoMemberDefaultGroup': 'cn=group,dc=example,dc=com',
+ 'autoMemberGroupingAttr': 'member:dn',
+ 'cn': 'group cfg'})))
except:
log.error('Failed to add automember config')
exit(1)
- topology.standalone.stop(timeout=120)
+ topology_st.standalone.stop(timeout=120)
time.sleep(1)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
time.sleep(3)
# need to reopen a connection toward the instance
- topology.standalone.open()
+ topology_st.standalone.open()
# add automember group
log.info('Adding automember group')
try:
- topology.standalone.add_s(Entry(('cn=group,dc=example,dc=com', {
- 'objectclass': 'top groupOfNames'.split(),
- 'cn': 'group'})))
+ topology_st.standalone.add_s(Entry(('cn=group,dc=example,dc=com', {
+ 'objectclass': 'top groupOfNames'.split(),
+ 'cn': 'group'})))
except:
log.error('Failed to add automember group')
exit(1)
@@ -118,10 +76,10 @@ def test_ticket47815(topology):
log.info('Adding invalid entry')
try:
- topology.standalone.add_s(Entry(('cn=user,dc=example,dc=com', {
- 'objectclass': 'top person'.split(),
- 'sn': 'user',
- 'cn': 'user'})))
+ topology_st.standalone.add_s(Entry(('cn=user,dc=example,dc=com', {
+ 'objectclass': 'top person'.split(),
+ 'sn': 'user',
+ 'cn': 'user'})))
except ldap.UNWILLING_TO_PERFORM:
log.debug('Adding invalid entry failed as expected')
result = 53
@@ -134,10 +92,10 @@ def test_ticket47815(topology):
# Attempt to add user again, should result in error 53 again
try:
- topology.standalone.add_s(Entry(('cn=user,dc=example,dc=com', {
- 'objectclass': 'top person'.split(),
- 'sn': 'user',
- 'cn': 'user'})))
+ topology_st.standalone.add_s(Entry(('cn=user,dc=example,dc=com', {
+ 'objectclass': 'top person'.split(),
+ 'sn': 'user',
+ 'cn': 'user'})))
except ldap.UNWILLING_TO_PERFORM:
log.debug('2nd add of invalid entry failed as expected')
result2 = 53
diff --git a/dirsrvtests/tests/tickets/ticket47819_test.py b/dirsrvtests/tests/tickets/ticket47819_test.py
index 2b751c8..555ce67 100644
--- a/dirsrvtests/tests/tickets/ticket47819_test.py
+++ b/dirsrvtests/tests/tickets/ticket47819_test.py
@@ -6,63 +6,16 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
import logging
+
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-
-def test_ticket47819(topology):
+def test_ticket47819(topology_st):
"""
Testing precise tombstone purging:
[1] Make sure "nsTombstoneCSN" is added to new tombstones
@@ -78,8 +31,8 @@ def test_ticket47819(topology):
# Setup Replication
#
log.info('Setting up replication...')
- topology.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER,
- replicaId=REPLICAID_MASTER_1)
+ topology_st.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER,
+ replicaId=REPLICAID_MASTER_1)
#
# Part 1 create a tombstone entry and make sure nsTombstoneCSN is added
@@ -87,24 +40,24 @@ def test_ticket47819(topology):
log.info('Part 1: Add and then delete an entry to create a tombstone...')
try:
- topology.standalone.add_s(Entry(('cn=entry1,dc=example,dc=com', {
- 'objectclass': 'top person'.split(),
- 'sn': 'user',
- 'cn': 'entry1'})))
+ topology_st.standalone.add_s(Entry(('cn=entry1,dc=example,dc=com', {
+ 'objectclass': 'top person'.split(),
+ 'sn': 'user',
+ 'cn': 'entry1'})))
except ldap.LDAPError as e:
log.error('Failed to add entry: ' + e.message['desc'])
assert False
try:
- topology.standalone.delete_s('cn=entry1,dc=example,dc=com')
+ topology_st.standalone.delete_s('cn=entry1,dc=example,dc=com')
except ldap.LDAPError as e:
log.error('Failed to delete entry: ' + e.message['desc'])
assert False
log.info('Search for tombstone entries...')
try:
- entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
- '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))')
+ entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
+ '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))')
if not entries:
log.fatal('Search failed to the new tombstone(nsTombstoneCSN is probably missing).')
assert False
@@ -127,7 +80,7 @@ def test_ticket47819(topology):
args = {EXPORT_REPL_INFO: True,
TASK_WAIT: True}
- exportTask = Tasks(topology.standalone)
+ exportTask = Tasks(topology_st.standalone)
try:
exportTask.exportLDIF(DEFAULT_SUFFIX, None, ldif_file, args)
except ValueError:
@@ -149,7 +102,7 @@ def test_ticket47819(topology):
# import the new ldif file
log.info('Import replication LDIF file...')
- importTask = Tasks(topology.standalone)
+ importTask = Tasks(topology_st.standalone)
args = {TASK_WAIT: True}
try:
importTask.importLDIF(DEFAULT_SUFFIX, None, ldif_file, args)
@@ -162,8 +115,8 @@ def test_ticket47819(topology):
# Search for the tombstone again
log.info('Search for tombstone entries...')
try:
- entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
- '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))')
+ entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
+ '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))')
if not entries:
log.fatal('Search failed to fine the new tombstone(nsTombstoneCSN is probably missing).')
assert False
@@ -182,7 +135,7 @@ def test_ticket47819(topology):
# so we can test if the fixup task works.
args = {TASK_WAIT: True,
TASK_TOMB_STRIP: True}
- fixupTombTask = Tasks(topology.standalone)
+ fixupTombTask = Tasks(topology_st.standalone)
try:
fixupTombTask.fixupTombstones(DEFAULT_BENAME, args)
except:
@@ -192,8 +145,8 @@ def test_ticket47819(topology):
# Search for tombstones with nsTombstoneCSN - better not find any
log.info('Search for tombstone entries...')
try:
- entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
- '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))')
+ entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
+ '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))')
if entries:
log.fatal('Search found tombstones with nsTombstoneCSN')
assert False
@@ -201,10 +154,9 @@ def test_ticket47819(topology):
log.fatal('Search failed: ' + e.message['desc'])
assert False
-
# Now run the fixup task
args = {TASK_WAIT: True}
- fixupTombTask = Tasks(topology.standalone)
+ fixupTombTask = Tasks(topology_st.standalone)
try:
fixupTombTask.fixupTombstones(DEFAULT_BENAME, args)
except:
@@ -214,8 +166,8 @@ def test_ticket47819(topology):
# Search for tombstones with nsTombstoneCSN - better find some
log.info('Search for tombstone entries...')
try:
- entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
- '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))')
+ entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
+ '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))')
if not entries:
log.fatal('Search did not find any fixed-up tombstones')
assert False
@@ -234,7 +186,7 @@ def test_ticket47819(topology):
REPLICA_PURGE_DELAY: '5',
REPLICA_PURGE_INTERVAL: '5'}
try:
- topology.standalone.replica.setProperties(DEFAULT_SUFFIX, None, None, args)
+ topology_st.standalone.replica.setProperties(DEFAULT_SUFFIX, None, None, args)
except:
log.fatal('Failed to configure replica')
assert False
@@ -246,10 +198,10 @@ def test_ticket47819(topology):
# Add an entry to trigger replication
log.info('Perform an update to help trigger tombstone purging...')
try:
- topology.standalone.add_s(Entry(('cn=test_entry,dc=example,dc=com', {
- 'objectclass': 'top person'.split(),
- 'sn': 'user',
- 'cn': 'entry1'})))
+ topology_st.standalone.add_s(Entry(('cn=test_entry,dc=example,dc=com', {
+ 'objectclass': 'top person'.split(),
+ 'sn': 'user',
+ 'cn': 'entry1'})))
except ldap.LDAPError as e:
log.error('Failed to add entry: ' + e.message['desc'])
assert False
@@ -261,8 +213,8 @@ def test_ticket47819(topology):
# search for tombstones, there should be none
log.info('Search for tombstone entries...')
try:
- entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
- '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))')
+ entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
+ '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))')
if entries:
log.fatal('Search unexpectedly found tombstones')
assert False
diff --git a/dirsrvtests/tests/tickets/ticket47823_test.py b/dirsrvtests/tests/tickets/ticket47823_test.py
index 71b7356..92e189b 100644
--- a/dirsrvtests/tests/tickets/ticket47823_test.py
+++ b/dirsrvtests/tests/tickets/ticket47823_test.py
@@ -6,20 +6,17 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
import logging
-import pytest
import re
import shutil
import subprocess
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
+import time
+import ldap
+import pytest
+from lib389 import Entry
+from lib389._constants import *
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
@@ -27,11 +24,11 @@ PROVISIONING_CN = "provisioning"
PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SUFFIX)
ACTIVE_CN = "accounts"
-STAGE_CN = "staged users"
+STAGE_CN = "staged users"
DELETE_CN = "deleted users"
ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SUFFIX)
-STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN)
-DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN)
+STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN)
+DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN)
STAGE_USER_CN = "stage guy"
STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN)
@@ -53,83 +50,45 @@ ALL_CONFIG_ATTRS = ['nsslapd-pluginarg0', 'nsslapd-pluginarg1', 'nsslapd-plugina
'uniqueness-attribute-name', 'uniqueness-subtrees', 'uniqueness-across-all-subtrees']
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
- standalone = DirSrv(verbose=False)
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
+def _header(topology_st, label):
+ topology_st.standalone.log.info("\n\n###############################################")
+ topology_st.standalone.log.info("#######")
+ topology_st.standalone.log.info("####### %s" % label)
+ topology_st.standalone.log.info("#######")
+ topology_st.standalone.log.info("###############################################")
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-
-def _header(topology, label):
- topology.standalone.log.info("\n\n###############################################")
- topology.standalone.log.info("#######")
- topology.standalone.log.info("####### %s" % label)
- topology.standalone.log.info("#######")
- topology.standalone.log.info("###############################################")
-
-
-def _uniqueness_config_entry(topology, name=None):
+def _uniqueness_config_entry(topology_st, name=None):
if not name:
return None
- ent = topology.standalone.getEntry("cn=%s,%s" % (PLUGIN_ATTR_UNIQUENESS, DN_PLUGIN), ldap.SCOPE_BASE,
- "(objectclass=nsSlapdPlugin)",
- ['objectClass', 'cn', 'nsslapd-pluginPath', 'nsslapd-pluginInitfunc',
- 'nsslapd-pluginType', 'nsslapd-pluginEnabled', 'nsslapd-plugin-depends-on-type',
- 'nsslapd-pluginId', 'nsslapd-pluginVersion', 'nsslapd-pluginVendor',
- 'nsslapd-pluginDescription'])
+ ent = topology_st.standalone.getEntry("cn=%s,%s" % (PLUGIN_ATTR_UNIQUENESS, DN_PLUGIN), ldap.SCOPE_BASE,
+ "(objectclass=nsSlapdPlugin)",
+ ['objectClass', 'cn', 'nsslapd-pluginPath', 'nsslapd-pluginInitfunc',
+ 'nsslapd-pluginType', 'nsslapd-pluginEnabled',
+ 'nsslapd-plugin-depends-on-type',
+ 'nsslapd-pluginId', 'nsslapd-pluginVersion', 'nsslapd-pluginVendor',
+ 'nsslapd-pluginDescription'])
ent.dn = "cn=%s uniqueness,%s" % (name, DN_PLUGIN)
return ent
-def _build_config(topology, attr_name='cn', subtree_1=None, subtree_2=None, type_config='old', across_subtrees=False):
- assert topology
+def _build_config(topology_st, attr_name='cn', subtree_1=None, subtree_2=None, type_config='old',
+ across_subtrees=False):
+ assert topology_st
assert attr_name
assert subtree_1
if type_config == 'old':
# enable the 'cn' uniqueness on Active
- config = _uniqueness_config_entry(topology, attr_name)
+ config = _uniqueness_config_entry(topology_st, attr_name)
config.setValue('nsslapd-pluginarg0', attr_name)
config.setValue('nsslapd-pluginarg1', subtree_1)
if subtree_2:
config.setValue('nsslapd-pluginarg2', subtree_2)
else:
# prepare the config entry
- config = _uniqueness_config_entry(topology, attr_name)
+ config = _uniqueness_config_entry(topology_st, attr_name)
config.setValue('uniqueness-attribute-name', attr_name)
config.setValue('uniqueness-subtrees', subtree_1)
if subtree_2:
@@ -139,160 +98,163 @@ def _build_config(topology, attr_name='cn', subtree_1=None, subtree_2=None, type
return config
-def _active_container_invalid_cfg_add(topology):
+def _active_container_invalid_cfg_add(topology_st):
'''
Check uniqueness is not enforced with ADD (invalid config)
'''
- topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
- 'objectclass': "top person".split(),
- 'sn': ACTIVE_USER_1_CN,
- 'cn': ACTIVE_USER_1_CN})))
+ topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': ACTIVE_USER_1_CN,
+ 'cn': ACTIVE_USER_1_CN})))
- topology.standalone.add_s(Entry((ACTIVE_USER_2_DN, {
- 'objectclass': "top person".split(),
- 'sn': ACTIVE_USER_2_CN,
- 'cn': [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]})))
+ topology_st.standalone.add_s(Entry((ACTIVE_USER_2_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': ACTIVE_USER_2_CN,
+ 'cn': [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]})))
- topology.standalone.delete_s(ACTIVE_USER_1_DN)
- topology.standalone.delete_s(ACTIVE_USER_2_DN)
+ topology_st.standalone.delete_s(ACTIVE_USER_1_DN)
+ topology_st.standalone.delete_s(ACTIVE_USER_2_DN)
-def _active_container_add(topology, type_config='old'):
+def _active_container_add(topology_st, type_config='old'):
'''
Check uniqueness in a single container (Active)
Add an entry with a given 'cn', then check we can not add an entry with the same 'cn' value
'''
- config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, across_subtrees=False)
+ config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config,
+ across_subtrees=False)
# remove the 'cn' uniqueness entry
try:
- topology.standalone.delete_s(config.dn)
+ topology_st.standalone.delete_s(config.dn)
except ldap.NO_SUCH_OBJECT:
pass
- topology.standalone.restart(timeout=120)
-
- topology.standalone.log.info('Uniqueness not enforced: create the entries')
+ topology_st.standalone.restart(timeout=120)
- topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
- 'objectclass': "top person".split(),
- 'sn': ACTIVE_USER_1_CN,
- 'cn': ACTIVE_USER_1_CN})))
+ topology_st.standalone.log.info('Uniqueness not enforced: create the entries')
- topology.standalone.add_s(Entry((ACTIVE_USER_2_DN, {
- 'objectclass': "top person".split(),
- 'sn': ACTIVE_USER_2_CN,
- 'cn': [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]})))
+ topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': ACTIVE_USER_1_CN,
+ 'cn': ACTIVE_USER_1_CN})))
- topology.standalone.delete_s(ACTIVE_USER_1_DN)
- topology.standalone.delete_s(ACTIVE_USER_2_DN)
+ topology_st.standalone.add_s(Entry((ACTIVE_USER_2_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': ACTIVE_USER_2_CN,
+ 'cn': [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]})))
+ topology_st.standalone.delete_s(ACTIVE_USER_1_DN)
+ topology_st.standalone.delete_s(ACTIVE_USER_2_DN)
- topology.standalone.log.info('Uniqueness enforced: checks second entry is rejected')
+ topology_st.standalone.log.info('Uniqueness enforced: checks second entry is rejected')
# enable the 'cn' uniqueness on Active
- topology.standalone.add_s(config)
- topology.standalone.restart(timeout=120)
- topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
- 'objectclass': "top person".split(),
- 'sn': ACTIVE_USER_1_CN,
- 'cn': ACTIVE_USER_1_CN})))
+ topology_st.standalone.add_s(config)
+ topology_st.standalone.restart(timeout=120)
+ topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': ACTIVE_USER_1_CN,
+ 'cn': ACTIVE_USER_1_CN})))
try:
- topology.standalone.add_s(Entry((ACTIVE_USER_2_DN, {
- 'objectclass': "top person".split(),
- 'sn': ACTIVE_USER_2_CN,
- 'cn': [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]})))
+ topology_st.standalone.add_s(Entry((ACTIVE_USER_2_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': ACTIVE_USER_2_CN,
+ 'cn': [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]})))
except ldap.CONSTRAINT_VIOLATION:
# yes it is expected
pass
# cleanup the stuff now
- topology.standalone.delete_s(config.dn)
- topology.standalone.delete_s(ACTIVE_USER_1_DN)
+ topology_st.standalone.delete_s(config.dn)
+ topology_st.standalone.delete_s(ACTIVE_USER_1_DN)
-def _active_container_mod(topology, type_config='old'):
+def _active_container_mod(topology_st, type_config='old'):
'''
Check uniqueness in a single container (active)
Add and entry with a given 'cn', then check we can not modify an entry with the same 'cn' value
'''
- config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, across_subtrees=False)
+ config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config,
+ across_subtrees=False)
# enable the 'cn' uniqueness on Active
- topology.standalone.add_s(config)
- topology.standalone.restart(timeout=120)
+ topology_st.standalone.add_s(config)
+ topology_st.standalone.restart(timeout=120)
- topology.standalone.log.info('Uniqueness enforced: checks MOD ADD entry is rejected')
- topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
- 'objectclass': "top person".split(),
- 'sn': ACTIVE_USER_1_CN,
- 'cn': ACTIVE_USER_1_CN})))
+ topology_st.standalone.log.info('Uniqueness enforced: checks MOD ADD entry is rejected')
+ topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': ACTIVE_USER_1_CN,
+ 'cn': ACTIVE_USER_1_CN})))
- topology.standalone.add_s(Entry((ACTIVE_USER_2_DN, {
- 'objectclass': "top person".split(),
- 'sn': ACTIVE_USER_2_CN,
- 'cn': ACTIVE_USER_2_CN})))
+ topology_st.standalone.add_s(Entry((ACTIVE_USER_2_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': ACTIVE_USER_2_CN,
+ 'cn': ACTIVE_USER_2_CN})))
try:
- topology.standalone.modify_s(ACTIVE_USER_2_DN, [(ldap.MOD_ADD, 'cn', ACTIVE_USER_1_CN)])
+ topology_st.standalone.modify_s(ACTIVE_USER_2_DN, [(ldap.MOD_ADD, 'cn', ACTIVE_USER_1_CN)])
except ldap.CONSTRAINT_VIOLATION:
# yes it is expected
pass
- topology.standalone.log.info('Uniqueness enforced: checks MOD REPLACE entry is rejected')
+ topology_st.standalone.log.info('Uniqueness enforced: checks MOD REPLACE entry is rejected')
try:
- topology.standalone.modify_s(ACTIVE_USER_2_DN, [(ldap.MOD_REPLACE, 'cn', [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN])])
+ topology_st.standalone.modify_s(ACTIVE_USER_2_DN,
+ [(ldap.MOD_REPLACE, 'cn', [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN])])
except ldap.CONSTRAINT_VIOLATION:
# yes it is expected
pass
# cleanup the stuff now
- topology.standalone.delete_s(config.dn)
- topology.standalone.delete_s(ACTIVE_USER_1_DN)
- topology.standalone.delete_s(ACTIVE_USER_2_DN)
+ topology_st.standalone.delete_s(config.dn)
+ topology_st.standalone.delete_s(ACTIVE_USER_1_DN)
+ topology_st.standalone.delete_s(ACTIVE_USER_2_DN)
-def _active_container_modrdn(topology, type_config='old'):
+def _active_container_modrdn(topology_st, type_config='old'):
'''
Check uniqueness in a single container
Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value
'''
- config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, across_subtrees=False)
+ config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config,
+ across_subtrees=False)
# enable the 'cn' uniqueness on Active
- topology.standalone.add_s(config)
- topology.standalone.restart(timeout=120)
+ topology_st.standalone.add_s(config)
+ topology_st.standalone.restart(timeout=120)
- topology.standalone.log.info('Uniqueness enforced: checks MODRDN entry is rejected')
+ topology_st.standalone.log.info('Uniqueness enforced: checks MODRDN entry is rejected')
- topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
- 'objectclass': "top person".split(),
- 'sn': ACTIVE_USER_1_CN,
- 'cn': [ACTIVE_USER_1_CN, 'dummy']})))
+ topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': ACTIVE_USER_1_CN,
+ 'cn': [ACTIVE_USER_1_CN, 'dummy']})))
- topology.standalone.add_s(Entry((ACTIVE_USER_2_DN, {
- 'objectclass': "top person".split(),
- 'sn': ACTIVE_USER_2_CN,
- 'cn': ACTIVE_USER_2_CN})))
+ topology_st.standalone.add_s(Entry((ACTIVE_USER_2_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': ACTIVE_USER_2_CN,
+ 'cn': ACTIVE_USER_2_CN})))
try:
- topology.standalone.rename_s(ACTIVE_USER_2_DN, 'cn=dummy', delold=0)
+ topology_st.standalone.rename_s(ACTIVE_USER_2_DN, 'cn=dummy', delold=0)
except ldap.CONSTRAINT_VIOLATION:
# yes it is expected
pass
# cleanup the stuff now
- topology.standalone.delete_s(config.dn)
- topology.standalone.delete_s(ACTIVE_USER_1_DN)
- topology.standalone.delete_s(ACTIVE_USER_2_DN)
+ topology_st.standalone.delete_s(config.dn)
+ topology_st.standalone.delete_s(ACTIVE_USER_1_DN)
+ topology_st.standalone.delete_s(ACTIVE_USER_2_DN)
-def _active_stage_containers_add(topology, type_config='old', across_subtrees=False):
+def _active_stage_containers_add(topology_st, type_config='old', across_subtrees=False):
'''
Check uniqueness in several containers
Add an entry on a container with a given 'cn'
@@ -300,104 +262,109 @@ def _active_stage_containers_add(topology, type_config='old', across_subtrees=Fa
with across_subtrees=True check we CAN NOT add an entry with the same 'cn' value on the other container
'''
- config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN, type_config=type_config, across_subtrees=False)
+ config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN,
+ type_config=type_config, across_subtrees=False)
- topology.standalone.add_s(config)
- topology.standalone.restart(timeout=120)
- topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
- 'objectclass': "top person".split(),
- 'sn': ACTIVE_USER_1_CN,
- 'cn': ACTIVE_USER_1_CN})))
+ topology_st.standalone.add_s(config)
+ topology_st.standalone.restart(timeout=120)
+ topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': ACTIVE_USER_1_CN,
+ 'cn': ACTIVE_USER_1_CN})))
try:
# adding an entry on a separated contains with the same 'cn'
- topology.standalone.add_s(Entry((STAGE_USER_1_DN, {
- 'objectclass': "top person".split(),
- 'sn': STAGE_USER_1_CN,
- 'cn': ACTIVE_USER_1_CN})))
+ topology_st.standalone.add_s(Entry((STAGE_USER_1_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': STAGE_USER_1_CN,
+ 'cn': ACTIVE_USER_1_CN})))
except ldap.CONSTRAINT_VIOLATION:
- assert across_subtrees
+ assert across_subtrees
# cleanup the stuff now
- topology.standalone.delete_s(config.dn)
- topology.standalone.delete_s(ACTIVE_USER_1_DN)
- topology.standalone.delete_s(STAGE_USER_1_DN)
+ topology_st.standalone.delete_s(config.dn)
+ topology_st.standalone.delete_s(ACTIVE_USER_1_DN)
+ topology_st.standalone.delete_s(STAGE_USER_1_DN)
-def _active_stage_containers_mod(topology, type_config='old', across_subtrees=False):
+def _active_stage_containers_mod(topology_st, type_config='old', across_subtrees=False):
'''
Check uniqueness in a several containers
Add an entry on a container with a given 'cn', then check we CAN mod an entry with the same 'cn' value on the other container
'''
- config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN, type_config=type_config, across_subtrees=False)
+ config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN,
+ type_config=type_config, across_subtrees=False)
- topology.standalone.add_s(config)
- topology.standalone.restart(timeout=120)
+ topology_st.standalone.add_s(config)
+ topology_st.standalone.restart(timeout=120)
# adding an entry on active with a different 'cn'
- topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
- 'objectclass': "top person".split(),
- 'sn': ACTIVE_USER_1_CN,
- 'cn': ACTIVE_USER_2_CN})))
+ topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': ACTIVE_USER_1_CN,
+ 'cn': ACTIVE_USER_2_CN})))
# adding an entry on a stage with a different 'cn'
- topology.standalone.add_s(Entry((STAGE_USER_1_DN, {
- 'objectclass': "top person".split(),
- 'sn': STAGE_USER_1_CN,
- 'cn': STAGE_USER_1_CN})))
+ topology_st.standalone.add_s(Entry((STAGE_USER_1_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': STAGE_USER_1_CN,
+ 'cn': STAGE_USER_1_CN})))
try:
# modify add same value
- topology.standalone.modify_s(STAGE_USER_1_DN, [(ldap.MOD_ADD, 'cn', [ACTIVE_USER_2_CN])])
+ topology_st.standalone.modify_s(STAGE_USER_1_DN, [(ldap.MOD_ADD, 'cn', [ACTIVE_USER_2_CN])])
except ldap.CONSTRAINT_VIOLATION:
assert across_subtrees
- topology.standalone.delete_s(STAGE_USER_1_DN)
- topology.standalone.add_s(Entry((STAGE_USER_1_DN, {
- 'objectclass': "top person".split(),
- 'sn': STAGE_USER_1_CN,
- 'cn': STAGE_USER_2_CN})))
+ topology_st.standalone.delete_s(STAGE_USER_1_DN)
+ topology_st.standalone.add_s(Entry((STAGE_USER_1_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': STAGE_USER_1_CN,
+ 'cn': STAGE_USER_2_CN})))
try:
# modify replace same value
- topology.standalone.modify_s(STAGE_USER_1_DN, [(ldap.MOD_REPLACE, 'cn', [STAGE_USER_2_CN, ACTIVE_USER_1_CN])])
+ topology_st.standalone.modify_s(STAGE_USER_1_DN,
+ [(ldap.MOD_REPLACE, 'cn', [STAGE_USER_2_CN, ACTIVE_USER_1_CN])])
except ldap.CONSTRAINT_VIOLATION:
assert across_subtrees
# cleanup the stuff now
- topology.standalone.delete_s(config.dn)
- topology.standalone.delete_s(ACTIVE_USER_1_DN)
- topology.standalone.delete_s(STAGE_USER_1_DN)
+ topology_st.standalone.delete_s(config.dn)
+ topology_st.standalone.delete_s(ACTIVE_USER_1_DN)
+ topology_st.standalone.delete_s(STAGE_USER_1_DN)
-def _active_stage_containers_modrdn(topology, type_config='old', across_subtrees=False):
+def _active_stage_containers_modrdn(topology_st, type_config='old', across_subtrees=False):
'''
Check uniqueness in a several containers
Add and entry with a given 'cn', then check we CAN modrdn an entry with the same 'cn' value on the other container
'''
- config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN, type_config=type_config, across_subtrees=False)
+ config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN,
+ type_config=type_config, across_subtrees=False)
# enable the 'cn' uniqueness on Active and Stage
- topology.standalone.add_s(config)
- topology.standalone.restart(timeout=120)
- topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
- 'objectclass': "top person".split(),
- 'sn': ACTIVE_USER_1_CN,
- 'cn': [ACTIVE_USER_1_CN, 'dummy']})))
-
- topology.standalone.add_s(Entry((STAGE_USER_1_DN, {
- 'objectclass': "top person".split(),
- 'sn': STAGE_USER_1_CN,
- 'cn': STAGE_USER_1_CN})))
+ topology_st.standalone.add_s(config)
+ topology_st.standalone.restart(timeout=120)
+ topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': ACTIVE_USER_1_CN,
+ 'cn': [ACTIVE_USER_1_CN, 'dummy']})))
+
+ topology_st.standalone.add_s(Entry((STAGE_USER_1_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': STAGE_USER_1_CN,
+ 'cn': STAGE_USER_1_CN})))
try:
- topology.standalone.rename_s(STAGE_USER_1_DN, 'cn=dummy', delold=0)
+ topology_st.standalone.rename_s(STAGE_USER_1_DN, 'cn=dummy', delold=0)
# check stage entry has 'cn=dummy'
- stage_ent = topology.standalone.getEntry("cn=dummy,%s" % (STAGE_DN), ldap.SCOPE_BASE, "objectclass=*", ['cn'])
+ stage_ent = topology_st.standalone.getEntry("cn=dummy,%s" % (STAGE_DN), ldap.SCOPE_BASE, "objectclass=*",
+ ['cn'])
assert stage_ent.hasAttr('cn')
found = False
for value in stage_ent.getValues('cn'):
@@ -406,7 +373,7 @@ def _active_stage_containers_modrdn(topology, type_config='old', across_subtrees
assert found
# check active entry has 'cn=dummy'
- active_ent = topology.standalone.getEntry(ACTIVE_USER_1_DN, ldap.SCOPE_BASE, "objectclass=*", ['cn'])
+ active_ent = topology_st.standalone.getEntry(ACTIVE_USER_1_DN, ldap.SCOPE_BASE, "objectclass=*", ['cn'])
assert active_ent.hasAttr('cn')
found = False
for value in stage_ent.getValues('cn'):
@@ -414,19 +381,19 @@ def _active_stage_containers_modrdn(topology, type_config='old', across_subtrees
found = True
assert found
- topology.standalone.delete_s("cn=dummy,%s" % (STAGE_DN))
+ topology_st.standalone.delete_s("cn=dummy,%s" % (STAGE_DN))
except ldap.CONSTRAINT_VIOLATION:
assert across_subtrees
- topology.standalone.delete_s(STAGE_USER_1_DN)
+ topology_st.standalone.delete_s(STAGE_USER_1_DN)
# cleanup the stuff now
- topology.standalone.delete_s(config.dn)
- topology.standalone.delete_s(ACTIVE_USER_1_DN)
+ topology_st.standalone.delete_s(config.dn)
+ topology_st.standalone.delete_s(ACTIVE_USER_1_DN)
-def _config_file(topology, action='save'):
- dse_ldif = topology.standalone.confdir + '/dse.ldif'
- sav_file = topology.standalone.confdir + '/dse.ldif.ticket47823'
+def _config_file(topology_st, action='save'):
+ dse_ldif = topology_st.standalone.confdir + '/dse.ldif'
+ sav_file = topology_st.standalone.confdir + '/dse.ldif.ticket47823'
if action == 'save':
shutil.copy(dse_ldif, sav_file)
else:
@@ -458,513 +425,531 @@ def _pattern_errorlog(file, log_pattern):
return found
-def test_ticket47823_init(topology):
+def test_ticket47823_init(topology_st):
"""
"""
# Enabled the plugins
- topology.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS)
- topology.standalone.restart(timeout=120)
-
- topology.standalone.add_s(Entry((PROVISIONING_DN, {'objectclass': "top nscontainer".split(),
- 'cn': PROVISIONING_CN})))
- topology.standalone.add_s(Entry((ACTIVE_DN, {'objectclass': "top nscontainer".split(),
- 'cn': ACTIVE_CN})))
- topology.standalone.add_s(Entry((STAGE_DN, {'objectclass': "top nscontainer".split(),
- 'cn': STAGE_CN})))
- topology.standalone.add_s(Entry((DELETE_DN, {'objectclass': "top nscontainer".split(),
- 'cn': DELETE_CN})))
- topology.standalone.errorlog_file = open(topology.standalone.errlog, "r")
-
- topology.standalone.stop(timeout=120)
+ topology_st.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS)
+ topology_st.standalone.restart(timeout=120)
+
+ topology_st.standalone.add_s(Entry((PROVISIONING_DN, {'objectclass': "top nscontainer".split(),
+ 'cn': PROVISIONING_CN})))
+ topology_st.standalone.add_s(Entry((ACTIVE_DN, {'objectclass': "top nscontainer".split(),
+ 'cn': ACTIVE_CN})))
+ topology_st.standalone.add_s(Entry((STAGE_DN, {'objectclass': "top nscontainer".split(),
+ 'cn': STAGE_CN})))
+ topology_st.standalone.add_s(Entry((DELETE_DN, {'objectclass': "top nscontainer".split(),
+ 'cn': DELETE_CN})))
+ topology_st.standalone.errorlog_file = open(topology_st.standalone.errlog, "r")
+
+ topology_st.standalone.stop(timeout=120)
time.sleep(1)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
time.sleep(3)
-def test_ticket47823_one_container_add(topology):
+def test_ticket47823_one_container_add(topology_st):
'''
Check uniqueness in a single container
Add and entry with a given 'cn', then check we can not add an entry with the same 'cn' value
'''
- _header(topology, "With former config (args), check attribute uniqueness with 'cn' (ADD) ")
+ _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (ADD) ")
- _active_container_add(topology, type_config='old')
+ _active_container_add(topology_st, type_config='old')
- _header(topology, "With new config (args), check attribute uniqueness with 'cn' (ADD) ")
+ _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (ADD) ")
- _active_container_add(topology, type_config='new')
+ _active_container_add(topology_st, type_config='new')
-def test_ticket47823_one_container_mod(topology):
+def test_ticket47823_one_container_mod(topology_st):
'''
Check uniqueness in a single container
Add and entry with a given 'cn', then check we can not modify an entry with the same 'cn' value
'''
- _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MOD)")
+ _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (MOD)")
- _active_container_mod(topology, type_config='old')
+ _active_container_mod(topology_st, type_config='old')
- _header(topology, "With new config (args), check attribute uniqueness with 'cn' (MOD)")
+ _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (MOD)")
- _active_container_mod(topology, type_config='new')
+ _active_container_mod(topology_st, type_config='new')
-def test_ticket47823_one_container_modrdn(topology):
+def test_ticket47823_one_container_modrdn(topology_st):
'''
Check uniqueness in a single container
Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value
'''
- _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MODRDN)")
+ _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (MODRDN)")
- _active_container_modrdn(topology, type_config='old')
+ _active_container_modrdn(topology_st, type_config='old')
- _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MODRDN)")
+ _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (MODRDN)")
- _active_container_modrdn(topology, type_config='new')
+ _active_container_modrdn(topology_st, type_config='new')
-def test_ticket47823_multi_containers_add(topology):
+def test_ticket47823_multi_containers_add(topology_st):
'''
Check uniqueness in a several containers
Add and entry with a given 'cn', then check we can not add an entry with the same 'cn' value
'''
- _header(topology, "With former config (args), check attribute uniqueness with 'cn' (ADD) ")
+ _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (ADD) ")
- _active_stage_containers_add(topology, type_config='old', across_subtrees=False)
+ _active_stage_containers_add(topology_st, type_config='old', across_subtrees=False)
- _header(topology, "With new config (args), check attribute uniqueness with 'cn' (ADD) ")
+ _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (ADD) ")
- _active_stage_containers_add(topology, type_config='new', across_subtrees=False)
+ _active_stage_containers_add(topology_st, type_config='new', across_subtrees=False)
-def test_ticket47823_multi_containers_mod(topology):
+def test_ticket47823_multi_containers_mod(topology_st):
'''
Check uniqueness in a several containers
Add an entry on a container with a given 'cn', then check we CAN mod an entry with the same 'cn' value on the other container
'''
- _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MOD) on separated container")
+ _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (MOD) on separated container")
- topology.standalone.log.info('Uniqueness not enforced: if same \'cn\' modified (add/replace) on separated containers')
- _active_stage_containers_mod(topology, type_config='old', across_subtrees=False)
+ topology_st.standalone.log.info(
+ 'Uniqueness not enforced: if same \'cn\' modified (add/replace) on separated containers')
+ _active_stage_containers_mod(topology_st, type_config='old', across_subtrees=False)
- _header(topology, "With new config (args), check attribute uniqueness with 'cn' (MOD) on separated container")
+ _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (MOD) on separated container")
- topology.standalone.log.info('Uniqueness not enforced: if same \'cn\' modified (add/replace) on separated containers')
- _active_stage_containers_mod(topology, type_config='new', across_subtrees=False)
+ topology_st.standalone.log.info(
+ 'Uniqueness not enforced: if same \'cn\' modified (add/replace) on separated containers')
+ _active_stage_containers_mod(topology_st, type_config='new', across_subtrees=False)
-def test_ticket47823_multi_containers_modrdn(topology):
+def test_ticket47823_multi_containers_modrdn(topology_st):
'''
Check uniqueness in a several containers
Add and entry with a given 'cn', then check we CAN modrdn an entry with the same 'cn' value on the other container
'''
- _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MODRDN) on separated containers")
+ _header(topology_st,
+ "With former config (args), check attribute uniqueness with 'cn' (MODRDN) on separated containers")
- topology.standalone.log.info('Uniqueness not enforced: checks MODRDN entry is accepted on separated containers')
- _active_stage_containers_modrdn(topology, type_config='old', across_subtrees=False)
+ topology_st.standalone.log.info('Uniqueness not enforced: checks MODRDN entry is accepted on separated containers')
+ _active_stage_containers_modrdn(topology_st, type_config='old', across_subtrees=False)
- topology.standalone.log.info('Uniqueness not enforced: checks MODRDN entry is accepted on separated containers')
- _active_stage_containers_modrdn(topology, type_config='old')
+ topology_st.standalone.log.info('Uniqueness not enforced: checks MODRDN entry is accepted on separated containers')
+ _active_stage_containers_modrdn(topology_st, type_config='old')
-def test_ticket47823_across_multi_containers_add(topology):
+def test_ticket47823_across_multi_containers_add(topology_st):
'''
Check uniqueness across several containers, uniquely with the new configuration
Add and entry with a given 'cn', then check we can not add an entry with the same 'cn' value
'''
- _header(topology, "With new config (args), check attribute uniqueness with 'cn' (ADD) across several containers")
+ _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (ADD) across several containers")
- _active_stage_containers_add(topology, type_config='old', across_subtrees=True)
+ _active_stage_containers_add(topology_st, type_config='old', across_subtrees=True)
-def test_ticket47823_across_multi_containers_mod(topology):
+def test_ticket47823_across_multi_containers_mod(topology_st):
'''
Check uniqueness across several containers, uniquely with the new configuration
Add and entry with a given 'cn', then check we can not modifiy an entry with the same 'cn' value
'''
- _header(topology, "With new config (args), check attribute uniqueness with 'cn' (MOD) across several containers")
+ _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (MOD) across several containers")
- _active_stage_containers_mod(topology, type_config='old', across_subtrees=True)
+ _active_stage_containers_mod(topology_st, type_config='old', across_subtrees=True)
-def test_ticket47823_across_multi_containers_modrdn(topology):
+def test_ticket47823_across_multi_containers_modrdn(topology_st):
'''
Check uniqueness across several containers, uniquely with the new configuration
Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value
'''
- _header(topology, "With new config (args), check attribute uniqueness with 'cn' (MODRDN) across several containers")
+ _header(topology_st,
+ "With new config (args), check attribute uniqueness with 'cn' (MODRDN) across several containers")
- _active_stage_containers_modrdn(topology, type_config='old', across_subtrees=True)
+ _active_stage_containers_modrdn(topology_st, type_config='old', across_subtrees=True)
-def test_ticket47823_invalid_config_1(topology):
+def test_ticket47823_invalid_config_1(topology_st):
'''
Check that an invalid config is detected. No uniqueness enforced
Using old config: arg0 is missing
'''
- _header(topology, "Invalid config (old): arg0 is missing")
+ _header(topology_st, "Invalid config (old): arg0 is missing")
- _config_file(topology, action='save')
+ _config_file(topology_st, action='save')
# create an invalid config without arg0
- config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False)
+ config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old',
+ across_subtrees=False)
del config.data['nsslapd-pluginarg0']
# replace 'cn' uniqueness entry
try:
- topology.standalone.delete_s(config.dn)
+ topology_st.standalone.delete_s(config.dn)
except ldap.NO_SUCH_OBJECT:
pass
- topology.standalone.add_s(config)
+ topology_st.standalone.add_s(config)
- topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
# Check the server did not restart
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')])
+ topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')])
try:
- topology.standalone.restart(timeout=5)
- ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ topology_st.standalone.restart(timeout=5)
+ ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)",
+ ALL_CONFIG_ATTRS)
if ent:
# be sure to restore a valid config before assert
- _config_file(topology, action='restore')
+ _config_file(topology_st, action='restore')
assert not ent
except subprocess.CalledProcessError:
- pass
+ pass
# Check the expected error message
regex = re.compile("Unable to parse old style")
- res = _pattern_errorlog(topology.standalone.errorlog_file, regex)
+ res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex)
if not res:
# be sure to restore a valid config before assert
- _config_file(topology, action='restore')
+ _config_file(topology_st, action='restore')
assert res
# Check we can restart the server
- _config_file(topology, action='restore')
- topology.standalone.start(timeout=5)
+ _config_file(topology_st, action='restore')
+ topology_st.standalone.start(timeout=5)
try:
- topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
except ldap.NO_SUCH_OBJECT:
pass
-def test_ticket47823_invalid_config_2(topology):
+def test_ticket47823_invalid_config_2(topology_st):
'''
Check that an invalid config is detected. No uniqueness enforced
Using old config: arg1 is missing
'''
- _header(topology, "Invalid config (old): arg1 is missing")
+ _header(topology_st, "Invalid config (old): arg1 is missing")
- _config_file(topology, action='save')
+ _config_file(topology_st, action='save')
# create an invalid config without arg0
- config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False)
+ config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old',
+ across_subtrees=False)
del config.data['nsslapd-pluginarg1']
# replace 'cn' uniqueness entry
try:
- topology.standalone.delete_s(config.dn)
+ topology_st.standalone.delete_s(config.dn)
except ldap.NO_SUCH_OBJECT:
pass
- topology.standalone.add_s(config)
+ topology_st.standalone.add_s(config)
- topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
# Check the server did not restart
try:
- topology.standalone.restart(timeout=5)
- ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ topology_st.standalone.restart(timeout=5)
+ ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)",
+ ALL_CONFIG_ATTRS)
if ent:
# be sure to restore a valid config before assert
- _config_file(topology, action='restore')
+ _config_file(topology_st, action='restore')
assert not ent
except subprocess.CalledProcessError:
- pass
+ pass
# Check the expected error message
regex = re.compile("No valid subtree is defined")
- res = _pattern_errorlog(topology.standalone.errorlog_file, regex)
+ res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex)
if not res:
# be sure to restore a valid config before assert
- _config_file(topology, action='restore')
+ _config_file(topology_st, action='restore')
assert res
# Check we can restart the server
- _config_file(topology, action='restore')
- topology.standalone.start(timeout=5)
+ _config_file(topology_st, action='restore')
+ topology_st.standalone.start(timeout=5)
try:
- topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
except ldap.NO_SUCH_OBJECT:
pass
-def test_ticket47823_invalid_config_3(topology):
+def test_ticket47823_invalid_config_3(topology_st):
'''
Check that an invalid config is detected. No uniqueness enforced
Using old config: arg0 is missing
'''
- _header(topology, "Invalid config (old): arg0 is missing but new config attrname exists")
+ _header(topology_st, "Invalid config (old): arg0 is missing but new config attrname exists")
- _config_file(topology, action='save')
+ _config_file(topology_st, action='save')
# create an invalid config without arg0
- config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False)
+ config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old',
+ across_subtrees=False)
del config.data['nsslapd-pluginarg0']
config.data['uniqueness-attribute-name'] = 'cn'
# replace 'cn' uniqueness entry
try:
- topology.standalone.delete_s(config.dn)
+ topology_st.standalone.delete_s(config.dn)
except ldap.NO_SUCH_OBJECT:
pass
- topology.standalone.add_s(config)
+ topology_st.standalone.add_s(config)
- topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
# Check the server did not restart
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')])
+ topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')])
try:
- topology.standalone.restart(timeout=5)
- ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ topology_st.standalone.restart(timeout=5)
+ ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)",
+ ALL_CONFIG_ATTRS)
if ent:
# be sure to restore a valid config before assert
- _config_file(topology, action='restore')
+ _config_file(topology_st, action='restore')
assert not ent
except subprocess.CalledProcessError:
- pass
+ pass
# Check the expected error message
regex = re.compile("Unable to parse old style")
- res = _pattern_errorlog(topology.standalone.errorlog_file, regex)
+ res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex)
if not res:
# be sure to restore a valid config before assert
- _config_file(topology, action='restore')
+ _config_file(topology_st, action='restore')
assert res
# Check we can restart the server
- _config_file(topology, action='restore')
- topology.standalone.start(timeout=5)
+ _config_file(topology_st, action='restore')
+ topology_st.standalone.start(timeout=5)
try:
- topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
except ldap.NO_SUCH_OBJECT:
pass
-def test_ticket47823_invalid_config_4(topology):
+def test_ticket47823_invalid_config_4(topology_st):
'''
Check that an invalid config is detected. No uniqueness enforced
Using old config: arg1 is missing
'''
- _header(topology, "Invalid config (old): arg1 is missing but new config exist")
+ _header(topology_st, "Invalid config (old): arg1 is missing but new config exist")
- _config_file(topology, action='save')
+ _config_file(topology_st, action='save')
# create an invalid config without arg0
- config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False)
+ config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old',
+ across_subtrees=False)
del config.data['nsslapd-pluginarg1']
config.data['uniqueness-subtrees'] = ACTIVE_DN
# replace 'cn' uniqueness entry
try:
- topology.standalone.delete_s(config.dn)
+ topology_st.standalone.delete_s(config.dn)
except ldap.NO_SUCH_OBJECT:
pass
- topology.standalone.add_s(config)
+ topology_st.standalone.add_s(config)
- topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
# Check the server did not restart
try:
- topology.standalone.restart(timeout=5)
- ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ topology_st.standalone.restart(timeout=5)
+ ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)",
+ ALL_CONFIG_ATTRS)
if ent:
# be sure to restore a valid config before assert
- _config_file(topology, action='restore')
+ _config_file(topology_st, action='restore')
assert not ent
except subprocess.CalledProcessError:
- pass
+ pass
# Check the expected error message
regex = re.compile("No valid subtree is defined")
- res = _pattern_errorlog(topology.standalone.errorlog_file, regex)
+ res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex)
if not res:
# be sure to restore a valid config before assert
- _config_file(topology, action='restore')
+ _config_file(topology_st, action='restore')
assert res
# Check we can restart the server
- _config_file(topology, action='restore')
- topology.standalone.start(timeout=5)
+ _config_file(topology_st, action='restore')
+ topology_st.standalone.start(timeout=5)
try:
- topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
except ldap.NO_SUCH_OBJECT:
pass
-def test_ticket47823_invalid_config_5(topology):
+def test_ticket47823_invalid_config_5(topology_st):
'''
Check that an invalid config is detected. No uniqueness enforced
Using new config: uniqueness-attribute-name is missing
'''
- _header(topology, "Invalid config (new): uniqueness-attribute-name is missing")
+ _header(topology_st, "Invalid config (new): uniqueness-attribute-name is missing")
- _config_file(topology, action='save')
+ _config_file(topology_st, action='save')
# create an invalid config without arg0
- config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='new', across_subtrees=False)
+ config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='new',
+ across_subtrees=False)
del config.data['uniqueness-attribute-name']
# replace 'cn' uniqueness entry
try:
- topology.standalone.delete_s(config.dn)
+ topology_st.standalone.delete_s(config.dn)
except ldap.NO_SUCH_OBJECT:
pass
- topology.standalone.add_s(config)
+ topology_st.standalone.add_s(config)
- topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
# Check the server did not restart
try:
- topology.standalone.restart(timeout=5)
- ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ topology_st.standalone.restart(timeout=5)
+ ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)",
+ ALL_CONFIG_ATTRS)
if ent:
# be sure to restore a valid config before assert
- _config_file(topology, action='restore')
+ _config_file(topology_st, action='restore')
assert not ent
except subprocess.CalledProcessError:
- pass
+ pass
# Check the expected error message
regex = re.compile("Attribute name not defined")
- res = _pattern_errorlog(topology.standalone.errorlog_file, regex)
+ res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex)
if not res:
# be sure to restore a valid config before assert
- _config_file(topology, action='restore')
+ _config_file(topology_st, action='restore')
assert res
# Check we can restart the server
- _config_file(topology, action='restore')
- topology.standalone.start(timeout=5)
+ _config_file(topology_st, action='restore')
+ topology_st.standalone.start(timeout=5)
try:
- topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
except ldap.NO_SUCH_OBJECT:
pass
-def test_ticket47823_invalid_config_6(topology):
+def test_ticket47823_invalid_config_6(topology_st):
'''
Check that an invalid config is detected. No uniqueness enforced
Using new config: uniqueness-subtrees is missing
'''
- _header(topology, "Invalid config (new): uniqueness-subtrees is missing")
+ _header(topology_st, "Invalid config (new): uniqueness-subtrees is missing")
- _config_file(topology, action='save')
+ _config_file(topology_st, action='save')
# create an invalid config without arg0
- config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='new', across_subtrees=False)
+ config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='new',
+ across_subtrees=False)
del config.data['uniqueness-subtrees']
# replace 'cn' uniqueness entry
try:
- topology.standalone.delete_s(config.dn)
+ topology_st.standalone.delete_s(config.dn)
except ldap.NO_SUCH_OBJECT:
pass
- topology.standalone.add_s(config)
+ topology_st.standalone.add_s(config)
- topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
# Check the server did not restart
try:
- topology.standalone.restart(timeout=5)
- ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ topology_st.standalone.restart(timeout=5)
+ ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)",
+ ALL_CONFIG_ATTRS)
if ent:
# be sure to restore a valid config before assert
- _config_file(topology, action='restore')
+ _config_file(topology_st, action='restore')
assert not ent
except subprocess.CalledProcessError:
- pass
+ pass
# Check the expected error message
regex = re.compile("Objectclass for subtree entries is not defined")
- res = _pattern_errorlog(topology.standalone.errorlog_file, regex)
+ res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex)
if not res:
# be sure to restore a valid config before assert
- _config_file(topology, action='restore')
+ _config_file(topology_st, action='restore')
assert res
# Check we can restart the server
- _config_file(topology, action='restore')
- topology.standalone.start(timeout=5)
+ _config_file(topology_st, action='restore')
+ topology_st.standalone.start(timeout=5)
try:
- topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
except ldap.NO_SUCH_OBJECT:
pass
-def test_ticket47823_invalid_config_7(topology):
+def test_ticket47823_invalid_config_7(topology_st):
'''
Check that an invalid config is detected. No uniqueness enforced
Using new config: uniqueness-subtrees is missing
'''
- _header(topology, "Invalid config (new): uniqueness-subtrees are invalid")
+ _header(topology_st, "Invalid config (new): uniqueness-subtrees are invalid")
- _config_file(topology, action='save')
+ _config_file(topology_st, action='save')
# create an invalid config without arg0
- config = _build_config(topology, attr_name='cn', subtree_1="this_is dummy DN", subtree_2="an other=dummy DN", type_config='new', across_subtrees=False)
+ config = _build_config(topology_st, attr_name='cn', subtree_1="this_is dummy DN", subtree_2="an other=dummy DN",
+ type_config='new', across_subtrees=False)
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')])
+ topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')])
# replace 'cn' uniqueness entry
try:
- topology.standalone.delete_s(config.dn)
+ topology_st.standalone.delete_s(config.dn)
except ldap.NO_SUCH_OBJECT:
pass
- topology.standalone.add_s(config)
+ topology_st.standalone.add_s(config)
- topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
# Check the server did not restart
try:
- topology.standalone.restart(timeout=5)
- ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ topology_st.standalone.restart(timeout=5)
+ ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)",
+ ALL_CONFIG_ATTRS)
if ent:
# be sure to restore a valid config before assert
- _config_file(topology, action='restore')
+ _config_file(topology_st, action='restore')
assert not ent
except subprocess.CalledProcessError:
- pass
+ pass
# Check the expected error message
regex = re.compile("No valid subtree is defined")
- res = _pattern_errorlog(topology.standalone.errorlog_file, regex)
+ res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex)
if not res:
# be sure to restore a valid config before assert
- _config_file(topology, action='restore')
+ _config_file(topology_st, action='restore')
assert res
# Check we can restart the server
- _config_file(topology, action='restore')
- topology.standalone.start(timeout=5)
+ _config_file(topology_st, action='restore')
+ topology_st.standalone.start(timeout=5)
try:
- topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
except ldap.NO_SUCH_OBJECT:
pass
diff --git a/dirsrvtests/tests/tickets/ticket47828_test.py b/dirsrvtests/tests/tickets/ticket47828_test.py
index e3b8306..8626344 100644
--- a/dirsrvtests/tests/tickets/ticket47828_test.py
+++ b/dirsrvtests/tests/tickets/ticket47828_test.py
@@ -6,18 +6,13 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
import logging
-import socket
+
+import ldap
import pytest
-import shutil
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
@@ -32,73 +27,33 @@ ACTIVE_USER1_CN = 'active user1'
ACTIVE_USER1_DN = 'cn=%s,%s' % (ACTIVE_USER1_CN, SUFFIX)
STAGED_USER1_CN = 'staged user1'
STAGED_USER1_DN = 'cn=%s,%s' % (STAGED_USER1_CN, PROVISIONING)
-DUMMY_USER1_CN = 'dummy user1'
-DUMMY_USER1_DN = 'cn=%s,%s' % (DUMMY_USER1_CN, DUMMY_CONTAINER)
+DUMMY_USER1_CN = 'dummy user1'
+DUMMY_USER1_DN = 'cn=%s,%s' % (DUMMY_USER1_CN, DUMMY_CONTAINER)
ALLOCATED_ATTR = 'employeeNumber'
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- At the beginning, It may exists a standalone instance.
- It may also exists a backup for the standalone instance.
- '''
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
+def _header(topology_st, label):
+ topology_st.standalone.log.info("\n\n###############################################")
+ topology_st.standalone.log.info("#######")
+ topology_st.standalone.log.info("####### %s" % label)
+ topology_st.standalone.log.info("#######")
+ topology_st.standalone.log.info("###############################################")
- return TopologyStandalone(standalone)
-
-def _header(topology, label):
- topology.standalone.log.info("\n\n###############################################")
- topology.standalone.log.info("#######")
- topology.standalone.log.info("####### %s" % label)
- topology.standalone.log.info("#######")
- topology.standalone.log.info("###############################################")
-
-
-def test_ticket47828_init(topology):
+def test_ticket47828_init(topology_st):
"""
Enable DNA
"""
- topology.standalone.plugins.enable(name=PLUGIN_DNA)
+ topology_st.standalone.plugins.enable(name=PLUGIN_DNA)
- topology.standalone.add_s(Entry((PROVISIONING,{'objectclass': "top nscontainer".split(),
- 'cn': 'provisioning'})))
- topology.standalone.add_s(Entry((DUMMY_CONTAINER,{'objectclass': "top nscontainer".split(),
- 'cn': 'dummy container'})))
+ topology_st.standalone.add_s(Entry((PROVISIONING, {'objectclass': "top nscontainer".split(),
+ 'cn': 'provisioning'})))
+ topology_st.standalone.add_s(Entry((DUMMY_CONTAINER, {'objectclass': "top nscontainer".split(),
+ 'cn': 'dummy container'})))
dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN)
- topology.standalone.add_s(Entry((dn_config, {'objectclass': "top extensibleObject".split(),
+ topology_st.standalone.add_s(Entry((dn_config, {'objectclass': "top extensibleObject".split(),
'cn': 'excluded scope',
'dnaType': ALLOCATED_ATTR,
'dnaNextValue': str(1000),
@@ -106,538 +61,583 @@ def test_ticket47828_init(topology):
'dnaMagicRegen': str(-1),
'dnaFilter': '(&(objectClass=person)(objectClass=organizationalPerson)(objectClass=inetOrgPerson))',
'dnaScope': SUFFIX})))
- topology.standalone.restart(timeout=10)
+ topology_st.standalone.restart(timeout=10)
-def test_ticket47828_run_0(topology):
+def test_ticket47828_run_0(topology_st):
"""
NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is set
"""
- _header(topology, 'NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is set')
+ _header(topology_st, 'NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is set')
- topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': ACTIVE_USER1_CN,
- 'sn': ACTIVE_USER1_CN,
- ALLOCATED_ATTR: str(-1)})))
- ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ topology_st.standalone.add_s(
+ Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': ACTIVE_USER1_CN,
+ 'sn': ACTIVE_USER1_CN,
+ ALLOCATED_ATTR: str(-1)})))
+ ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
assert ent.hasAttr(ALLOCATED_ATTR)
assert ent.getValue(ALLOCATED_ATTR) != str(-1)
- topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
- topology.standalone.delete_s(ACTIVE_USER1_DN)
+ topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+ topology_st.standalone.delete_s(ACTIVE_USER1_DN)
-def test_ticket47828_run_1(topology):
+def test_ticket47828_run_1(topology_st):
"""
NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)
"""
- _header(topology, 'NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
+ _header(topology_st, 'NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
- topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': ACTIVE_USER1_CN,
- 'sn': ACTIVE_USER1_CN,
- ALLOCATED_ATTR: str(20)})))
- ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ topology_st.standalone.add_s(
+ Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': ACTIVE_USER1_CN,
+ 'sn': ACTIVE_USER1_CN,
+ ALLOCATED_ATTR: str(20)})))
+ ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
assert ent.hasAttr(ALLOCATED_ATTR)
assert ent.getValue(ALLOCATED_ATTR) == str(20)
- topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
- topology.standalone.delete_s(ACTIVE_USER1_DN)
+ topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+ topology_st.standalone.delete_s(ACTIVE_USER1_DN)
-def test_ticket47828_run_2(topology):
+def test_ticket47828_run_2(topology_st):
"""
NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is set
"""
- _header(topology, 'NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is set')
+ _header(topology_st, 'NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is set')
- topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': STAGED_USER1_CN,
- 'sn': STAGED_USER1_CN,
- ALLOCATED_ATTR: str(-1)})))
- ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ topology_st.standalone.add_s(
+ Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': STAGED_USER1_CN,
+ 'sn': STAGED_USER1_CN,
+ ALLOCATED_ATTR: str(-1)})))
+ ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
assert ent.hasAttr(ALLOCATED_ATTR)
assert ent.getValue(ALLOCATED_ATTR) != str(-1)
- topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
- topology.standalone.delete_s(STAGED_USER1_DN)
+ topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+ topology_st.standalone.delete_s(STAGED_USER1_DN)
-def test_ticket47828_run_3(topology):
+def test_ticket47828_run_3(topology_st):
"""
NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)
"""
- _header(topology, 'NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
+ _header(topology_st, 'NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
- topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': STAGED_USER1_CN,
- 'sn': STAGED_USER1_CN,
- ALLOCATED_ATTR: str(20)})))
- ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ topology_st.standalone.add_s(
+ Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': STAGED_USER1_CN,
+ 'sn': STAGED_USER1_CN,
+ ALLOCATED_ATTR: str(20)})))
+ ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
assert ent.hasAttr(ALLOCATED_ATTR)
assert ent.getValue(ALLOCATED_ATTR) == str(20)
- topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
- topology.standalone.delete_s(STAGED_USER1_DN)
+ topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+ topology_st.standalone.delete_s(STAGED_USER1_DN)
-def test_ticket47828_run_4(topology):
+def test_ticket47828_run_4(topology_st):
'''
Exclude the provisioning container
'''
- _header(topology, 'Exclude the provisioning container')
+ _header(topology_st, 'Exclude the provisioning container')
dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN)
mod = [(ldap.MOD_REPLACE, 'dnaExcludeScope', PROVISIONING)]
- topology.standalone.modify_s(dn_config, mod)
+ topology_st.standalone.modify_s(dn_config, mod)
-def test_ticket47828_run_5(topology):
+def test_ticket47828_run_5(topology_st):
"""
Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is set
"""
- _header(topology, 'Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is set')
+ _header(topology_st, 'Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is set')
- topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': ACTIVE_USER1_CN,
- 'sn': ACTIVE_USER1_CN,
- ALLOCATED_ATTR: str(-1)})))
- ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ topology_st.standalone.add_s(
+ Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': ACTIVE_USER1_CN,
+ 'sn': ACTIVE_USER1_CN,
+ ALLOCATED_ATTR: str(-1)})))
+ ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
assert ent.hasAttr(ALLOCATED_ATTR)
assert ent.getValue(ALLOCATED_ATTR) != str(-1)
- topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
- topology.standalone.delete_s(ACTIVE_USER1_DN)
+ topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+ topology_st.standalone.delete_s(ACTIVE_USER1_DN)
-def test_ticket47828_run_6(topology):
+def test_ticket47828_run_6(topology_st):
"""
Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)
"""
- _header(topology, 'Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
+ _header(topology_st,
+ 'Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
- topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': ACTIVE_USER1_CN,
- 'sn': ACTIVE_USER1_CN,
- ALLOCATED_ATTR: str(20)})))
- ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ topology_st.standalone.add_s(
+ Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': ACTIVE_USER1_CN,
+ 'sn': ACTIVE_USER1_CN,
+ ALLOCATED_ATTR: str(20)})))
+ ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
assert ent.hasAttr(ALLOCATED_ATTR)
assert ent.getValue(ALLOCATED_ATTR) == str(20)
- topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
- topology.standalone.delete_s(ACTIVE_USER1_DN)
+ topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+ topology_st.standalone.delete_s(ACTIVE_USER1_DN)
-def test_ticket47828_run_7(topology):
+def test_ticket47828_run_7(topology_st):
"""
Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set
"""
- _header(topology, 'Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set')
+ _header(topology_st, 'Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set')
- topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': STAGED_USER1_CN,
- 'sn': STAGED_USER1_CN,
- ALLOCATED_ATTR: str(-1)})))
- ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ topology_st.standalone.add_s(
+ Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': STAGED_USER1_CN,
+ 'sn': STAGED_USER1_CN,
+ ALLOCATED_ATTR: str(-1)})))
+ ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
assert ent.hasAttr(ALLOCATED_ATTR)
assert ent.getValue(ALLOCATED_ATTR) == str(-1)
- topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
- topology.standalone.delete_s(STAGED_USER1_DN)
+ topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+ topology_st.standalone.delete_s(STAGED_USER1_DN)
-def test_ticket47828_run_8(topology):
+def test_ticket47828_run_8(topology_st):
"""
Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)
"""
- _header(topology, 'Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
+ _header(topology_st,
+ 'Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
- topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': STAGED_USER1_CN,
- 'sn': STAGED_USER1_CN,
- ALLOCATED_ATTR: str(20)})))
- ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ topology_st.standalone.add_s(
+ Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': STAGED_USER1_CN,
+ 'sn': STAGED_USER1_CN,
+ ALLOCATED_ATTR: str(20)})))
+ ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
assert ent.hasAttr(ALLOCATED_ATTR)
assert ent.getValue(ALLOCATED_ATTR) == str(20)
- topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
- topology.standalone.delete_s(STAGED_USER1_DN)
+ topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+ topology_st.standalone.delete_s(STAGED_USER1_DN)
-def test_ticket47828_run_9(topology):
+def test_ticket47828_run_9(topology_st):
"""
Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set
"""
- _header(topology, 'Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set')
+ _header(topology_st, 'Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set')
- topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': DUMMY_USER1_CN,
- 'sn': DUMMY_USER1_CN,
- ALLOCATED_ATTR: str(-1)})))
- ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ topology_st.standalone.add_s(
+ Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': DUMMY_USER1_CN,
+ 'sn': DUMMY_USER1_CN,
+ ALLOCATED_ATTR: str(-1)})))
+ ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
assert ent.hasAttr(ALLOCATED_ATTR)
assert ent.getValue(ALLOCATED_ATTR) != str(-1)
- topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
- topology.standalone.delete_s(DUMMY_USER1_DN)
+ topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+ topology_st.standalone.delete_s(DUMMY_USER1_DN)
-def test_ticket47828_run_10(topology):
+def test_ticket47828_run_10(topology_st):
"""
Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)
"""
- _header(topology, 'Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
+ _header(topology_st,
+ 'Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
- topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': DUMMY_USER1_CN,
- 'sn': DUMMY_USER1_CN,
- ALLOCATED_ATTR: str(20)})))
- ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ topology_st.standalone.add_s(
+ Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': DUMMY_USER1_CN,
+ 'sn': DUMMY_USER1_CN,
+ ALLOCATED_ATTR: str(20)})))
+ ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
assert ent.hasAttr(ALLOCATED_ATTR)
assert ent.getValue(ALLOCATED_ATTR) == str(20)
- topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
- topology.standalone.delete_s(DUMMY_USER1_DN)
+ topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+ topology_st.standalone.delete_s(DUMMY_USER1_DN)
-def test_ticket47828_run_11(topology):
+def test_ticket47828_run_11(topology_st):
'''
Exclude (in addition) the dummy container
'''
- _header(topology, 'Exclude (in addition) the dummy container')
+ _header(topology_st, 'Exclude (in addition) the dummy container')
dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN)
mod = [(ldap.MOD_ADD, 'dnaExcludeScope', DUMMY_CONTAINER)]
- topology.standalone.modify_s(dn_config, mod)
+ topology_st.standalone.modify_s(dn_config, mod)
-def test_ticket47828_run_12(topology):
+def test_ticket47828_run_12(topology_st):
"""
Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is set
"""
- _header(topology, 'Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is set')
+ _header(topology_st, 'Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is set')
- topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': ACTIVE_USER1_CN,
- 'sn': ACTIVE_USER1_CN,
- ALLOCATED_ATTR: str(-1)})))
- ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ topology_st.standalone.add_s(
+ Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': ACTIVE_USER1_CN,
+ 'sn': ACTIVE_USER1_CN,
+ ALLOCATED_ATTR: str(-1)})))
+ ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
assert ent.hasAttr(ALLOCATED_ATTR)
assert ent.getValue(ALLOCATED_ATTR) != str(-1)
- topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
- topology.standalone.delete_s(ACTIVE_USER1_DN)
+ topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+ topology_st.standalone.delete_s(ACTIVE_USER1_DN)
-def test_ticket47828_run_13(topology):
+def test_ticket47828_run_13(topology_st):
"""
Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)
"""
- _header(topology, 'Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
+ _header(topology_st,
+ 'Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
- topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': ACTIVE_USER1_CN,
- 'sn': ACTIVE_USER1_CN,
- ALLOCATED_ATTR: str(20)})))
- ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ topology_st.standalone.add_s(
+ Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': ACTIVE_USER1_CN,
+ 'sn': ACTIVE_USER1_CN,
+ ALLOCATED_ATTR: str(20)})))
+ ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
assert ent.hasAttr(ALLOCATED_ATTR)
assert ent.getValue(ALLOCATED_ATTR) == str(20)
- topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
- topology.standalone.delete_s(ACTIVE_USER1_DN)
+ topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+ topology_st.standalone.delete_s(ACTIVE_USER1_DN)
-def test_ticket47828_run_14(topology):
+def test_ticket47828_run_14(topology_st):
"""
Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set
"""
- _header(topology, 'Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set')
+ _header(topology_st,
+ 'Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set')
- topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': STAGED_USER1_CN,
- 'sn': STAGED_USER1_CN,
- ALLOCATED_ATTR: str(-1)})))
- ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ topology_st.standalone.add_s(
+ Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': STAGED_USER1_CN,
+ 'sn': STAGED_USER1_CN,
+ ALLOCATED_ATTR: str(-1)})))
+ ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
assert ent.hasAttr(ALLOCATED_ATTR)
assert ent.getValue(ALLOCATED_ATTR) == str(-1)
- topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
- topology.standalone.delete_s(STAGED_USER1_DN)
+ topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+ topology_st.standalone.delete_s(STAGED_USER1_DN)
-def test_ticket47828_run_15(topology):
+def test_ticket47828_run_15(topology_st):
"""
Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)
"""
- _header(topology, 'Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
+ _header(topology_st,
+ 'Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
- topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': STAGED_USER1_CN,
- 'sn': STAGED_USER1_CN,
- ALLOCATED_ATTR: str(20)})))
- ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ topology_st.standalone.add_s(
+ Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': STAGED_USER1_CN,
+ 'sn': STAGED_USER1_CN,
+ ALLOCATED_ATTR: str(20)})))
+ ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
assert ent.hasAttr(ALLOCATED_ATTR)
assert ent.getValue(ALLOCATED_ATTR) == str(20)
- topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
- topology.standalone.delete_s(STAGED_USER1_DN)
+ topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+ topology_st.standalone.delete_s(STAGED_USER1_DN)
-def test_ticket47828_run_16(topology):
+def test_ticket47828_run_16(topology_st):
"""
Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is not set
"""
- _header(topology, 'Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR not is set')
+ _header(topology_st,
+ 'Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR not is set')
- topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': DUMMY_USER1_CN,
- 'sn': DUMMY_USER1_CN,
- ALLOCATED_ATTR: str(-1)})))
- ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ topology_st.standalone.add_s(
+ Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': DUMMY_USER1_CN,
+ 'sn': DUMMY_USER1_CN,
+ ALLOCATED_ATTR: str(-1)})))
+ ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
assert ent.hasAttr(ALLOCATED_ATTR)
assert ent.getValue(ALLOCATED_ATTR) == str(-1)
- topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
- topology.standalone.delete_s(DUMMY_USER1_DN)
+ topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+ topology_st.standalone.delete_s(DUMMY_USER1_DN)
-def test_ticket47828_run_17(topology):
+def test_ticket47828_run_17(topology_st):
"""
Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)
"""
- _header(topology, 'Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
+ _header(topology_st,
+ 'Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
- topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': DUMMY_USER1_CN,
- 'sn': DUMMY_USER1_CN,
- ALLOCATED_ATTR: str(20)})))
- ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ topology_st.standalone.add_s(
+ Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': DUMMY_USER1_CN,
+ 'sn': DUMMY_USER1_CN,
+ ALLOCATED_ATTR: str(20)})))
+ ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
assert ent.hasAttr(ALLOCATED_ATTR)
assert ent.getValue(ALLOCATED_ATTR) == str(20)
- topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
- topology.standalone.delete_s(DUMMY_USER1_DN)
+ topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+ topology_st.standalone.delete_s(DUMMY_USER1_DN)
-def test_ticket47828_run_18(topology):
+def test_ticket47828_run_18(topology_st):
'''
Exclude PROVISIONING and a wrong container
'''
- _header(topology, 'Exclude PROVISIONING and a wrong container')
+ _header(topology_st, 'Exclude PROVISIONING and a wrong container')
dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN)
mod = [(ldap.MOD_REPLACE, 'dnaExcludeScope', PROVISIONING)]
- topology.standalone.modify_s(dn_config, mod)
+ topology_st.standalone.modify_s(dn_config, mod)
try:
mod = [(ldap.MOD_ADD, 'dnaExcludeScope', "invalidDN,%s" % SUFFIX)]
- topology.standalone.modify_s(dn_config, mod)
+ topology_st.standalone.modify_s(dn_config, mod)
raise ValueError("invalid dnaExcludeScope value (not a DN)")
except ldap.INVALID_SYNTAX:
pass
-def test_ticket47828_run_19(topology):
+def test_ticket47828_run_19(topology_st):
"""
Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set
"""
- _header(topology, 'Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set')
+ _header(topology_st,
+ 'Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set')
- topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': ACTIVE_USER1_CN,
- 'sn': ACTIVE_USER1_CN,
- ALLOCATED_ATTR: str(-1)})))
- ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ topology_st.standalone.add_s(
+ Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': ACTIVE_USER1_CN,
+ 'sn': ACTIVE_USER1_CN,
+ ALLOCATED_ATTR: str(-1)})))
+ ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
assert ent.hasAttr(ALLOCATED_ATTR)
assert ent.getValue(ALLOCATED_ATTR) != str(-1)
- topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
- topology.standalone.delete_s(ACTIVE_USER1_DN)
+ topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+ topology_st.standalone.delete_s(ACTIVE_USER1_DN)
-def test_ticket47828_run_20(topology):
+def test_ticket47828_run_20(topology_st):
"""
Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)
"""
- _header(topology, 'Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
+ _header(topology_st,
+ 'Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
- topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': ACTIVE_USER1_CN,
- 'sn': ACTIVE_USER1_CN,
- ALLOCATED_ATTR: str(20)})))
- ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ topology_st.standalone.add_s(
+ Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': ACTIVE_USER1_CN,
+ 'sn': ACTIVE_USER1_CN,
+ ALLOCATED_ATTR: str(20)})))
+ ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
assert ent.hasAttr(ALLOCATED_ATTR)
assert ent.getValue(ALLOCATED_ATTR) == str(20)
- topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
- topology.standalone.delete_s(ACTIVE_USER1_DN)
+ topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+ topology_st.standalone.delete_s(ACTIVE_USER1_DN)
-def test_ticket47828_run_21(topology):
+def test_ticket47828_run_21(topology_st):
"""
Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set
"""
- _header(topology, 'Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set')
+ _header(topology_st,
+ 'Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set')
- topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': STAGED_USER1_CN,
- 'sn': STAGED_USER1_CN,
- ALLOCATED_ATTR: str(-1)})))
- ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ topology_st.standalone.add_s(
+ Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': STAGED_USER1_CN,
+ 'sn': STAGED_USER1_CN,
+ ALLOCATED_ATTR: str(-1)})))
+ ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
assert ent.hasAttr(ALLOCATED_ATTR)
assert ent.getValue(ALLOCATED_ATTR) == str(-1)
- topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
- topology.standalone.delete_s(STAGED_USER1_DN)
+ topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+ topology_st.standalone.delete_s(STAGED_USER1_DN)
-def test_ticket47828_run_22(topology):
+def test_ticket47828_run_22(topology_st):
"""
Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)
"""
- _header(topology, 'Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
+ _header(topology_st,
+ 'Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
- topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': STAGED_USER1_CN,
- 'sn': STAGED_USER1_CN,
- ALLOCATED_ATTR: str(20)})))
- ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ topology_st.standalone.add_s(
+ Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': STAGED_USER1_CN,
+ 'sn': STAGED_USER1_CN,
+ ALLOCATED_ATTR: str(20)})))
+ ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
assert ent.hasAttr(ALLOCATED_ATTR)
assert ent.getValue(ALLOCATED_ATTR) == str(20)
- topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
- topology.standalone.delete_s(STAGED_USER1_DN)
+ topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+ topology_st.standalone.delete_s(STAGED_USER1_DN)
-def test_ticket47828_run_23(topology):
+def test_ticket47828_run_23(topology_st):
"""
Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set
"""
- _header(topology, 'Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set')
+ _header(topology_st,
+ 'Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set')
- topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': DUMMY_USER1_CN,
- 'sn': DUMMY_USER1_CN,
- ALLOCATED_ATTR: str(-1)})))
- ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ topology_st.standalone.add_s(
+ Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': DUMMY_USER1_CN,
+ 'sn': DUMMY_USER1_CN,
+ ALLOCATED_ATTR: str(-1)})))
+ ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
assert ent.hasAttr(ALLOCATED_ATTR)
assert ent.getValue(ALLOCATED_ATTR) != str(-1)
- topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
- topology.standalone.delete_s(DUMMY_USER1_DN)
+ topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+ topology_st.standalone.delete_s(DUMMY_USER1_DN)
-def test_ticket47828_run_24(topology):
+def test_ticket47828_run_24(topology_st):
"""
Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)
"""
- _header(topology, 'Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
+ _header(topology_st,
+ 'Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
- topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': DUMMY_USER1_CN,
- 'sn': DUMMY_USER1_CN,
- ALLOCATED_ATTR: str(20)})))
- ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ topology_st.standalone.add_s(
+ Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': DUMMY_USER1_CN,
+ 'sn': DUMMY_USER1_CN,
+ ALLOCATED_ATTR: str(20)})))
+ ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
assert ent.hasAttr(ALLOCATED_ATTR)
assert ent.getValue(ALLOCATED_ATTR) == str(20)
- topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
- topology.standalone.delete_s(DUMMY_USER1_DN)
+ topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+ topology_st.standalone.delete_s(DUMMY_USER1_DN)
-def test_ticket47828_run_25(topology):
+def test_ticket47828_run_25(topology_st):
'''
Exclude a wrong container
'''
- _header(topology, 'Exclude a wrong container')
+ _header(topology_st, 'Exclude a wrong container')
dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN)
try:
mod = [(ldap.MOD_REPLACE, 'dnaExcludeScope', "invalidDN,%s" % SUFFIX)]
- topology.standalone.modify_s(dn_config, mod)
+ topology_st.standalone.modify_s(dn_config, mod)
raise ValueError("invalid dnaExcludeScope value (not a DN)")
except ldap.INVALID_SYNTAX:
pass
-def test_ticket47828_run_26(topology):
+def test_ticket47828_run_26(topology_st):
"""
Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set
"""
- _header(topology, 'Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set')
+ _header(topology_st, 'Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set')
- topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': ACTIVE_USER1_CN,
- 'sn': ACTIVE_USER1_CN,
- ALLOCATED_ATTR: str(-1)})))
- ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ topology_st.standalone.add_s(
+ Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': ACTIVE_USER1_CN,
+ 'sn': ACTIVE_USER1_CN,
+ ALLOCATED_ATTR: str(-1)})))
+ ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
assert ent.hasAttr(ALLOCATED_ATTR)
assert ent.getValue(ALLOCATED_ATTR) != str(-1)
- topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
- topology.standalone.delete_s(ACTIVE_USER1_DN)
+ topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+ topology_st.standalone.delete_s(ACTIVE_USER1_DN)
-def test_ticket47828_run_27(topology):
+def test_ticket47828_run_27(topology_st):
"""
Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)
"""
- _header(topology, 'Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
+ _header(topology_st,
+ 'Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
- topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': ACTIVE_USER1_CN,
- 'sn': ACTIVE_USER1_CN,
- ALLOCATED_ATTR: str(20)})))
- ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ topology_st.standalone.add_s(
+ Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': ACTIVE_USER1_CN,
+ 'sn': ACTIVE_USER1_CN,
+ ALLOCATED_ATTR: str(20)})))
+ ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
assert ent.hasAttr(ALLOCATED_ATTR)
assert ent.getValue(ALLOCATED_ATTR) == str(20)
- topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
- topology.standalone.delete_s(ACTIVE_USER1_DN)
+ topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+ topology_st.standalone.delete_s(ACTIVE_USER1_DN)
-def test_ticket47828_run_28(topology):
+def test_ticket47828_run_28(topology_st):
"""
Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set
"""
- _header(topology, 'Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set')
+ _header(topology_st, 'Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set')
- topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': STAGED_USER1_CN,
- 'sn': STAGED_USER1_CN,
- ALLOCATED_ATTR: str(-1)})))
- ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ topology_st.standalone.add_s(
+ Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': STAGED_USER1_CN,
+ 'sn': STAGED_USER1_CN,
+ ALLOCATED_ATTR: str(-1)})))
+ ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
assert ent.hasAttr(ALLOCATED_ATTR)
assert ent.getValue(ALLOCATED_ATTR) == str(-1)
- topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
- topology.standalone.delete_s(STAGED_USER1_DN)
+ topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+ topology_st.standalone.delete_s(STAGED_USER1_DN)
-def test_ticket47828_run_29(topology):
+def test_ticket47828_run_29(topology_st):
"""
Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)
"""
- _header(topology, 'Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
+ _header(topology_st,
+ 'Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
- topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': STAGED_USER1_CN,
- 'sn': STAGED_USER1_CN,
- ALLOCATED_ATTR: str(20)})))
- ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ topology_st.standalone.add_s(
+ Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': STAGED_USER1_CN,
+ 'sn': STAGED_USER1_CN,
+ ALLOCATED_ATTR: str(20)})))
+ ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
assert ent.hasAttr(ALLOCATED_ATTR)
assert ent.getValue(ALLOCATED_ATTR) == str(20)
- topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
- topology.standalone.delete_s(STAGED_USER1_DN)
+ topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+ topology_st.standalone.delete_s(STAGED_USER1_DN)
-def test_ticket47828_run_30(topology):
+def test_ticket47828_run_30(topology_st):
"""
Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set
"""
- _header(topology, 'Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set')
+ _header(topology_st, 'Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set')
- topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': DUMMY_USER1_CN,
- 'sn': DUMMY_USER1_CN,
- ALLOCATED_ATTR: str(-1)})))
- ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ topology_st.standalone.add_s(
+ Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': DUMMY_USER1_CN,
+ 'sn': DUMMY_USER1_CN,
+ ALLOCATED_ATTR: str(-1)})))
+ ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
assert ent.hasAttr(ALLOCATED_ATTR)
assert ent.getValue(ALLOCATED_ATTR) != str(-1)
- topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
- topology.standalone.delete_s(DUMMY_USER1_DN)
+ topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+ topology_st.standalone.delete_s(DUMMY_USER1_DN)
-def test_ticket47828_run_31(topology):
+def test_ticket47828_run_31(topology_st):
"""
Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)
"""
- _header(topology, 'Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
+ _header(topology_st,
+ 'Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
- topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': DUMMY_USER1_CN,
- 'sn': DUMMY_USER1_CN,
- ALLOCATED_ATTR: str(20)})))
- ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ topology_st.standalone.add_s(
+ Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': DUMMY_USER1_CN,
+ 'sn': DUMMY_USER1_CN,
+ ALLOCATED_ATTR: str(20)})))
+ ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
assert ent.hasAttr(ALLOCATED_ATTR)
assert ent.getValue(ALLOCATED_ATTR) == str(20)
- topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
- topology.standalone.delete_s(DUMMY_USER1_DN)
+ topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+ topology_st.standalone.delete_s(DUMMY_USER1_DN)
if __name__ == '__main__':
@@ -645,4 +645,3 @@ if __name__ == '__main__':
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s %s" % CURRENT_FILE)
-
diff --git a/dirsrvtests/tests/tickets/ticket47829_test.py b/dirsrvtests/tests/tickets/ticket47829_test.py
index 0e95adc..e585c57 100644
--- a/dirsrvtests/tests/tickets/ticket47829_test.py
+++ b/dirsrvtests/tests/tickets/ticket47829_test.py
@@ -6,32 +6,29 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
+import logging
import time
+
import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
from lib389._constants import *
-from lib389.properties import *
-
+from lib389.topologies import topology_st
-SCOPE_IN_CN = 'in'
+SCOPE_IN_CN = 'in'
SCOPE_OUT_CN = 'out'
-SCOPE_IN_DN = 'cn=%s,%s' % (SCOPE_IN_CN, SUFFIX)
+SCOPE_IN_DN = 'cn=%s,%s' % (SCOPE_IN_CN, SUFFIX)
SCOPE_OUT_DN = 'cn=%s,%s' % (SCOPE_OUT_CN, SUFFIX)
PROVISIONING_CN = "provisioning"
PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SCOPE_IN_DN)
ACTIVE_CN = "accounts"
-STAGE_CN = "staged users"
+STAGE_CN = "staged users"
DELETE_CN = "deleted users"
ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SCOPE_IN_DN)
-STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN)
-DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN)
+STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN)
+DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN)
STAGE_USER_CN = "stage guy"
STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN)
@@ -57,126 +54,85 @@ INDIRECT_ACTIVE_GROUP_DN = "cn=%s,%s" % (INDIRECT_ACTIVE_GROUP_CN, ACTIVE_DN)
log = logging.getLogger(__name__)
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
+def _header(topology_st, label):
+ topology_st.standalone.log.info("\n\n###############################################")
+ topology_st.standalone.log.info("#######")
+ topology_st.standalone.log.info("####### %s" % label)
+ topology_st.standalone.log.info("#######")
+ topology_st.standalone.log.info("###############################################")
- # Remove the instance
- if instance_standalone:
- standalone.delete()
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-
-def _header(topology, label):
- topology.standalone.log.info("\n\n###############################################")
- topology.standalone.log.info("#######")
- topology.standalone.log.info("####### %s" % label)
- topology.standalone.log.info("#######")
- topology.standalone.log.info("###############################################")
-
-
-def _add_user(topology, type='active'):
+def _add_user(topology_st, type='active'):
if type == 'active':
- topology.standalone.add_s(Entry((ACTIVE_USER_DN, {
- 'objectclass': "top person inetuser".split(),
- 'sn': ACTIVE_USER_CN,
- 'cn': ACTIVE_USER_CN})))
+ topology_st.standalone.add_s(Entry((ACTIVE_USER_DN, {
+ 'objectclass': "top person inetuser".split(),
+ 'sn': ACTIVE_USER_CN,
+ 'cn': ACTIVE_USER_CN})))
elif type == 'stage':
- topology.standalone.add_s(Entry((STAGE_USER_DN, {
- 'objectclass': "top person inetuser".split(),
- 'sn': STAGE_USER_CN,
- 'cn': STAGE_USER_CN})))
+ topology_st.standalone.add_s(Entry((STAGE_USER_DN, {
+ 'objectclass': "top person inetuser".split(),
+ 'sn': STAGE_USER_CN,
+ 'cn': STAGE_USER_CN})))
else:
- topology.standalone.add_s(Entry((OUT_USER_DN, {
- 'objectclass': "top person inetuser".split(),
- 'sn': OUT_USER_CN,
- 'cn': OUT_USER_CN})))
+ topology_st.standalone.add_s(Entry((OUT_USER_DN, {
+ 'objectclass': "top person inetuser".split(),
+ 'sn': OUT_USER_CN,
+ 'cn': OUT_USER_CN})))
-def _find_memberof(topology, user_dn=None, group_dn=None, find_result=True):
- assert(topology)
- assert(user_dn)
- assert(group_dn)
- ent = topology.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof'])
+def _find_memberof(topology_st, user_dn=None, group_dn=None, find_result=True):
+ assert (topology_st)
+ assert (user_dn)
+ assert (group_dn)
+ ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof'])
found = False
if ent.hasAttr('memberof'):
for val in ent.getValues('memberof'):
- topology.standalone.log.info("!!!!!!! %s: memberof->%s" % (user_dn, val))
+ topology_st.standalone.log.info("!!!!!!! %s: memberof->%s" % (user_dn, val))
if val == group_dn:
found = True
break
if find_result:
- assert(found)
+ assert (found)
else:
- assert(not found)
+ assert (not found)
-def _find_member(topology, user_dn=None, group_dn=None, find_result=True):
- assert(topology)
- assert(user_dn)
- assert(group_dn)
- ent = topology.standalone.getEntry(group_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['member'])
+def _find_member(topology_st, user_dn=None, group_dn=None, find_result=True):
+ assert (topology_st)
+ assert (user_dn)
+ assert (group_dn)
+ ent = topology_st.standalone.getEntry(group_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['member'])
found = False
if ent.hasAttr('member'):
for val in ent.getValues('member'):
- topology.standalone.log.info("!!!!!!! %s: member ->%s" % (group_dn, val))
+ topology_st.standalone.log.info("!!!!!!! %s: member ->%s" % (group_dn, val))
if val == user_dn:
found = True
break
if find_result:
- assert(found)
+ assert (found)
else:
- assert(not found)
+ assert (not found)
-def _modrdn_entry(topology=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None):
- assert topology is not None
+def _modrdn_entry(topology_st=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None):
+ assert topology_st is not None
assert entry_dn is not None
assert new_rdn is not None
- topology.standalone.log.info("\n\n######################### MODRDN %s ######################\n" % new_rdn)
+ topology_st.standalone.log.info("\n\n######################### MODRDN %s ######################\n" % new_rdn)
try:
if new_superior:
- topology.standalone.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old)
+ topology_st.standalone.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old)
else:
- topology.standalone.rename_s(entry_dn, new_rdn, delold=del_old)
+ topology_st.standalone.rename_s(entry_dn, new_rdn, delold=del_old)
except ldap.NO_SUCH_ATTRIBUTE:
- topology.standalone.log.info("accepted failure due to 47833: modrdn reports error.. but succeeds")
+ topology_st.standalone.log.info("accepted failure due to 47833: modrdn reports error.. but succeeds")
attempt = 0
if new_superior:
dn = "%s,%s" % (new_rdn, new_superior)
@@ -188,289 +144,327 @@ def _modrdn_entry(topology=None, entry_dn=None, new_rdn=None, del_old=0, new_sup
while attempt < 10:
try:
- ent = topology.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter)
+ ent = topology_st.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter)
break
except ldap.NO_SUCH_OBJECT:
- topology.standalone.log.info("Accept failure due to 47833: unable to find (base) a modrdn entry")
+ topology_st.standalone.log.info("Accept failure due to 47833: unable to find (base) a modrdn entry")
attempt += 1
time.sleep(1)
if attempt == 10:
- ent = topology.standalone.getEntry(base, ldap.SCOPE_SUBTREE, myfilter)
- ent = topology.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter)
+ ent = topology_st.standalone.getEntry(base, ldap.SCOPE_SUBTREE, myfilter)
+ ent = topology_st.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter)
-def _check_memberof(topology=None, action=None, user_dn=None, group_dn=None, find_result=None):
- assert(topology)
- assert(user_dn)
- assert(group_dn)
+def _check_memberof(topology_st=None, action=None, user_dn=None, group_dn=None, find_result=None):
+ assert (topology_st)
+ assert (user_dn)
+ assert (group_dn)
if action == ldap.MOD_ADD:
txt = 'add'
elif action == ldap.MOD_DELETE:
txt = 'delete'
else:
txt = 'replace'
- topology.standalone.log.info('\n%s entry %s' % (txt, user_dn))
- topology.standalone.log.info('to group %s' % group_dn)
+ topology_st.standalone.log.info('\n%s entry %s' % (txt, user_dn))
+ topology_st.standalone.log.info('to group %s' % group_dn)
- topology.standalone.modify_s(group_dn, [(action, 'member', user_dn)])
+ topology_st.standalone.modify_s(group_dn, [(action, 'member', user_dn)])
time.sleep(1)
- _find_memberof(topology, user_dn=user_dn, group_dn=group_dn, find_result=find_result)
-
-
-def test_ticket47829_init(topology):
- topology.standalone.add_s(Entry((SCOPE_IN_DN, {
- 'objectclass': "top nscontainer".split(),
- 'cn': SCOPE_IN_DN})))
- topology.standalone.add_s(Entry((SCOPE_OUT_DN, {
- 'objectclass': "top nscontainer".split(),
- 'cn': SCOPE_OUT_DN})))
- topology.standalone.add_s(Entry((PROVISIONING_DN, {
- 'objectclass': "top nscontainer".split(),
- 'cn': PROVISIONING_CN})))
- topology.standalone.add_s(Entry((ACTIVE_DN, {
- 'objectclass': "top nscontainer".split(),
- 'cn': ACTIVE_CN})))
- topology.standalone.add_s(Entry((STAGE_DN, {
- 'objectclass': "top nscontainer".split(),
- 'cn': STAGE_DN})))
- topology.standalone.add_s(Entry((DELETE_DN, {
- 'objectclass': "top nscontainer".split(),
- 'cn': DELETE_CN})))
+ _find_memberof(topology_st, user_dn=user_dn, group_dn=group_dn, find_result=find_result)
+
+
+def test_ticket47829_init(topology_st):
+ topology_st.standalone.add_s(Entry((SCOPE_IN_DN, {
+ 'objectclass': "top nscontainer".split(),
+ 'cn': SCOPE_IN_DN})))
+ topology_st.standalone.add_s(Entry((SCOPE_OUT_DN, {
+ 'objectclass': "top nscontainer".split(),
+ 'cn': SCOPE_OUT_DN})))
+ topology_st.standalone.add_s(Entry((PROVISIONING_DN, {
+ 'objectclass': "top nscontainer".split(),
+ 'cn': PROVISIONING_CN})))
+ topology_st.standalone.add_s(Entry((ACTIVE_DN, {
+ 'objectclass': "top nscontainer".split(),
+ 'cn': ACTIVE_CN})))
+ topology_st.standalone.add_s(Entry((STAGE_DN, {
+ 'objectclass': "top nscontainer".split(),
+ 'cn': STAGE_DN})))
+ topology_st.standalone.add_s(Entry((DELETE_DN, {
+ 'objectclass': "top nscontainer".split(),
+ 'cn': DELETE_CN})))
# add groups
- topology.standalone.add_s(Entry((ACTIVE_GROUP_DN, {
- 'objectclass': "top groupOfNames inetuser".split(),
- 'cn': ACTIVE_GROUP_CN})))
- topology.standalone.add_s(Entry((STAGE_GROUP_DN, {
- 'objectclass': "top groupOfNames inetuser".split(),
- 'cn': STAGE_GROUP_CN})))
- topology.standalone.add_s(Entry((OUT_GROUP_DN, {
- 'objectclass': "top groupOfNames inetuser".split(),
- 'cn': OUT_GROUP_CN})))
- topology.standalone.add_s(Entry((INDIRECT_ACTIVE_GROUP_DN, {
- 'objectclass': "top groupOfNames".split(),
- 'cn': INDIRECT_ACTIVE_GROUP_CN})))
+ topology_st.standalone.add_s(Entry((ACTIVE_GROUP_DN, {
+ 'objectclass': "top groupOfNames inetuser".split(),
+ 'cn': ACTIVE_GROUP_CN})))
+ topology_st.standalone.add_s(Entry((STAGE_GROUP_DN, {
+ 'objectclass': "top groupOfNames inetuser".split(),
+ 'cn': STAGE_GROUP_CN})))
+ topology_st.standalone.add_s(Entry((OUT_GROUP_DN, {
+ 'objectclass': "top groupOfNames inetuser".split(),
+ 'cn': OUT_GROUP_CN})))
+ topology_st.standalone.add_s(Entry((INDIRECT_ACTIVE_GROUP_DN, {
+ 'objectclass': "top groupOfNames".split(),
+ 'cn': INDIRECT_ACTIVE_GROUP_CN})))
# add users
- _add_user(topology, 'active')
- _add_user(topology, 'stage')
- _add_user(topology, 'out')
+ _add_user(topology_st, 'active')
+ _add_user(topology_st, 'stage')
+ _add_user(topology_st, 'out')
# enable memberof of with scope IN except provisioning
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
dn = "cn=%s,%s" % (PLUGIN_MEMBER_OF, DN_PLUGIN)
- topology.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScope', SCOPE_IN_DN)])
- topology.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScopeExcludeSubtree', PROVISIONING_DN)])
+ topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScope', SCOPE_IN_DN)])
+ topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScopeExcludeSubtree', PROVISIONING_DN)])
# enable RI with scope IN except provisioning
- topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
+ topology_st.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
dn = "cn=%s,%s" % (PLUGIN_REFER_INTEGRITY, DN_PLUGIN)
- topology.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-pluginentryscope', SCOPE_IN_DN)])
- topology.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-plugincontainerscope', SCOPE_IN_DN)])
- topology.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-pluginExcludeEntryScope', PROVISIONING_DN)])
+ topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-pluginentryscope', SCOPE_IN_DN)])
+ topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-plugincontainerscope', SCOPE_IN_DN)])
+ topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-pluginExcludeEntryScope', PROVISIONING_DN)])
- topology.standalone.restart(timeout=10)
+ topology_st.standalone.restart(timeout=10)
-def test_ticket47829_mod_active_user_1(topology):
- _header(topology, 'MOD: add an active user to an active group')
+def test_ticket47829_mod_active_user_1(topology_st):
+ _header(topology_st, 'MOD: add an active user to an active group')
# add active user to active group
- _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
- _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
+ _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN,
+ find_result=True)
+ _find_member(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
# remove active user to active group
- _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
+ _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN,
+ find_result=False)
-def test_ticket47829_mod_active_user_2(topology):
- _header(topology, 'MOD: add an Active user to a Stage group')
+def test_ticket47829_mod_active_user_2(topology_st):
+ _header(topology_st, 'MOD: add an Active user to a Stage group')
# add active user to stage group
- _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False)
- _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True)
+ _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN,
+ find_result=False)
+ _find_member(topology_st, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True)
# remove active user to stage group
- _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False)
+ _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN,
+ find_result=False)
-def test_ticket47829_mod_active_user_3(topology):
- _header(topology, 'MOD: add an Active user to a out of scope group')
+def test_ticket47829_mod_active_user_3(topology_st):
+ _header(topology_st, 'MOD: add an Active user to a out of scope group')
# add active user to out of scope group
- _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False)
- _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN, find_result=True)
+ _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False)
+ _find_member(topology_st, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN, find_result=True)
# remove active user to out of scope group
- _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False)
+ _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN,
+ find_result=False)
-def test_ticket47829_mod_stage_user_1(topology):
- _header(topology, 'MOD: add an Stage user to a Active group')
+def test_ticket47829_mod_stage_user_1(topology_st):
+ _header(topology_st, 'MOD: add an Stage user to a Active group')
# add stage user to active group
- _check_memberof(topology, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
- _find_member(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
+ _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN,
+ find_result=False)
+ _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
# remove stage user to active group
- _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
+ _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN,
+ find_result=False)
-def test_ticket47829_mod_stage_user_2(topology):
- _header(topology, 'MOD: add an Stage user to a Stage group')
+def test_ticket47829_mod_stage_user_2(topology_st):
+ _header(topology_st, 'MOD: add an Stage user to a Stage group')
# add stage user to stage group
- _check_memberof(topology, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False)
- _find_member(topology, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True)
+ _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False)
+ _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True)
# remove stage user to stage group
- _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False)
+ _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN,
+ find_result=False)
-def test_ticket47829_mod_stage_user_3(topology):
- _header(topology, 'MOD: add an Stage user to a out of scope group')
+def test_ticket47829_mod_stage_user_3(topology_st):
+ _header(topology_st, 'MOD: add an Stage user to a out of scope group')
# add stage user to an out of scope group
- _check_memberof(topology, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False)
- _find_member(topology, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN, find_result=True)
+ _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False)
+ _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN, find_result=True)
# remove stage user to out of scope group
- _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False)
+ _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN,
+ find_result=False)
-def test_ticket47829_mod_out_user_1(topology):
- _header(topology, 'MOD: add an out of scope user to an active group')
+def test_ticket47829_mod_out_user_1(topology_st):
+ _header(topology_st, 'MOD: add an out of scope user to an active group')
# add out of scope user to active group
- _check_memberof(topology, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
- _find_member(topology, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
+ _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
+ _find_member(topology_st, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
# remove out of scope user to active group
- _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
+ _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN,
+ find_result=False)
-def test_ticket47829_mod_out_user_2(topology):
- _header(topology, 'MOD: add an out of scope user to a Stage group')
+def test_ticket47829_mod_out_user_2(topology_st):
+ _header(topology_st, 'MOD: add an out of scope user to a Stage group')
# add out of scope user to stage group
- _check_memberof(topology, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False)
- _find_member(topology, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True)
+ _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False)
+ _find_member(topology_st, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True)
# remove out of scope user to stage group
- _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False)
+ _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN,
+ find_result=False)
+
-def test_ticket47829_mod_out_user_3(topology):
- _header(topology, 'MOD: add an out of scope user to an out of scope group')
+def test_ticket47829_mod_out_user_3(topology_st):
+ _header(topology_st, 'MOD: add an out of scope user to an out of scope group')
# add out of scope user to stage group
- _check_memberof(topology, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=False)
- _find_member(topology, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=True)
+ _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=False)
+ _find_member(topology_st, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=True)
# remove out of scope user to stage group
- _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=False)
+ _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=False)
-def test_ticket47829_mod_active_user_modrdn_active_user_1(topology):
- _header(topology, 'add an Active user to a Active group. Then move Active user to Active')
+def test_ticket47829_mod_active_user_modrdn_active_user_1(topology_st):
+ _header(topology_st, 'add an Active user to a Active group. Then move Active user to Active')
# add Active user to active group
- _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
- _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
+ _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN,
+ find_result=True)
+ _find_member(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
# move the Active entry to active, expect 'member' and 'memberof'
- _modrdn_entry(topology, entry_dn=ACTIVE_USER_DN, new_rdn="cn=x%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN)
- _find_memberof(topology, user_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True)
- _find_member(topology, user_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True)
+ _modrdn_entry(topology_st, entry_dn=ACTIVE_USER_DN, new_rdn="cn=x%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN)
+ _find_memberof(topology_st, user_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN,
+ find_result=True)
+ _find_member(topology_st, user_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN,
+ find_result=True)
# move the Active entry to active, expect 'member' and no 'memberof'
- _modrdn_entry(topology, entry_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN)
- _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True)
- _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True)
+ _modrdn_entry(topology_st, entry_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN,
+ new_superior=ACTIVE_DN)
+ _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN,
+ find_result=True)
+ _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN,
+ find_result=True)
# remove active user to active group
- _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
+ _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN,
+ find_result=False)
-def test_ticket47829_mod_active_user_modrdn_stage_user_1(topology):
- _header(topology, 'add an Active user to a Active group. Then move Active user to Stage')
+def test_ticket47829_mod_active_user_modrdn_stage_user_1(topology_st):
+ _header(topology_st, 'add an Active user to a Active group. Then move Active user to Stage')
# add Active user to active group
- _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
- _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
+ _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN,
+ find_result=True)
+ _find_member(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
# move the Active entry to stage, expect no 'member' and 'memberof'
- _modrdn_entry(topology, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=STAGE_DN)
- _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
- _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
+ _modrdn_entry(topology_st, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=STAGE_DN)
+ _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN,
+ find_result=False)
+ _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN,
+ find_result=False)
# move the Active entry to Stage, expect 'member' and no 'memberof'
- _modrdn_entry(topology, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN)
- _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
- _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
+ _modrdn_entry(topology_st, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN,
+ new_superior=ACTIVE_DN)
+ _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN,
+ find_result=False)
+ _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN,
+ find_result=False)
-def test_ticket47829_mod_active_user_modrdn_out_user_1(topology):
- _header(topology, 'add an Active user to a Active group. Then move Active user to out of scope')
+def test_ticket47829_mod_active_user_modrdn_out_user_1(topology_st):
+ _header(topology_st, 'add an Active user to a Active group. Then move Active user to out of scope')
# add Active user to active group
- _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
- _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
+ _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN,
+ find_result=True)
+ _find_member(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
# move the Active entry to out of scope, expect no 'member' and no 'memberof'
- _modrdn_entry(topology, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=OUT_GROUP_DN)
- _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
- _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
+ _modrdn_entry(topology_st, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=OUT_GROUP_DN)
+ _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), group_dn=ACTIVE_GROUP_DN,
+ find_result=False)
+ _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), group_dn=ACTIVE_GROUP_DN,
+ find_result=False)
# move the Active entry to out of scope, expect no 'member' and no 'memberof'
- _modrdn_entry(topology, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN)
- _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
- _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
+ _modrdn_entry(topology_st, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), new_rdn="cn=%s" % ACTIVE_USER_CN,
+ new_superior=ACTIVE_DN)
+ _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN,
+ find_result=False)
+ _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN,
+ find_result=False)
-def test_ticket47829_mod_modrdn_1(topology):
- _header(topology, 'add an Stage user to a Active group. Then move Stage user to Active')
+def test_ticket47829_mod_modrdn_1(topology_st):
+ _header(topology_st, 'add an Stage user to a Active group. Then move Stage user to Active')
# add Stage user to active group
- _check_memberof(topology, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
- _find_member(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
+ _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN,
+ find_result=False)
+ _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
# move the Stage entry to active, expect 'member' and 'memberof'
- _modrdn_entry(topology, entry_dn=STAGE_USER_DN, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=ACTIVE_DN)
- _find_memberof(topology, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True)
- _find_member(topology, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True)
+ _modrdn_entry(topology_st, entry_dn=STAGE_USER_DN, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=ACTIVE_DN)
+ _find_memberof(topology_st, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN,
+ find_result=True)
+ _find_member(topology_st, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN,
+ find_result=True)
# move the Active entry to Stage, expect no 'member' and no 'memberof'
- _modrdn_entry(topology, entry_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), new_rdn="cn=%s" % STAGE_USER_CN, new_superior=STAGE_DN)
- _find_memberof(topology, user_dn="cn=%s,%s" % (STAGE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
- _find_member(topology, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
+ _modrdn_entry(topology_st, entry_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), new_rdn="cn=%s" % STAGE_USER_CN,
+ new_superior=STAGE_DN)
+ _find_memberof(topology_st, user_dn="cn=%s,%s" % (STAGE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN,
+ find_result=False)
+ _find_member(topology_st, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN,
+ find_result=False)
-def test_ticket47829_mod_stage_user_modrdn_active_user_1(topology):
- _header(topology, 'add an Stage user to a Active group. Then move Stage user to Active')
+def test_ticket47829_mod_stage_user_modrdn_active_user_1(topology_st):
+ _header(topology_st, 'add an Stage user to a Active group. Then move Stage user to Active')
- stage_user_dn = STAGE_USER_DN
+ stage_user_dn = STAGE_USER_DN
stage_user_rdn = "cn=%s" % STAGE_USER_CN
active_user_dn = "cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN)
# add Stage user to active group
- _check_memberof(topology, action=ldap.MOD_ADD, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
- _find_member(topology, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
+ _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN,
+ find_result=False)
+ _find_member(topology_st, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
# move the Stage entry to Actve, expect 'member' and 'memberof'
- _modrdn_entry(topology, entry_dn=stage_user_dn, new_rdn=stage_user_rdn, new_superior=ACTIVE_DN)
- _find_memberof(topology, user_dn=active_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
- _find_member(topology, user_dn=active_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
+ _modrdn_entry(topology_st, entry_dn=stage_user_dn, new_rdn=stage_user_rdn, new_superior=ACTIVE_DN)
+ _find_memberof(topology_st, user_dn=active_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
+ _find_member(topology_st, user_dn=active_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
# move the Active entry to Stage, expect no 'member' and no 'memberof'
- _modrdn_entry(topology, entry_dn=active_user_dn, new_rdn=stage_user_rdn, new_superior=STAGE_DN)
- _find_memberof(topology, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
- _find_member(topology, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
+ _modrdn_entry(topology_st, entry_dn=active_user_dn, new_rdn=stage_user_rdn, new_superior=STAGE_DN)
+ _find_memberof(topology_st, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
+ _find_member(topology_st, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
-def test_ticket47829_mod_stage_user_modrdn_stage_user_1(topology):
- _header(topology, 'add an Stage user to a Active group. Then move Stage user to Stage')
+def test_ticket47829_mod_stage_user_modrdn_stage_user_1(topology_st):
+ _header(topology_st, 'add an Stage user to a Active group. Then move Stage user to Stage')
- _header(topology, 'Return because it requires a fix for 47833')
+ _header(topology_st, 'Return because it requires a fix for 47833')
return
old_stage_user_dn = STAGE_USER_DN
@@ -479,126 +473,149 @@ def test_ticket47829_mod_stage_user_modrdn_stage_user_1(topology):
new_stage_user_dn = "%s,%s" % (new_stage_user_rdn, STAGE_DN)
# add Stage user to active group
- _check_memberof(topology, action=ldap.MOD_ADD, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
- _find_member(topology, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
+ _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN,
+ find_result=False)
+ _find_member(topology_st, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
# move the Stage entry to Stage, expect no 'member' and 'memberof'
- _modrdn_entry(topology, entry_dn=old_stage_user_dn, new_rdn=new_stage_user_rdn, new_superior=STAGE_DN)
- _find_memberof(topology, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
- _find_member(topology, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
+ _modrdn_entry(topology_st, entry_dn=old_stage_user_dn, new_rdn=new_stage_user_rdn, new_superior=STAGE_DN)
+ _find_memberof(topology_st, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
+ _find_member(topology_st, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
# move the Stage entry to Stage, expect no 'member' and no 'memberof'
- _modrdn_entry(topology, entry_dn=new_stage_user_dn, new_rdn=old_stage_user_rdn, new_superior=STAGE_DN)
- _find_memberof(topology, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
- _find_member(topology, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
+ _modrdn_entry(topology_st, entry_dn=new_stage_user_dn, new_rdn=old_stage_user_rdn, new_superior=STAGE_DN)
+ _find_memberof(topology_st, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
+ _find_member(topology_st, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
-def test_ticket47829_indirect_active_group_1(topology):
- _header(topology, 'add an Active group (G1) to an active group (G0). Then add active user to G1')
+def test_ticket47829_indirect_active_group_1(topology_st):
+ _header(topology_st, 'add an Active group (G1) to an active group (G0). Then add active user to G1')
- topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ACTIVE_GROUP_DN)])
+ topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ACTIVE_GROUP_DN)])
# add an active user to G1. Checks that user is memberof G1
- _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
- _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True)
+ _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN,
+ find_result=True)
+ _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True)
# remove G1 from G0
- topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ACTIVE_GROUP_DN)])
- _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
- _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
+ topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ACTIVE_GROUP_DN)])
+ _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
+ _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
# remove active user from G1
- _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
+ _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN,
+ find_result=False)
-def test_ticket47829_indirect_active_group_2(topology):
- _header(topology, 'add an Active group (G1) to an active group (G0). Then add active user to G1. Then move active user to stage')
+def test_ticket47829_indirect_active_group_2(topology_st):
+ _header(topology_st,
+ 'add an Active group (G1) to an active group (G0). Then add active user to G1. Then move active user to stage')
- topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ACTIVE_GROUP_DN)])
+ topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ACTIVE_GROUP_DN)])
# add an active user to G1. Checks that user is memberof G1
- _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
- _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True)
+ _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN,
+ find_result=True)
+ _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True)
# remove G1 from G0
- topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ACTIVE_GROUP_DN)])
- _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
- _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
+ topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ACTIVE_GROUP_DN)])
+ _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
+ _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
# move active user to stage
- _modrdn_entry(topology, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=STAGE_DN)
+ _modrdn_entry(topology_st, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=STAGE_DN)
# stage user is no long member of active group and indirect active group
- _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
- _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
+ _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN,
+ find_result=False)
+ _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN,
+ find_result=False)
# active group and indirect active group do no longer have stage user as member
- _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
- _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
+ _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN,
+ find_result=False)
+ _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN,
+ find_result=False)
# return back the entry to active. It remains not member
- _modrdn_entry(topology, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN)
- _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
- _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
+ _modrdn_entry(topology_st, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN,
+ new_superior=ACTIVE_DN)
+ _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN,
+ find_result=False)
+ _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN,
+ find_result=False)
-def test_ticket47829_indirect_active_group_3(topology):
- _header(topology, 'add an Active group (G1) to an active group (G0). Then add active user to G1. Then move active user to out of the scope')
+def test_ticket47829_indirect_active_group_3(topology_st):
+ _header(topology_st,
+ 'add an Active group (G1) to an active group (G0). Then add active user to G1. Then move active user to out of the scope')
- topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ACTIVE_GROUP_DN)])
+ topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ACTIVE_GROUP_DN)])
# add an active user to G1. Checks that user is memberof G1
- _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
- _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True)
+ _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN,
+ find_result=True)
+ _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True)
# remove G1 from G0
- topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ACTIVE_GROUP_DN)])
- _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
- _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
+ topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ACTIVE_GROUP_DN)])
+ _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
+ _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
# move active user to out of the scope
- _modrdn_entry(topology, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=SCOPE_OUT_DN)
+ _modrdn_entry(topology_st, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=SCOPE_OUT_DN)
# stage user is no long member of active group and indirect active group
- _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
- _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
+ _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=ACTIVE_GROUP_DN,
+ find_result=False)
+ _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN,
+ find_result=False)
# active group and indirect active group do no longer have stage user as member
- _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
- _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
+ _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=ACTIVE_GROUP_DN,
+ find_result=False)
+ _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN,
+ find_result=False)
# return back the entry to active. It remains not member
- _modrdn_entry(topology, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN)
- _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
- _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
+ _modrdn_entry(topology_st, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), new_rdn="cn=%s" % ACTIVE_USER_CN,
+ new_superior=ACTIVE_DN)
+ _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN,
+ find_result=False)
+ _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN,
+ find_result=False)
-def test_ticket47829_indirect_active_group_4(topology):
- _header(topology, 'add an Active group (G1) to an active group (G0). Then add stage user to G1. Then move user to active. Then move it back')
+def test_ticket47829_indirect_active_group_4(topology_st):
+ _header(topology_st,
+ 'add an Active group (G1) to an active group (G0). Then add stage user to G1. Then move user to active. Then move it back')
- topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ACTIVE_GROUP_DN)])
+ topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ACTIVE_GROUP_DN)])
# add stage user to active group
- _check_memberof(topology, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
- _find_member(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
- _find_member(topology, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
- _find_memberof(topology, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
- _find_memberof(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
+ _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN,
+ find_result=False)
+ _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
+ _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
+ _find_memberof(topology_st, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
+ _find_memberof(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
# move stage user to active
- _modrdn_entry(topology, entry_dn=STAGE_USER_DN, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=ACTIVE_DN)
+ _modrdn_entry(topology_st, entry_dn=STAGE_USER_DN, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=ACTIVE_DN)
renamed_stage_dn = "cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN)
- _find_member(topology, user_dn=renamed_stage_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
- _find_member(topology, user_dn=renamed_stage_dn, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
- _find_memberof(topology, user_dn=renamed_stage_dn, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True)
- _find_memberof(topology, user_dn=renamed_stage_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
+ _find_member(topology_st, user_dn=renamed_stage_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
+ _find_member(topology_st, user_dn=renamed_stage_dn, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
+ _find_memberof(topology_st, user_dn=renamed_stage_dn, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True)
+ _find_memberof(topology_st, user_dn=renamed_stage_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
# move back active to stage
- _modrdn_entry(topology, entry_dn=renamed_stage_dn, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=STAGE_DN)
- _find_member(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
- _find_member(topology, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
- _find_memberof(topology, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
- _find_memberof(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
+ _modrdn_entry(topology_st, entry_dn=renamed_stage_dn, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=STAGE_DN)
+ _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
+ _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
+ _find_memberof(topology_st, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
+ _find_memberof(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
if __name__ == '__main__':
diff --git a/dirsrvtests/tests/tickets/ticket47833_test.py b/dirsrvtests/tests/tickets/ticket47833_test.py
index 419f6eb..e2b38cd 100644
--- a/dirsrvtests/tests/tickets/ticket47833_test.py
+++ b/dirsrvtests/tests/tickets/ticket47833_test.py
@@ -6,33 +6,25 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
-SCOPE_IN_CN = 'in'
+SCOPE_IN_CN = 'in'
SCOPE_OUT_CN = 'out'
-SCOPE_IN_DN = 'cn=%s,%s' % (SCOPE_IN_CN, SUFFIX)
+SCOPE_IN_DN = 'cn=%s,%s' % (SCOPE_IN_CN, SUFFIX)
SCOPE_OUT_DN = 'cn=%s,%s' % (SCOPE_OUT_CN, SUFFIX)
PROVISIONING_CN = "provisioning"
PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SCOPE_IN_DN)
ACTIVE_CN = "accounts"
-STAGE_CN = "staged users"
+STAGE_CN = "staged users"
DELETE_CN = "deleted users"
ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SCOPE_IN_DN)
-STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN)
-DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN)
+STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN)
+DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN)
STAGE_USER_CN = "stage guy"
STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN)
@@ -52,208 +44,167 @@ ACTIVE_GROUP_DN = "cn=%s,%s" % (ACTIVE_GROUP_CN, ACTIVE_DN)
OUT_GROUP_CN = "out group"
OUT_GROUP_DN = "cn=%s,%s" % (OUT_GROUP_CN, SCOPE_OUT_DN)
-
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
+def _header(topology_st, label):
+ topology_st.standalone.log.info("\n\n###############################################")
+ topology_st.standalone.log.info("#######")
+ topology_st.standalone.log.info("####### %s" % label)
+ topology_st.standalone.log.info("#######")
+ topology_st.standalone.log.info("###############################################")
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
- return TopologyStandalone(standalone)
-
-
-def _header(topology, label):
- topology.standalone.log.info("\n\n###############################################")
- topology.standalone.log.info("#######")
- topology.standalone.log.info("####### %s" % label)
- topology.standalone.log.info("#######")
- topology.standalone.log.info("###############################################")
-
-def _add_user(topology, type='active'):
+def _add_user(topology_st, type='active'):
if type == 'active':
- topology.standalone.add_s(Entry((ACTIVE_USER_DN, {
- 'objectclass': "top person inetuser".split(),
- 'sn': ACTIVE_USER_CN,
- 'cn': ACTIVE_USER_CN})))
+ topology_st.standalone.add_s(Entry((ACTIVE_USER_DN, {
+ 'objectclass': "top person inetuser".split(),
+ 'sn': ACTIVE_USER_CN,
+ 'cn': ACTIVE_USER_CN})))
elif type == 'stage':
- topology.standalone.add_s(Entry((STAGE_USER_DN, {
- 'objectclass': "top person inetuser".split(),
- 'sn': STAGE_USER_CN,
- 'cn': STAGE_USER_CN})))
+ topology_st.standalone.add_s(Entry((STAGE_USER_DN, {
+ 'objectclass': "top person inetuser".split(),
+ 'sn': STAGE_USER_CN,
+ 'cn': STAGE_USER_CN})))
else:
- topology.standalone.add_s(Entry((OUT_USER_DN, {
- 'objectclass': "top person inetuser".split(),
- 'sn': OUT_USER_CN,
- 'cn': OUT_USER_CN})))
-
-def _find_memberof(topology, user_dn=None, group_dn=None, find_result=True):
- assert(topology)
- assert(user_dn)
- assert(group_dn)
- ent = topology.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof'])
+ topology_st.standalone.add_s(Entry((OUT_USER_DN, {
+ 'objectclass': "top person inetuser".split(),
+ 'sn': OUT_USER_CN,
+ 'cn': OUT_USER_CN})))
+
+
+def _find_memberof(topology_st, user_dn=None, group_dn=None, find_result=True):
+ assert (topology_st)
+ assert (user_dn)
+ assert (group_dn)
+ ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof'])
found = False
if ent.hasAttr('memberof'):
for val in ent.getValues('memberof'):
- topology.standalone.log.info("!!!!!!! %s: memberof->%s" % (user_dn, val))
+ topology_st.standalone.log.info("!!!!!!! %s: memberof->%s" % (user_dn, val))
if val == group_dn:
found = True
break
if find_result:
- assert(found)
+ assert (found)
else:
- assert(not found)
+ assert (not found)
-def _find_member(topology, user_dn=None, group_dn=None, find_result=True):
- assert(topology)
- assert(user_dn)
- assert(group_dn)
- ent = topology.standalone.getEntry(group_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['member'])
+
+def _find_member(topology_st, user_dn=None, group_dn=None, find_result=True):
+ assert (topology_st)
+ assert (user_dn)
+ assert (group_dn)
+ ent = topology_st.standalone.getEntry(group_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['member'])
found = False
if ent.hasAttr('member'):
for val in ent.getValues('member'):
- topology.standalone.log.info("!!!!!!! %s: member ->%s" % (group_dn, val))
+ topology_st.standalone.log.info("!!!!!!! %s: member ->%s" % (group_dn, val))
if val == user_dn:
found = True
break
if find_result:
- assert(found)
+ assert (found)
else:
- assert(not found)
+ assert (not found)
+
-def _modrdn_entry(topology=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None):
- assert topology != None
+def _modrdn_entry(topology_st=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None):
+ assert topology_st != None
assert entry_dn != None
assert new_rdn != None
-
- topology.standalone.log.info("\n\n######################### MODRDN %s ######################\n" % new_rdn)
+ topology_st.standalone.log.info("\n\n######################### MODRDN %s ######################\n" % new_rdn)
if new_superior:
- topology.standalone.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old)
+ topology_st.standalone.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old)
else:
- topology.standalone.rename_s(entry_dn, new_rdn, delold=del_old)
+ topology_st.standalone.rename_s(entry_dn, new_rdn, delold=del_old)
-def _check_memberof(topology=None, action=None, user_dn=None, group_dn=None, find_result=None):
- assert(topology)
- assert(user_dn)
- assert(group_dn)
+
+def _check_memberof(topology_st=None, action=None, user_dn=None, group_dn=None, find_result=None):
+ assert (topology_st)
+ assert (user_dn)
+ assert (group_dn)
if action == ldap.MOD_ADD:
txt = 'add'
elif action == ldap.MOD_DELETE:
txt = 'delete'
else:
txt = 'replace'
- topology.standalone.log.info('\n%s entry %s' % (txt, user_dn))
- topology.standalone.log.info('to group %s' % group_dn)
+ topology_st.standalone.log.info('\n%s entry %s' % (txt, user_dn))
+ topology_st.standalone.log.info('to group %s' % group_dn)
- topology.standalone.modify_s(group_dn, [(action, 'member', user_dn)])
+ topology_st.standalone.modify_s(group_dn, [(action, 'member', user_dn)])
time.sleep(1)
- _find_memberof(topology, user_dn=user_dn, group_dn=group_dn, find_result=find_result)
-
-
-
-
-def test_ticket47829_init(topology):
- topology.standalone.add_s(Entry((SCOPE_IN_DN, {
- 'objectclass': "top nscontainer".split(),
- 'cn': SCOPE_IN_DN})))
- topology.standalone.add_s(Entry((SCOPE_OUT_DN, {
- 'objectclass': "top nscontainer".split(),
- 'cn': SCOPE_OUT_DN})))
- topology.standalone.add_s(Entry((PROVISIONING_DN, {
- 'objectclass': "top nscontainer".split(),
- 'cn': PROVISIONING_CN})))
- topology.standalone.add_s(Entry((ACTIVE_DN, {
- 'objectclass': "top nscontainer".split(),
- 'cn': ACTIVE_CN})))
- topology.standalone.add_s(Entry((STAGE_DN, {
- 'objectclass': "top nscontainer".split(),
- 'cn': STAGE_DN})))
- topology.standalone.add_s(Entry((DELETE_DN, {
- 'objectclass': "top nscontainer".split(),
- 'cn': DELETE_CN})))
+ _find_memberof(topology_st, user_dn=user_dn, group_dn=group_dn, find_result=find_result)
+
+
+def test_ticket47829_init(topology_st):
+ topology_st.standalone.add_s(Entry((SCOPE_IN_DN, {
+ 'objectclass': "top nscontainer".split(),
+ 'cn': SCOPE_IN_DN})))
+ topology_st.standalone.add_s(Entry((SCOPE_OUT_DN, {
+ 'objectclass': "top nscontainer".split(),
+ 'cn': SCOPE_OUT_DN})))
+ topology_st.standalone.add_s(Entry((PROVISIONING_DN, {
+ 'objectclass': "top nscontainer".split(),
+ 'cn': PROVISIONING_CN})))
+ topology_st.standalone.add_s(Entry((ACTIVE_DN, {
+ 'objectclass': "top nscontainer".split(),
+ 'cn': ACTIVE_CN})))
+ topology_st.standalone.add_s(Entry((STAGE_DN, {
+ 'objectclass': "top nscontainer".split(),
+ 'cn': STAGE_DN})))
+ topology_st.standalone.add_s(Entry((DELETE_DN, {
+ 'objectclass': "top nscontainer".split(),
+ 'cn': DELETE_CN})))
# add groups
- topology.standalone.add_s(Entry((ACTIVE_GROUP_DN, {
- 'objectclass': "top groupOfNames".split(),
- 'cn': ACTIVE_GROUP_CN})))
- topology.standalone.add_s(Entry((STAGE_GROUP_DN, {
- 'objectclass': "top groupOfNames".split(),
- 'cn': STAGE_GROUP_CN})))
- topology.standalone.add_s(Entry((OUT_GROUP_DN, {
- 'objectclass': "top groupOfNames".split(),
- 'cn': OUT_GROUP_CN})))
+ topology_st.standalone.add_s(Entry((ACTIVE_GROUP_DN, {
+ 'objectclass': "top groupOfNames".split(),
+ 'cn': ACTIVE_GROUP_CN})))
+ topology_st.standalone.add_s(Entry((STAGE_GROUP_DN, {
+ 'objectclass': "top groupOfNames".split(),
+ 'cn': STAGE_GROUP_CN})))
+ topology_st.standalone.add_s(Entry((OUT_GROUP_DN, {
+ 'objectclass': "top groupOfNames".split(),
+ 'cn': OUT_GROUP_CN})))
# add users
- _add_user(topology, 'active')
- _add_user(topology, 'stage')
- _add_user(topology, 'out')
-
-
+ _add_user(topology_st, 'active')
+ _add_user(topology_st, 'stage')
+ _add_user(topology_st, 'out')
# enable memberof of with scope account
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
dn = "cn=%s,%s" % (PLUGIN_MEMBER_OF, DN_PLUGIN)
- topology.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScope', ACTIVE_DN)])
-
-
-
- topology.standalone.restart(timeout=10)
-
+ topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScope', ACTIVE_DN)])
+ topology_st.standalone.restart(timeout=10)
-def test_ticket47829_mod_stage_user_modrdn_stage_user_1(topology):
- _header(topology, 'add an Stage user to a Active group. Then move Stage user to Stage')
+def test_ticket47829_mod_stage_user_modrdn_stage_user_1(topology_st):
+ _header(topology_st, 'add an Stage user to a Active group. Then move Stage user to Stage')
- old_stage_user_dn = STAGE_USER_DN
+ old_stage_user_dn = STAGE_USER_DN
old_stage_user_rdn = "cn=%s" % STAGE_USER_CN
new_stage_user_rdn = "cn=x%s" % STAGE_USER_CN
new_stage_user_dn = "%s,%s" % (new_stage_user_rdn, STAGE_DN)
# add Stage user to active group
- _check_memberof(topology, action=ldap.MOD_ADD, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
- _find_member (topology, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
+ _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN,
+ find_result=False)
+ _find_member(topology_st, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
# move the Stage entry to Stage, expect no 'member' and 'memberof'
- _modrdn_entry (topology, entry_dn=old_stage_user_dn, new_rdn=new_stage_user_rdn, new_superior=STAGE_DN)
- _find_memberof(topology, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
- _find_member (topology, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
+ _modrdn_entry(topology_st, entry_dn=old_stage_user_dn, new_rdn=new_stage_user_rdn, new_superior=STAGE_DN)
+ _find_memberof(topology_st, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
+ _find_member(topology_st, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
if __name__ == '__main__':
diff --git a/dirsrvtests/tests/tickets/ticket47838_test.py b/dirsrvtests/tests/tickets/ticket47838_test.py
index 9023878..1f6f8ce 100644
--- a/dirsrvtests/tests/tickets/ticket47838_test.py
+++ b/dirsrvtests/tests/tickets/ticket47838_test.py
@@ -6,18 +6,14 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
+import logging
import time
+
import ldap
-import logging
import pytest
-import shutil
-from lib389 import DirSrv, Entry, tools
-from lib389 import DirSrvTools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
@@ -38,69 +34,29 @@ NSS320 = '3.20.0'
NSS321 = '3.21.0' # RHEL6
NSS323 = '3.23.0' # F22
NSS325 = '3.25.0' # F23/F24
+NSS327 = '3.27.0' # F25
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
+def _header(topology_st, label):
+ topology_st.standalone.log.info("\n\n###############################################")
+ topology_st.standalone.log.info("#######")
+ topology_st.standalone.log.info("####### %s" % label)
+ topology_st.standalone.log.info("#######")
+ topology_st.standalone.log.info("###############################################")
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-
-def _header(topology, label):
- topology.standalone.log.info("\n\n###############################################")
- topology.standalone.log.info("#######")
- topology.standalone.log.info("####### %s" % label)
- topology.standalone.log.info("#######")
- topology.standalone.log.info("###############################################")
-
-
-def test_47838_init(topology):
+def test_47838_init(topology_st):
"""
Generate self signed cert and import it to the DS cert db.
Enable SSL
"""
- _header(topology, 'Testing Ticket 47838 - harden the list of ciphers available by default')
+ _header(topology_st, 'Testing Ticket 47838 - harden the list of ciphers available by default')
onss_version = os.popen("rpm -q nss | awk -F'-' '{print $2}'", "r")
global nss_version
nss_version = onss_version.readline()
- conf_dir = topology.standalone.confdir
+ conf_dir = topology_st.standalone.confdir
log.info("\n######################### Checking existing certs ######################\n")
os.system('certutil -L -d %s -n "CA certificate"' % conf_dir)
@@ -131,8 +87,9 @@ def test_47838_init(topology):
os.system('certutil -G -d %s -z %s -f %s' % (conf_dir, noisefile, pwdfile))
log.info("\n######################### Creating self-signed CA certificate ######################\n")
- os.system('( echo y ; echo ; echo y ) | certutil -S -n "CA certificate" -s "cn=CAcert" -x -t "CT,," -m 1000 -v 120 -d %s -z %s -f %s -2' %
- (conf_dir, noisefile, pwdfile))
+ os.system(
+ '( echo y ; echo ; echo y ) | certutil -S -n "CA certificate" -s "cn=CAcert" -x -t "CT,," -m 1000 -v 120 -d %s -z %s -f %s -2' %
+ (conf_dir, noisefile, pwdfile))
log.info("\n######################### Exporting the CA certificate to cacert.asc ######################\n")
cafile = '%s/cacert.asc' % conf_dir
@@ -148,7 +105,9 @@ def test_47838_init(topology):
log.info("\n######################### Generate the server certificate ######################\n")
ohostname = os.popen('hostname --fqdn', "r")
myhostname = ohostname.readline()
- os.system('certutil -S -n "%s" -s "cn=%s,ou=389 Directory Server" -c "CA certificate" -t "u,u,u" -m 1001 -v 120 -d %s -z %s -f %s' % (SERVERCERT, myhostname.rstrip(), conf_dir, noisefile, pwdfile))
+ os.system(
+ 'certutil -S -n "%s" -s "cn=%s,ou=389 Directory Server" -c "CA certificate" -t "u,u,u" -m 1001 -v 120 -d %s -z %s -f %s' % (
+ SERVERCERT, myhostname.rstrip(), conf_dir, noisefile, pwdfile))
log.info("\n######################### create the pin file ######################\n")
pinfile = '%s/pin.txt' % (conf_dir)
@@ -159,106 +118,106 @@ def test_47838_init(topology):
time.sleep(1)
log.info("\n######################### enable SSL in the directory server with all ciphers ######################\n")
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3', 'off'),
- (ldap.MOD_REPLACE, 'nsTLS1', 'on'),
- (ldap.MOD_REPLACE, 'nsSSLClientAuth', 'allowed'),
- (ldap.MOD_REPLACE, 'allowWeakCipher', 'on'),
- (ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+all')])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3', 'off'),
+ (ldap.MOD_REPLACE, 'nsTLS1', 'on'),
+ (ldap.MOD_REPLACE, 'nsSSLClientAuth', 'allowed'),
+ (ldap.MOD_REPLACE, 'allowWeakCipher', 'on'),
+ (ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+all')])
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-security', 'on'),
- (ldap.MOD_REPLACE, 'nsslapd-ssl-check-hostname', 'off'),
- (ldap.MOD_REPLACE, 'nsslapd-secureport', MY_SECURE_PORT)])
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-security', 'on'),
+ (ldap.MOD_REPLACE, 'nsslapd-ssl-check-hostname', 'off'),
+ (ldap.MOD_REPLACE, 'nsslapd-secureport', MY_SECURE_PORT)])
- topology.standalone.add_s(Entry((RSA_DN, {'objectclass': "top nsEncryptionModule".split(),
- 'cn': RSA,
- 'nsSSLPersonalitySSL': SERVERCERT,
- 'nsSSLToken': 'internal (software)',
- 'nsSSLActivation': 'on'})))
+ topology_st.standalone.add_s(Entry((RSA_DN, {'objectclass': "top nsEncryptionModule".split(),
+ 'cn': RSA,
+ 'nsSSLPersonalitySSL': SERVERCERT,
+ 'nsSSLToken': 'internal (software)',
+ 'nsSSLActivation': 'on'})))
-def comp_nsSSLEnableCipherCount(topology, ecount):
+def comp_nsSSLEnableCipherCount(topology_st, ecount):
"""
Check nsSSLEnabledCipher count with ecount
"""
log.info("Checking nsSSLEnabledCiphers...")
- msgid = topology.standalone.search_ext(ENCRYPTION_DN, ldap.SCOPE_BASE, 'cn=*', ['nsSSLEnabledCiphers'])
+ msgid = topology_st.standalone.search_ext(ENCRYPTION_DN, ldap.SCOPE_BASE, 'cn=*', ['nsSSLEnabledCiphers'])
enabledciphercnt = 0
- rtype, rdata, rmsgid = topology.standalone.result2(msgid)
- topology.standalone.log.info("%d results" % len(rdata))
+ rtype, rdata, rmsgid = topology_st.standalone.result2(msgid)
+ topology_st.standalone.log.info("%d results" % len(rdata))
- topology.standalone.log.info("Results:")
+ topology_st.standalone.log.info("Results:")
for dn, attrs in rdata:
- topology.standalone.log.info("dn: %s" % dn)
+ topology_st.standalone.log.info("dn: %s" % dn)
if 'nsSSLEnabledCiphers' in attrs:
enabledciphercnt = len(attrs['nsSSLEnabledCiphers'])
- topology.standalone.log.info("enabledCipherCount: %d" % enabledciphercnt)
+ topology_st.standalone.log.info("enabledCipherCount: %d" % enabledciphercnt)
assert ecount == enabledciphercnt
-def test_47838_run_0(topology):
+def test_47838_run_0(topology_st):
"""
Check nsSSL3Ciphers: +all
All ciphers are enabled except null.
Note: allowWeakCipher: on
"""
- _header(topology, 'Test Case 1 - Check the ciphers availability for "+all"; allowWeakCipher: on')
+ _header(topology_st, 'Test Case 1 - Check the ciphers availability for "+all"; allowWeakCipher: on')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '64')])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '64')])
time.sleep(5)
log.info("\n######################### Restarting the server ######################\n")
- topology.standalone.restart(timeout=120)
- enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog)
- disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog)
+ topology_st.standalone.restart(timeout=120)
+ enabled = os.popen('egrep "SSL info:" %s | egrep \": enabled\" | wc -l' % topology_st.standalone.errlog)
+ disabled = os.popen('egrep "SSL info:" %s | egrep \": disabled\" | wc -l' % topology_st.standalone.errlog)
ecount = int(enabled.readline().rstrip())
dcount = int(disabled.readline().rstrip())
log.info("Enabled ciphers: %d" % ecount)
log.info("Disabled ciphers: %d" % dcount)
if nss_version >= NSS320:
- assert ecount >= 53
- assert dcount <= 17
+ assert ecount >= 53
+ assert dcount <= 17
else:
- assert ecount >= 60
- assert dcount <= 7
+ assert ecount >= 60
+ assert dcount <= 7
global plus_all_ecount
global plus_all_dcount
plus_all_ecount = ecount
plus_all_dcount = dcount
- weak = os.popen('egrep "SSL alert:" %s | egrep "WEAK CIPHER" | wc -l' % topology.standalone.errlog)
+ weak = os.popen('egrep "SSL info:" %s | egrep "WEAK CIPHER" | wc -l' % topology_st.standalone.errlog)
wcount = int(weak.readline().rstrip())
log.info("Weak ciphers: %d" % wcount)
assert wcount <= 29
- comp_nsSSLEnableCipherCount(topology, ecount)
+ comp_nsSSLEnableCipherCount(topology_st, ecount)
-def test_47838_run_1(topology):
+def test_47838_run_1(topology_st):
"""
Check nsSSL3Ciphers: +all
All ciphers are enabled except null.
Note: default allowWeakCipher (i.e., off) for +all
"""
- _header(topology, 'Test Case 2 - Check the ciphers availability for "+all" with default allowWeakCiphers')
+ _header(topology_st, 'Test Case 2 - Check the ciphers availability for "+all" with default allowWeakCiphers')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '64')])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '64')])
time.sleep(1)
# Make sure allowWeakCipher is not set.
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'allowWeakCipher', None)])
+ topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'allowWeakCipher', None)])
log.info("\n######################### Restarting the server ######################\n")
log.info("\n######################### Restarting the server ######################\n")
- topology.standalone.stop(timeout=10)
- os.system('mv %s %s.47838_0' % (topology.standalone.errlog, topology.standalone.errlog))
- os.system('touch %s' % (topology.standalone.errlog))
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.47838_0' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(1)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
- enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog)
- disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog)
+ enabled = os.popen('egrep "SSL info:" %s | egrep \": enabled\" | wc -l' % topology_st.standalone.errlog)
+ disabled = os.popen('egrep "SSL info:" %s | egrep \": disabled\" | wc -l' % topology_st.standalone.errlog)
ecount = int(enabled.readline().rstrip())
dcount = int(disabled.readline().rstrip())
@@ -271,34 +230,36 @@ def test_47838_run_1(topology):
log.info("Disabled ciphers: %d" % dcount)
assert ecount >= 31
assert dcount <= 36
- weak = os.popen('egrep "SSL alert:" %s | egrep "WEAK CIPHER" | wc -l' % topology.standalone.errlog)
+ weak = os.popen('egrep "SSL info:" %s | egrep "WEAK CIPHER" | wc -l' % topology_st.standalone.errlog)
wcount = int(weak.readline().rstrip())
log.info("Weak ciphers: %d" % wcount)
assert wcount <= 29
- comp_nsSSLEnableCipherCount(topology, ecount)
+ comp_nsSSLEnableCipherCount(topology_st, ecount)
-def test_47838_run_2(topology):
+def test_47838_run_2(topology_st):
"""
Check nsSSL3Ciphers: +rsa_aes_128_sha,+rsa_aes_256_sha
rsa_aes_128_sha, tls_rsa_aes_128_sha, rsa_aes_256_sha, tls_rsa_aes_256_sha are enabled.
default allowWeakCipher
"""
- _header(topology, 'Test Case 3 - Check the ciphers availability for "+rsa_aes_128_sha,+rsa_aes_256_sha" with default allowWeakCipher')
+ _header(topology_st,
+ 'Test Case 3 - Check the ciphers availability for "+rsa_aes_128_sha,+rsa_aes_256_sha" with default allowWeakCipher')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+rsa_aes_128_sha,+rsa_aes_256_sha')])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(ENCRYPTION_DN,
+ [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+rsa_aes_128_sha,+rsa_aes_256_sha')])
log.info("\n######################### Restarting the server ######################\n")
- topology.standalone.stop(timeout=10)
- os.system('mv %s %s.47838_1' % (topology.standalone.errlog, topology.standalone.errlog))
- os.system('touch %s' % (topology.standalone.errlog))
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.47838_1' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(1)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
- enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog)
- disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog)
+ enabled = os.popen('egrep "SSL info:" %s | egrep \": enabled\" | wc -l' % topology_st.standalone.errlog)
+ disabled = os.popen('egrep "SSL info:" %s | egrep \": disabled\" | wc -l' % topology_st.standalone.errlog)
ecount = int(enabled.readline().rstrip())
dcount = int(disabled.readline().rstrip())
@@ -309,61 +270,61 @@ def test_47838_run_2(topology):
assert ecount == 2
assert dcount == (plus_all_ecount + plus_all_dcount - ecount)
- comp_nsSSLEnableCipherCount(topology, ecount)
+ comp_nsSSLEnableCipherCount(topology_st, ecount)
-def test_47838_run_3(topology):
+def test_47838_run_3(topology_st):
"""
Check nsSSL3Ciphers: -all
All ciphers are disabled.
default allowWeakCipher
"""
- _header(topology, 'Test Case 4 - Check the ciphers availability for "-all"')
+ _header(topology_st, 'Test Case 4 - Check the ciphers availability for "-all"')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '-all')])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '-all')])
log.info("\n######################### Restarting the server ######################\n")
- topology.standalone.stop(timeout=10)
- os.system('mv %s %s.47838_2' % (topology.standalone.errlog, topology.standalone.errlog))
- os.system('touch %s' % (topology.standalone.errlog))
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.47838_2' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(1)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
- enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog)
+ enabled = os.popen('egrep "SSL info:" %s | egrep \": enabled\" | wc -l' % topology_st.standalone.errlog)
ecount = int(enabled.readline().rstrip())
log.info("Enabled ciphers: %d" % ecount)
global plus_all_ecount
assert ecount == 0
- disabledmsg = os.popen('egrep "Disabling SSL" %s' % topology.standalone.errlog)
+ disabledmsg = os.popen('egrep "Disabling SSL" %s' % topology_st.standalone.errlog)
log.info("Disabling SSL message?: %s" % disabledmsg.readline())
assert disabledmsg != ''
- comp_nsSSLEnableCipherCount(topology, ecount)
+ comp_nsSSLEnableCipherCount(topology_st, ecount)
-def test_47838_run_4(topology):
+def test_47838_run_4(topology_st):
"""
Check no nsSSL3Ciphers
Default ciphers are enabled.
default allowWeakCipher
"""
- _header(topology, 'Test Case 5 - Check no nsSSL3Ciphers (default setting) with default allowWeakCipher')
+ _header(topology_st, 'Test Case 5 - Check no nsSSL3Ciphers (default setting) with default allowWeakCipher')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'nsSSL3Ciphers', '-all')])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'nsSSL3Ciphers', '-all')])
log.info("\n######################### Restarting the server ######################\n")
- topology.standalone.stop(timeout=10)
- os.system('mv %s %s.47838_3' % (topology.standalone.errlog, topology.standalone.errlog))
- os.system('touch %s' % (topology.standalone.errlog))
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.47838_3' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(1)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
- enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog)
- disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog)
+ enabled = os.popen('egrep "SSL info:" %s | egrep \": enabled\" | wc -l' % topology_st.standalone.errlog)
+ disabled = os.popen('egrep "SSL info:" %s | egrep \": disabled\" | wc -l' % topology_st.standalone.errlog)
ecount = int(enabled.readline().rstrip())
dcount = int(disabled.readline().rstrip())
@@ -376,34 +337,35 @@ def test_47838_run_4(topology):
else:
assert ecount == 20
assert dcount == (plus_all_ecount + plus_all_dcount - ecount)
- weak = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | egrep "WEAK CIPHER" | wc -l' % topology.standalone.errlog)
+ weak = os.popen(
+ 'egrep "SSL info:" %s | egrep \": enabled\" | egrep "WEAK CIPHER" | wc -l' % topology_st.standalone.errlog)
wcount = int(weak.readline().rstrip())
log.info("Weak ciphers in the default setting: %d" % wcount)
assert wcount == 0
- comp_nsSSLEnableCipherCount(topology, ecount)
+ comp_nsSSLEnableCipherCount(topology_st, ecount)
-def test_47838_run_5(topology):
+def test_47838_run_5(topology_st):
"""
Check nsSSL3Ciphers: default
Default ciphers are enabled.
default allowWeakCipher
"""
- _header(topology, 'Test Case 6 - Check default nsSSL3Ciphers (default setting) with default allowWeakCipher')
+ _header(topology_st, 'Test Case 6 - Check default nsSSL3Ciphers (default setting) with default allowWeakCipher')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', 'default')])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', 'default')])
log.info("\n######################### Restarting the server ######################\n")
- topology.standalone.stop(timeout=10)
- os.system('mv %s %s.47838_4' % (topology.standalone.errlog, topology.standalone.errlog))
- os.system('touch %s' % (topology.standalone.errlog))
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.47838_4' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(1)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
- enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog)
- disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog)
+ enabled = os.popen('egrep "SSL info:" %s | egrep \": enabled\" | wc -l' % topology_st.standalone.errlog)
+ disabled = os.popen('egrep "SSL info:" %s | egrep \": disabled\" | wc -l' % topology_st.standalone.errlog)
ecount = int(enabled.readline().rstrip())
dcount = int(disabled.readline().rstrip())
@@ -416,34 +378,37 @@ def test_47838_run_5(topology):
else:
assert ecount == 23
assert dcount == (plus_all_ecount + plus_all_dcount - ecount)
- weak = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | egrep "WEAK CIPHER" | wc -l' % topology.standalone.errlog)
+ weak = os.popen(
+ 'egrep "SSL info:" %s | egrep \": enabled\" | egrep "WEAK CIPHER" | wc -l' % topology_st.standalone.errlog)
wcount = int(weak.readline().rstrip())
log.info("Weak ciphers in the default setting: %d" % wcount)
assert wcount == 0
- comp_nsSSLEnableCipherCount(topology, ecount)
+ comp_nsSSLEnableCipherCount(topology_st, ecount)
-def test_47838_run_6(topology):
+def test_47838_run_6(topology_st):
"""
Check nsSSL3Ciphers: +all,-rsa_rc4_128_md5
All ciphers are disabled.
default allowWeakCipher
"""
- _header(topology, 'Test Case 7 - Check nsSSL3Ciphers: +all,-tls_dhe_rsa_aes_128_gcm_sha with default allowWeakCipher')
+ _header(topology_st,
+ 'Test Case 7 - Check nsSSL3Ciphers: +all,-tls_dhe_rsa_aes_128_gcm_sha with default allowWeakCipher')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+all,-tls_dhe_rsa_aes_128_gcm_sha')])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(ENCRYPTION_DN,
+ [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+all,-tls_dhe_rsa_aes_128_gcm_sha')])
log.info("\n######################### Restarting the server ######################\n")
- topology.standalone.stop(timeout=10)
- os.system('mv %s %s.47838_5' % (topology.standalone.errlog, topology.standalone.errlog))
- os.system('touch %s' % (topology.standalone.errlog))
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.47838_5' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(1)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
- enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog)
- disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog)
+ enabled = os.popen('egrep "SSL info:" %s | egrep \": enabled\" | wc -l' % topology_st.standalone.errlog)
+ disabled = os.popen('egrep "SSL info:" %s | egrep \": disabled\" | wc -l' % topology_st.standalone.errlog)
ecount = int(enabled.readline().rstrip())
dcount = int(disabled.readline().rstrip())
@@ -456,29 +421,29 @@ def test_47838_run_6(topology):
assert ecount == (plus_all_ecount_noweak - 1)
assert dcount == (plus_all_dcount_noweak + 1)
- comp_nsSSLEnableCipherCount(topology, ecount)
+ comp_nsSSLEnableCipherCount(topology_st, ecount)
-def test_47838_run_7(topology):
+def test_47838_run_7(topology_st):
"""
Check nsSSL3Ciphers: -all,+rsa_rc4_128_md5
All ciphers are disabled.
default allowWeakCipher
"""
- _header(topology, 'Test Case 8 - Check nsSSL3Ciphers: -all,+rsa_rc4_128_md5 with default allowWeakCipher')
+ _header(topology_st, 'Test Case 8 - Check nsSSL3Ciphers: -all,+rsa_rc4_128_md5 with default allowWeakCipher')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '-all,+rsa_rc4_128_md5')])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '-all,+rsa_rc4_128_md5')])
log.info("\n######################### Restarting the server ######################\n")
- topology.standalone.stop(timeout=10)
- os.system('mv %s %s.47838_6' % (topology.standalone.errlog, topology.standalone.errlog))
- os.system('touch %s' % (topology.standalone.errlog))
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.47838_6' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(1)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
- enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog)
- disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog)
+ enabled = os.popen('egrep "SSL info:" %s | egrep \": enabled\" | wc -l' % topology_st.standalone.errlog)
+ disabled = os.popen('egrep "SSL info:" %s | egrep \": disabled\" | wc -l' % topology_st.standalone.errlog)
ecount = int(enabled.readline().rstrip())
dcount = int(disabled.readline().rstrip())
@@ -489,29 +454,29 @@ def test_47838_run_7(topology):
assert ecount == 1
assert dcount == (plus_all_ecount + plus_all_dcount - ecount)
- comp_nsSSLEnableCipherCount(topology, ecount)
+ comp_nsSSLEnableCipherCount(topology_st, ecount)
-def test_47838_run_8(topology):
+def test_47838_run_8(topology_st):
"""
Check nsSSL3Ciphers: default + allowWeakCipher: off
Strong Default ciphers are enabled.
"""
- _header(topology, 'Test Case 9 - Check default nsSSL3Ciphers (default setting + allowWeakCipher: off)')
+ _header(topology_st, 'Test Case 9 - Check default nsSSL3Ciphers (default setting + allowWeakCipher: off)')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', 'default'),
- (ldap.MOD_REPLACE, 'allowWeakCipher', 'off')])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', 'default'),
+ (ldap.MOD_REPLACE, 'allowWeakCipher', 'off')])
log.info("\n######################### Restarting the server ######################\n")
- topology.standalone.stop(timeout=10)
- os.system('mv %s %s.47838_7' % (topology.standalone.errlog, topology.standalone.errlog))
- os.system('touch %s' % (topology.standalone.errlog))
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.47838_7' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(1)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
- enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog)
- disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog)
+ enabled = os.popen('egrep "SSL info:" %s | egrep \": enabled\" | wc -l' % topology_st.standalone.errlog)
+ disabled = os.popen('egrep "SSL info:" %s | egrep \": disabled\" | wc -l' % topology_st.standalone.errlog)
ecount = int(enabled.readline().rstrip())
dcount = int(disabled.readline().rstrip())
@@ -520,63 +485,70 @@ def test_47838_run_8(topology):
global plus_all_ecount
global plus_all_dcount
if nss_version >= NSS323:
- assert ecount == 29
+ assert ecount == 29
else:
- assert ecount == 23
+ assert ecount == 23
assert dcount == (plus_all_ecount + plus_all_dcount - ecount)
- weak = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | egrep "WEAK CIPHER" | wc -l' % topology.standalone.errlog)
+ weak = os.popen(
+ 'egrep "SSL info:" %s | egrep \": enabled\" | egrep "WEAK CIPHER" | wc -l' % topology_st.standalone.errlog)
wcount = int(weak.readline().rstrip())
log.info("Weak ciphers in the default setting: %d" % wcount)
assert wcount == 0
- comp_nsSSLEnableCipherCount(topology, ecount)
+ comp_nsSSLEnableCipherCount(topology_st, ecount)
-def test_47838_run_9(topology):
+def test_47838_run_9(topology_st):
"""
Check no nsSSL3Ciphers
Default ciphers are enabled.
allowWeakCipher: on
nsslapd-errorlog-level: 0
"""
- _header(topology, 'Test Case 10 - Check no nsSSL3Ciphers (default setting) with no errorlog-level & allowWeakCipher on')
+ _header(topology_st,
+ 'Test Case 10 - Check no nsSSL3Ciphers (default setting) with no errorlog-level & allowWeakCipher on')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', None),
- (ldap.MOD_REPLACE, 'allowWeakCipher', 'on')])
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', None)])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', None),
+ (ldap.MOD_REPLACE, 'allowWeakCipher', 'on')])
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', None)])
log.info("\n######################### Restarting the server ######################\n")
- topology.standalone.stop(timeout=10)
- os.system('mv %s %s.47838_8' % (topology.standalone.errlog, topology.standalone.errlog))
- os.system('touch %s' % (topology.standalone.errlog))
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.47838_8' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(1)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
- enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog)
- disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog)
+ enabled = os.popen('egrep "SSL info:" %s | egrep \": enabled\" | wc -l' % topology_st.standalone.errlog)
+ disabled = os.popen('egrep "SSL info:" %s | egrep \": disabled\" | wc -l' % topology_st.standalone.errlog)
ecount = int(enabled.readline().rstrip())
dcount = int(disabled.readline().rstrip())
log.info("Enabled ciphers: %d" % ecount)
log.info("Disabled ciphers: %d" % dcount)
- if nss_version >= NSS323:
+ if nss_version >= NSS327:
+ assert ecount == 34
+ elif nss_version >= NSS323:
assert ecount == 36
else:
assert ecount == 30
assert dcount == 0
- weak = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | egrep "WEAK CIPHER" | wc -l' % topology.standalone.errlog)
+ weak = os.popen(
+ 'egrep "SSL info:" %s | egrep \": enabled\" | egrep "WEAK CIPHER" | wc -l' % topology_st.standalone.errlog)
wcount = int(weak.readline().rstrip())
log.info("Weak ciphers in the default setting: %d" % wcount)
- if nss_version >= NSS320:
+ if nss_version >= NSS327:
+ assert wcount == 5
+ elif nss_version >= NSS320:
assert wcount == 7
else:
assert wcount == 11
- comp_nsSSLEnableCipherCount(topology, ecount)
+ comp_nsSSLEnableCipherCount(topology_st, ecount)
-def test_47838_run_10(topology):
+def test_47838_run_10(topology_st):
"""
Check nsSSL3Ciphers: -TLS_RSA_WITH_NULL_MD5,+TLS_RSA_WITH_RC4_128_MD5,
+TLS_RSA_EXPORT_WITH_RC4_40_MD5,+TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,
@@ -589,21 +561,22 @@ def test_47838_run_10(topology):
allowWeakCipher: on
nsslapd-errorlog-level: 0
"""
- _header(topology, 'Test Case 11 - Check nsSSL3Ciphers: long list using the NSS Cipher Suite name with allowWeakCipher on')
+ _header(topology_st,
+ 'Test Case 11 - Check nsSSL3Ciphers: long list using the NSS Cipher Suite name with allowWeakCipher on')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers',
- '-TLS_RSA_WITH_NULL_MD5,+TLS_RSA_WITH_RC4_128_MD5,+TLS_RSA_EXPORT_WITH_RC4_40_MD5,+TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,+TLS_DHE_RSA_WITH_DES_CBC_SHA,+SSL_RSA_FIPS_WITH_DES_CBC_SHA,+TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,+SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA,+TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,+TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,-SSL_CK_RC4_128_WITH_MD5,-SSL_CK_RC4_128_EXPORT40_WITH_MD5,-SSL_CK_RC2_128_CBC_WITH_MD5,-SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5,-SSL_CK_DES_64_CBC_WITH_MD5,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5')])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers',
+ '-TLS_RSA_WITH_NULL_MD5,+TLS_RSA_WITH_RC4_128_MD5,+TLS_RSA_EXPORT_WITH_RC4_40_MD5,+TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,+TLS_DHE_RSA_WITH_DES_CBC_SHA,+SSL_RSA_FIPS_WITH_DES_CBC_SHA,+TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,+SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA,+TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,+TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,-SSL_CK_RC4_128_WITH_MD5,-SSL_CK_RC4_128_EXPORT40_WITH_MD5,-SSL_CK_RC2_128_CBC_WITH_MD5,-SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5,-SSL_CK_DES_64_CBC_WITH_MD5,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5')])
log.info("\n######################### Restarting the server ######################\n")
- topology.standalone.stop(timeout=10)
- os.system('mv %s %s.47838_9' % (topology.standalone.errlog, topology.standalone.errlog))
- os.system('touch %s' % (topology.standalone.errlog))
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.47838_9' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(1)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
- enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog)
- disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog)
+ enabled = os.popen('egrep "SSL info:" %s | egrep \": enabled\" | wc -l' % topology_st.standalone.errlog)
+ disabled = os.popen('egrep "SSL info:" %s | egrep \": disabled\" | wc -l' % topology_st.standalone.errlog)
ecount = int(enabled.readline().rstrip())
dcount = int(disabled.readline().rstrip())
@@ -613,33 +586,34 @@ def test_47838_run_10(topology):
global plus_all_dcount
assert ecount == 9
assert dcount == 0
- weak = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | egrep "WEAK CIPHER" | wc -l' % topology.standalone.errlog)
+ weak = os.popen(
+ 'egrep "SSL info:" %s | egrep \": enabled\" | egrep "WEAK CIPHER" | wc -l' % topology_st.standalone.errlog)
wcount = int(weak.readline().rstrip())
log.info("Weak ciphers in the default setting: %d" % wcount)
- topology.standalone.log.info("ticket47838 was successfully verified.")
+ topology_st.standalone.log.info("ticket47838 was successfully verified.")
- comp_nsSSLEnableCipherCount(topology, ecount)
+ comp_nsSSLEnableCipherCount(topology_st, ecount)
-def test_47838_run_11(topology):
+def test_47838_run_11(topology_st):
"""
Check nsSSL3Ciphers: +fortezza
SSL_GetImplementedCiphers does not return this as a secuire cipher suite
"""
- _header(topology, 'Test Case 12 - Check nsSSL3Ciphers: +fortezza, which is not supported')
+ _header(topology_st, 'Test Case 12 - Check nsSSL3Ciphers: +fortezza, which is not supported')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+fortezza')])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+fortezza')])
log.info("\n######################### Restarting the server ######################\n")
- topology.standalone.stop(timeout=10)
- os.system('mv %s %s.47838_10' % (topology.standalone.errlog, topology.standalone.errlog))
- os.system('touch %s' % (topology.standalone.errlog))
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.47838_10' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(1)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
- errmsg = os.popen('egrep "SSL alert:" %s | egrep "is not available in NSS"' % topology.standalone.errlog)
+ errmsg = os.popen('egrep "SSL info:" %s | egrep "is not available in NSS"' % topology_st.standalone.errlog)
if errmsg != "":
log.info("Expected error message:")
log.info("%s" % errmsg.readline())
@@ -647,37 +621,38 @@ def test_47838_run_11(topology):
log.info("Expected error message was not found")
assert False
- comp_nsSSLEnableCipherCount(topology, 0)
+ comp_nsSSLEnableCipherCount(topology_st, 0)
-def test_47928_run_0(topology):
+def test_47928_run_0(topology_st):
"""
No SSL version config parameters.
Check SSL3 (TLS1.0) is off.
"""
- _header(topology, 'Test Case 13 - No SSL version config parameters')
+ _header(topology_st, 'Test Case 13 - No SSL version config parameters')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
# add them once and remove them
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3', 'off'),
- (ldap.MOD_REPLACE, 'nsTLS1', 'on'),
- (ldap.MOD_REPLACE, 'sslVersionMin', 'TLS1.1'),
- (ldap.MOD_REPLACE, 'sslVersionMax', 'TLS1.2')])
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'nsSSL3', None),
- (ldap.MOD_DELETE, 'nsTLS1', None),
- (ldap.MOD_DELETE, 'sslVersionMin', None),
- (ldap.MOD_DELETE, 'sslVersionMax', None)])
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '64')])
+ topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3', 'off'),
+ (ldap.MOD_REPLACE, 'nsTLS1', 'on'),
+ (ldap.MOD_REPLACE, 'sslVersionMin', 'TLS1.1'),
+ (ldap.MOD_REPLACE, 'sslVersionMax', 'TLS1.2')])
+ topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'nsSSL3', None),
+ (ldap.MOD_DELETE, 'nsTLS1', None),
+ (ldap.MOD_DELETE, 'sslVersionMin', None),
+ (ldap.MOD_DELETE, 'sslVersionMax', None)])
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '64')])
time.sleep(5)
log.info("\n######################### Restarting the server ######################\n")
- topology.standalone.stop(timeout=10)
- os.system('mv %s %s.47838_11' % (topology.standalone.errlog, topology.standalone.errlog))
- os.system('touch %s' % (topology.standalone.errlog))
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.47838_11' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(1)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
- errmsg = os.popen('egrep "SSL alert:" %s | egrep "Default SSL Version settings; Configuring the version range as min: TLS1.1"' % topology.standalone.errlog)
+ errmsg = os.popen(
+ 'egrep "SSL info:" %s | egrep "Default SSL Version settings; Configuring the version range as min: TLS1.1"' % topology_st.standalone.errlog)
if errmsg != "":
log.info("Expected message:")
log.info("%s" % errmsg.readline())
@@ -686,24 +661,25 @@ def test_47928_run_0(topology):
assert False
-def test_47928_run_1(topology):
+def test_47928_run_1(topology_st):
"""
No nsSSL3, nsTLS1; sslVersionMin > sslVersionMax
Check sslVersionMax is ignored.
"""
- _header(topology, 'Test Case 14 - No nsSSL3, nsTLS1; sslVersionMin > sslVersionMax')
+ _header(topology_st, 'Test Case 14 - No nsSSL3, nsTLS1; sslVersionMin > sslVersionMax')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'sslVersionMin', 'TLS1.2'),
- (ldap.MOD_REPLACE, 'sslVersionMax', 'TLS1.1')])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'sslVersionMin', 'TLS1.2'),
+ (ldap.MOD_REPLACE, 'sslVersionMax', 'TLS1.1')])
log.info("\n######################### Restarting the server ######################\n")
- topology.standalone.stop(timeout=10)
- os.system('mv %s %s.47838_12' % (topology.standalone.errlog, topology.standalone.errlog))
- os.system('touch %s' % (topology.standalone.errlog))
- topology.standalone.start(timeout=120)
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.47838_12' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
+ topology_st.standalone.start(timeout=120)
- errmsg = os.popen('egrep "SSL alert:" %s | egrep "The min value of NSS version range"' % topology.standalone.errlog)
+ errmsg = os.popen(
+ 'egrep "SSL info:" %s | egrep "The min value of NSS version range"' % topology_st.standalone.errlog)
if errmsg != "":
log.info("Expected message:")
log.info("%s" % errmsg.readline())
@@ -711,7 +687,8 @@ def test_47928_run_1(topology):
log.info("Expected message was not found")
assert False
- errmsg = os.popen('egrep "SSL Initialization" %s | egrep "Configured SSL version range: min: TLS1.2, max: TLS1"' % topology.standalone.errlog)
+ errmsg = os.popen(
+ 'egrep "SSL Initialization" %s | egrep "Configured SSL version range: min: TLS1.2, max: TLS1"' % topology_st.standalone.errlog)
if errmsg != "":
log.info("Expected message:")
log.info("%s" % errmsg.readline())
@@ -720,26 +697,27 @@ def test_47928_run_1(topology):
assert False
-def test_47928_run_2(topology):
+def test_47928_run_2(topology_st):
"""
nsSSL3: on; sslVersionMin: TLS1.1; sslVersionMax: TLS1.2
Conflict between nsSSL3 and range; nsSSL3 is disabled
"""
- _header(topology, 'Test Case 15 - nsSSL3: on; sslVersionMin: TLS1.1; sslVersionMax: TLS1.2')
+ _header(topology_st, 'Test Case 15 - nsSSL3: on; sslVersionMin: TLS1.1; sslVersionMax: TLS1.2')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'sslVersionMin', 'TLS1.1'),
- (ldap.MOD_REPLACE, 'sslVersionMax', 'TLS1.2'),
- (ldap.MOD_REPLACE, 'nsSSL3', 'on')])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'sslVersionMin', 'TLS1.1'),
+ (ldap.MOD_REPLACE, 'sslVersionMax', 'TLS1.2'),
+ (ldap.MOD_REPLACE, 'nsSSL3', 'on')])
log.info("\n######################### Restarting the server ######################\n")
- topology.standalone.stop(timeout=10)
- os.system('mv %s %s.47838_13' % (topology.standalone.errlog, topology.standalone.errlog))
- os.system('touch %s' % (topology.standalone.errlog))
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.47838_13' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(1)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
- errmsg = os.popen('egrep "SSL alert:" %s | egrep "Found unsecure configuration: nsSSL3: on"' % topology.standalone.errlog)
+ errmsg = os.popen(
+ 'egrep "SSL info:" %s | egrep "Found unsecure configuration: nsSSL3: on"' % topology_st.standalone.errlog)
if errmsg != "":
log.info("Expected message:")
log.info("%s" % errmsg.readline())
@@ -747,7 +725,7 @@ def test_47928_run_2(topology):
log.info("Expected message was not found")
assert False
- errmsg = os.popen('egrep "SSL alert:" %s | egrep "Respect the supported range."' % topology.standalone.errlog)
+ errmsg = os.popen('egrep "SSL info:" %s | egrep "Respect the supported range."' % topology_st.standalone.errlog)
if errmsg != "":
log.info("Expected message:")
log.info("%s" % errmsg.readline())
@@ -755,7 +733,8 @@ def test_47928_run_2(topology):
log.info("Expected message was not found")
assert False
- errmsg = os.popen('egrep "SSL Initialization" %s | egrep "Configured SSL version range: min: TLS1.1, max: TLS1"' % topology.standalone.errlog)
+ errmsg = os.popen(
+ 'egrep "SSL Initialization" %s | egrep "Configured SSL version range: min: TLS1.1, max: TLS1"' % topology_st.standalone.errlog)
if errmsg != "":
log.info("Expected message:")
log.info("%s" % errmsg.readline())
@@ -764,27 +743,28 @@ def test_47928_run_2(topology):
assert False
-def test_47928_run_3(topology):
+def test_47928_run_3(topology_st):
"""
nsSSL3: on; nsTLS1: off; sslVersionMin: TLS1.1; sslVersionMax: TLS1.2
Conflict between nsSSL3/nsTLS1 and range; nsSSL3 is disabled; nsTLS1 is enabled.
"""
- _header(topology, 'Test Case 16 - nsSSL3: on; nsTLS1: off; sslVersionMin: TLS1.1; sslVersionMax: TLS1.2')
+ _header(topology_st, 'Test Case 16 - nsSSL3: on; nsTLS1: off; sslVersionMin: TLS1.1; sslVersionMax: TLS1.2')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'sslVersionMin', 'TLS1.1'),
- (ldap.MOD_REPLACE, 'sslVersionMax', 'TLS1.2'),
- (ldap.MOD_REPLACE, 'nsSSL3', 'on'),
- (ldap.MOD_REPLACE, 'nsTLS1', 'off')])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'sslVersionMin', 'TLS1.1'),
+ (ldap.MOD_REPLACE, 'sslVersionMax', 'TLS1.2'),
+ (ldap.MOD_REPLACE, 'nsSSL3', 'on'),
+ (ldap.MOD_REPLACE, 'nsTLS1', 'off')])
log.info("\n######################### Restarting the server ######################\n")
- topology.standalone.stop(timeout=10)
- os.system('mv %s %s.47838_14' % (topology.standalone.errlog, topology.standalone.errlog))
- os.system('touch %s' % (topology.standalone.errlog))
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.47838_14' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(1)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
- errmsg = os.popen('egrep "SSL alert:" %s | egrep "Found unsecure configuration: nsSSL3: on"' % topology.standalone.errlog)
+ errmsg = os.popen(
+ 'egrep "SSL info:" %s | egrep "Found unsecure configuration: nsSSL3: on"' % topology_st.standalone.errlog)
if errmsg != "":
log.info("Expected message:")
log.info("%s" % errmsg.readline())
@@ -792,7 +772,7 @@ def test_47928_run_3(topology):
log.info("Expected message was not found")
assert False
- errmsg = os.popen('egrep "SSL alert:" %s | egrep "Respect the configured range."' % topology.standalone.errlog)
+ errmsg = os.popen('egrep "SSL info:" %s | egrep "Respect the configured range."' % topology_st.standalone.errlog)
if errmsg != "":
log.info("Expected message:")
log.info("%s" % errmsg.readline())
@@ -800,7 +780,8 @@ def test_47928_run_3(topology):
log.info("Expected message was not found")
assert False
- errmsg = os.popen('egrep "SSL Initialization" %s | egrep "Configured SSL version range: min: TLS1.1, max: TLS1"' % topology.standalone.errlog)
+ errmsg = os.popen(
+ 'egrep "SSL Initialization" %s | egrep "Configured SSL version range: min: TLS1.1, max: TLS1"' % topology_st.standalone.errlog)
if errmsg != "":
log.info("Expected message:")
log.info("%s" % errmsg.readline())
@@ -809,25 +790,25 @@ def test_47928_run_3(topology):
assert False
-def test_47838_run_last(topology):
+def test_47838_run_last(topology_st):
"""
Check nsSSL3Ciphers: all <== invalid value
All ciphers are disabled.
"""
- _header(topology, 'Test Case 17 - Check nsSSL3Ciphers: all, which is invalid')
+ _header(topology_st, 'Test Case 17 - Check nsSSL3Ciphers: all, which is invalid')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', None)])
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', 'all')])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', None)])
+ topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', 'all')])
log.info("\n######################### Restarting the server ######################\n")
- topology.standalone.stop(timeout=10)
- os.system('mv %s %s.47838_15' % (topology.standalone.errlog, topology.standalone.errlog))
- os.system('touch %s' % (topology.standalone.errlog))
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.47838_15' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(1)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
- errmsg = os.popen('egrep "SSL alert:" %s | egrep "invalid ciphers"' % topology.standalone.errlog)
+ errmsg = os.popen('egrep "SSL info:" %s | egrep "invalid ciphers"' % topology_st.standalone.errlog)
if errmsg != "":
log.info("Expected error message:")
log.info("%s" % errmsg.readline())
@@ -835,9 +816,9 @@ def test_47838_run_last(topology):
log.info("Expected error message was not found")
assert False
- comp_nsSSLEnableCipherCount(topology, 0)
+ comp_nsSSLEnableCipherCount(topology_st, 0)
- topology.standalone.log.info("ticket47838, 47880, 47908, 47928 were successfully verified.")
+ topology_st.standalone.log.info("ticket47838, 47880, 47908, 47928 were successfully verified.")
if __name__ == '__main__':
diff --git a/dirsrvtests/tests/tickets/ticket47869MMR_test.py b/dirsrvtests/tests/tickets/ticket47869MMR_test.py
index a52db5e..56b5273 100644
--- a/dirsrvtests/tests/tickets/ticket47869MMR_test.py
+++ b/dirsrvtests/tests/tickets/ticket47869MMR_test.py
@@ -6,142 +6,28 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
+import logging
import time
+
import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_m2
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-#
-# important part. We can deploy Master1 and Master2 on different versions
-#
-installation1_prefix = None
-installation2_prefix = None
-
TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX
ENTRY_NAME = 'test_entry'
MAX_ENTRIES = 10
-BIND_NAME = 'bind_entry'
-BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
-BIND_PW = 'password'
-
-
-class TopologyMaster1Master2(object):
- def __init__(self, master1, master2):
- master1.open()
- self.master1 = master1
-
- master2.open()
- self.master2 = master2
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to create a replicated topology for the 'module'.
- The replicated topology is MASTER1 <-> Master2.
- '''
- global installation1_prefix
- global installation2_prefix
-
- # allocate master1 on a given deployement
- master1 = DirSrv(verbose=False)
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Args for the master1 instance
- args_instance[SER_HOST] = HOST_MASTER_1
- args_instance[SER_PORT] = PORT_MASTER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
- args_master = args_instance.copy()
- master1.allocate(args_master)
-
- # allocate master1 on a given deployement
- master2 = DirSrv(verbose=False)
- if installation2_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation2_prefix
-
- # Args for the consumer instance
- args_instance[SER_HOST] = HOST_MASTER_2
- args_instance[SER_PORT] = PORT_MASTER_2
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
- args_master = args_instance.copy()
- master2.allocate(args_master)
-
- # Get the status of the instance
- instance_master1 = master1.exists()
- instance_master2 = master2.exists()
-
- # Remove all the instances
- if instance_master1:
- master1.delete()
- if instance_master2:
- master2.delete()
-
- # Create the instances
- master1.create()
- master1.open()
- master2.create()
- master2.open()
-
- #
- # Now prepare the Master-Consumer topology
- #
- # First Enable replication
- master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
- master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
- # Initialize the supplier->consumer
-
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
-
- if not repl_agreement:
- log.fatal("Fail to create a replica agreement")
- sys.exit(1)
-
- log.debug("%s created" % repl_agreement)
-
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
-
- master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
- master1.waitForReplInit(repl_agreement)
-
- # Check replication is working fine
- if master1.testReplication(DEFAULT_SUFFIX, master2):
- log.info('Replication is working.')
- else:
- log.fatal('Replication is not working.')
- assert False
-
- def fin():
- master1.delete()
- master2.delete()
- request.addfinalizer(fin)
-
- # Here we have two instances master and consumer
- return TopologyMaster1Master2(master1, master2)
+BIND_NAME = 'bind_entry'
+BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
+BIND_PW = 'password'
-def test_ticket47869_init(topology):
+def test_ticket47869_init(topology_m2):
"""
It adds an entry ('bind_entry') and 10 test entries
It sets the anonymous aci
@@ -149,21 +35,21 @@ def test_ticket47869_init(topology):
"""
# enable acl error logging
mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(8192))] # REPL
- topology.master1.modify_s(DN_CONFIG, mod)
- topology.master2.modify_s(DN_CONFIG, mod)
+ topology_m2.ms["master1"].modify_s(DN_CONFIG, mod)
+ topology_m2.ms["master2"].modify_s(DN_CONFIG, mod)
# entry used to bind with
- topology.master1.log.info("Add %s" % BIND_DN)
- topology.master1.add_s(Entry((BIND_DN, {
- 'objectclass': "top person".split(),
- 'sn': BIND_NAME,
- 'cn': BIND_NAME,
- 'userpassword': BIND_PW})))
+ topology_m2.ms["master1"].log.info("Add %s" % BIND_DN)
+ topology_m2.ms["master1"].add_s(Entry((BIND_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': BIND_NAME,
+ 'cn': BIND_NAME,
+ 'userpassword': BIND_PW})))
loop = 0
ent = None
while loop <= 10:
try:
- ent = topology.master2.getEntry(BIND_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ ent = topology_m2.ms["master2"].getEntry(BIND_DN, ldap.SCOPE_BASE, "(objectclass=*)")
break
except ldap.NO_SUCH_OBJECT:
time.sleep(1)
@@ -174,22 +60,22 @@ def test_ticket47869_init(topology):
# keep anonymous ACI for use 'read-search' aci in SEARCH test
ACI_ANONYMOUS = "(targetattr!=\"userPassword\")(version 3.0; acl \"Enable anonymous access\"; allow (read, search, compare) userdn=\"ldap:///anyone\";)"
mod = [(ldap.MOD_REPLACE, 'aci', ACI_ANONYMOUS)]
- topology.master1.modify_s(SUFFIX, mod)
- topology.master2.modify_s(SUFFIX, mod)
+ topology_m2.ms["master1"].modify_s(SUFFIX, mod)
+ topology_m2.ms["master2"].modify_s(SUFFIX, mod)
# add entries
for cpt in range(MAX_ENTRIES):
name = "%s%d" % (ENTRY_NAME, cpt)
mydn = "cn=%s,%s" % (name, SUFFIX)
- topology.master1.add_s(Entry((mydn,
- {'objectclass': "top person".split(),
- 'sn': name,
- 'cn': name})))
+ topology_m2.ms["master1"].add_s(Entry((mydn,
+ {'objectclass': "top person".split(),
+ 'sn': name,
+ 'cn': name})))
loop = 0
ent = None
while loop <= 10:
try:
- ent = topology.master2.getEntry(mydn, ldap.SCOPE_BASE, "(objectclass=*)")
+ ent = topology_m2.ms["master2"].getEntry(mydn, ldap.SCOPE_BASE, "(objectclass=*)")
break
except ldap.NO_SUCH_OBJECT:
time.sleep(1)
@@ -198,7 +84,7 @@ def test_ticket47869_init(topology):
assert False
-def test_ticket47869_check(topology):
+def test_ticket47869_check(topology_m2):
'''
On Master 1 and 2:
Bind as Directory Manager.
@@ -213,107 +99,107 @@ def test_ticket47869_check(topology):
Search all specifying nscpEntryWsi in the attribute list.
Check nscpEntryWsi is not returned.
'''
- topology.master1.log.info("\n\n######################### CHECK nscpentrywsi ######################\n")
+ topology_m2.ms["master1"].log.info("\n\n######################### CHECK nscpentrywsi ######################\n")
- topology.master1.log.info("##### Master1: Bind as %s #####" % DN_DM)
- topology.master1.simple_bind_s(DN_DM, PASSWORD)
+ topology_m2.ms["master1"].log.info("##### Master1: Bind as %s #####" % DN_DM)
+ topology_m2.ms["master1"].simple_bind_s(DN_DM, PASSWORD)
- topology.master1.log.info("Master1: Calling search_ext...")
- msgid = topology.master1.search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
+ topology_m2.ms["master1"].log.info("Master1: Calling search_ext...")
+ msgid = topology_m2.ms["master1"].search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
nscpentrywsicnt = 0
- rtype, rdata, rmsgid = topology.master1.result2(msgid)
- topology.master1.log.info("%d results" % len(rdata))
+ rtype, rdata, rmsgid = topology_m2.ms["master1"].result2(msgid)
+ topology_m2.ms["master1"].log.info("%d results" % len(rdata))
- topology.master1.log.info("Results:")
+ topology_m2.ms["master1"].log.info("Results:")
for dn, attrs in rdata:
- topology.master1.log.info("dn: %s" % dn)
+ topology_m2.ms["master1"].log.info("dn: %s" % dn)
if 'nscpentrywsi' in attrs:
nscpentrywsicnt += 1
- topology.master1.log.info("Master1: count of nscpentrywsi: %d" % nscpentrywsicnt)
+ topology_m2.ms["master1"].log.info("Master1: count of nscpentrywsi: %d" % nscpentrywsicnt)
- topology.master2.log.info("##### Master2: Bind as %s #####" % DN_DM)
- topology.master2.simple_bind_s(DN_DM, PASSWORD)
+ topology_m2.ms["master2"].log.info("##### Master2: Bind as %s #####" % DN_DM)
+ topology_m2.ms["master2"].simple_bind_s(DN_DM, PASSWORD)
- topology.master2.log.info("Master2: Calling search_ext...")
- msgid = topology.master2.search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
+ topology_m2.ms["master2"].log.info("Master2: Calling search_ext...")
+ msgid = topology_m2.ms["master2"].search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
nscpentrywsicnt = 0
- rtype, rdata, rmsgid = topology.master2.result2(msgid)
- topology.master2.log.info("%d results" % len(rdata))
+ rtype, rdata, rmsgid = topology_m2.ms["master2"].result2(msgid)
+ topology_m2.ms["master2"].log.info("%d results" % len(rdata))
- topology.master2.log.info("Results:")
+ topology_m2.ms["master2"].log.info("Results:")
for dn, attrs in rdata:
- topology.master2.log.info("dn: %s" % dn)
+ topology_m2.ms["master2"].log.info("dn: %s" % dn)
if 'nscpentrywsi' in attrs:
nscpentrywsicnt += 1
- topology.master2.log.info("Master2: count of nscpentrywsi: %d" % nscpentrywsicnt)
+ topology_m2.ms["master2"].log.info("Master2: count of nscpentrywsi: %d" % nscpentrywsicnt)
# bind as bind_entry
- topology.master1.log.info("##### Master1: Bind as %s #####" % BIND_DN)
- topology.master1.simple_bind_s(BIND_DN, BIND_PW)
+ topology_m2.ms["master1"].log.info("##### Master1: Bind as %s #####" % BIND_DN)
+ topology_m2.ms["master1"].simple_bind_s(BIND_DN, BIND_PW)
- topology.master1.log.info("Master1: Calling search_ext...")
- msgid = topology.master1.search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
+ topology_m2.ms["master1"].log.info("Master1: Calling search_ext...")
+ msgid = topology_m2.ms["master1"].search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
nscpentrywsicnt = 0
- rtype, rdata, rmsgid = topology.master1.result2(msgid)
- topology.master1.log.info("%d results" % len(rdata))
+ rtype, rdata, rmsgid = topology_m2.ms["master1"].result2(msgid)
+ topology_m2.ms["master1"].log.info("%d results" % len(rdata))
for dn, attrs in rdata:
if 'nscpentrywsi' in attrs:
nscpentrywsicnt += 1
assert nscpentrywsicnt == 0
- topology.master1.log.info("Master1: count of nscpentrywsi: %d" % nscpentrywsicnt)
+ topology_m2.ms["master1"].log.info("Master1: count of nscpentrywsi: %d" % nscpentrywsicnt)
# bind as bind_entry
- topology.master2.log.info("##### Master2: Bind as %s #####" % BIND_DN)
- topology.master2.simple_bind_s(BIND_DN, BIND_PW)
+ topology_m2.ms["master2"].log.info("##### Master2: Bind as %s #####" % BIND_DN)
+ topology_m2.ms["master2"].simple_bind_s(BIND_DN, BIND_PW)
- topology.master2.log.info("Master2: Calling search_ext...")
- msgid = topology.master2.search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
+ topology_m2.ms["master2"].log.info("Master2: Calling search_ext...")
+ msgid = topology_m2.ms["master2"].search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
nscpentrywsicnt = 0
- rtype, rdata, rmsgid = topology.master2.result2(msgid)
- topology.master2.log.info("%d results" % len(rdata))
+ rtype, rdata, rmsgid = topology_m2.ms["master2"].result2(msgid)
+ topology_m2.ms["master2"].log.info("%d results" % len(rdata))
for dn, attrs in rdata:
if 'nscpentrywsi' in attrs:
nscpentrywsicnt += 1
assert nscpentrywsicnt == 0
- topology.master2.log.info("Master2: count of nscpentrywsi: %d" % nscpentrywsicnt)
+ topology_m2.ms["master2"].log.info("Master2: count of nscpentrywsi: %d" % nscpentrywsicnt)
# bind as anonymous
- topology.master1.log.info("##### Master1: Bind as anonymous #####")
- topology.master1.simple_bind_s("", "")
+ topology_m2.ms["master1"].log.info("##### Master1: Bind as anonymous #####")
+ topology_m2.ms["master1"].simple_bind_s("", "")
- topology.master1.log.info("Master1: Calling search_ext...")
- msgid = topology.master1.search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
+ topology_m2.ms["master1"].log.info("Master1: Calling search_ext...")
+ msgid = topology_m2.ms["master1"].search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
nscpentrywsicnt = 0
- rtype, rdata, rmsgid = topology.master1.result2(msgid)
- topology.master1.log.info("%d results" % len(rdata))
+ rtype, rdata, rmsgid = topology_m2.ms["master1"].result2(msgid)
+ topology_m2.ms["master1"].log.info("%d results" % len(rdata))
for dn, attrs in rdata:
if 'nscpentrywsi' in attrs:
nscpentrywsicnt += 1
assert nscpentrywsicnt == 0
- topology.master1.log.info("Master1: count of nscpentrywsi: %d" % nscpentrywsicnt)
+ topology_m2.ms["master1"].log.info("Master1: count of nscpentrywsi: %d" % nscpentrywsicnt)
# bind as bind_entry
- topology.master2.log.info("##### Master2: Bind as anonymous #####")
- topology.master2.simple_bind_s("", "")
+ topology_m2.ms["master2"].log.info("##### Master2: Bind as anonymous #####")
+ topology_m2.ms["master2"].simple_bind_s("", "")
- topology.master2.log.info("Master2: Calling search_ext...")
- msgid = topology.master2.search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
+ topology_m2.ms["master2"].log.info("Master2: Calling search_ext...")
+ msgid = topology_m2.ms["master2"].search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
nscpentrywsicnt = 0
- rtype, rdata, rmsgid = topology.master2.result2(msgid)
- topology.master2.log.info("%d results" % len(rdata))
+ rtype, rdata, rmsgid = topology_m2.ms["master2"].result2(msgid)
+ topology_m2.ms["master2"].log.info("%d results" % len(rdata))
for dn, attrs in rdata:
if 'nscpentrywsi' in attrs:
nscpentrywsicnt += 1
assert nscpentrywsicnt == 0
- topology.master2.log.info("Master2: count of nscpentrywsi: %d" % nscpentrywsicnt)
+ topology_m2.ms["master2"].log.info("Master2: count of nscpentrywsi: %d" % nscpentrywsicnt)
- topology.master1.log.info("##### ticket47869 was successfully verified. #####")
+ topology_m2.ms["master1"].log.info("##### ticket47869 was successfully verified. #####")
if __name__ == '__main__':
diff --git a/dirsrvtests/tests/tickets/ticket47871_test.py b/dirsrvtests/tests/tickets/ticket47871_test.py
index 417a87e..826ca44 100644
--- a/dirsrvtests/tests/tickets/ticket47871_test.py
+++ b/dirsrvtests/tests/tickets/ticket47871_test.py
@@ -11,16 +11,14 @@ Created on Nov 7, 2013
@author: tbordaz
'''
-import os
-import sys
+import logging
import time
+
import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_m1c1
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
@@ -34,143 +32,50 @@ MAX_OTHERS = 10
ATTRIBUTES = ['street', 'countryName', 'description', 'postalAddress', 'postalCode', 'title', 'l', 'roomNumber']
-class TopologyMasterConsumer(object):
- def __init__(self, master, consumer):
- master.open()
- self.master = master
-
- consumer.open()
- self.consumer = consumer
-
- def __repr__(self):
- return "Master[%s] -> Consumer[%s" % (self.master, self.consumer)
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to create a replicated topology for the 'module'.
- The replicated topology is MASTER -> Consumer.
- '''
- master = DirSrv(verbose=False)
- consumer = DirSrv(verbose=False)
-
- # Args for the master instance
- args_instance[SER_HOST] = HOST_MASTER_1
- args_instance[SER_PORT] = PORT_MASTER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
- args_master = args_instance.copy()
- master.allocate(args_master)
-
- # Args for the consumer instance
- args_instance[SER_HOST] = HOST_CONSUMER_1
- args_instance[SER_PORT] = PORT_CONSUMER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
- args_consumer = args_instance.copy()
- consumer.allocate(args_consumer)
-
- # Get the status of the instance and restart it if it exists
- instance_master = master.exists()
- instance_consumer = consumer.exists()
-
- # Remove all the instances
- if instance_master:
- master.delete()
- if instance_consumer:
- consumer.delete()
-
- # Create the instances
- master.create()
- master.open()
- consumer.create()
- consumer.open()
-
- #
- # Now prepare the Master-Consumer topology
- #
- # First Enable replication
- master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
- consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER)
-
- # Initialize the supplier->consumer
-
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties)
-
- if not repl_agreement:
- log.fatal("Fail to create a replica agreement")
- sys.exit(1)
-
- log.debug("%s created" % repl_agreement)
- master.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
- master.waitForReplInit(repl_agreement)
-
- # Check replication is working fine
- if master.testReplication(DEFAULT_SUFFIX, consumer):
- log.info('Replication is working.')
- else:
- log.fatal('Replication is not working.')
- assert False
-
- def fin():
- master.delete()
- consumer.delete()
- request.addfinalizer(fin)
- #
- # Here we have two instances master and consumer
- # with replication working. Either coming from a backup recovery
- # or from a fresh (re)init
- # Time to return the topology
- return TopologyMasterConsumer(master, consumer)
-
-
-def test_ticket47871_init(topology):
+def test_ticket47871_init(topology_m1c1):
"""
Initialize the test environment
"""
- topology.master.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
+ topology_m1c1.ms["master1"].plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
mod = [(ldap.MOD_REPLACE, 'nsslapd-changelogmaxage', "10s"), # 10 second triming
(ldap.MOD_REPLACE, 'nsslapd-changelog-trim-interval', "5s")]
- topology.master.modify_s("cn=%s,%s" % (PLUGIN_RETRO_CHANGELOG, DN_PLUGIN), mod)
- #topology.master.plugins.enable(name=PLUGIN_MEMBER_OF)
- #topology.master.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
- topology.master.stop(timeout=10)
- topology.master.start(timeout=10)
+ topology_m1c1.ms["master1"].modify_s("cn=%s,%s" % (PLUGIN_RETRO_CHANGELOG, DN_PLUGIN), mod)
+ # topology_m1c1.ms["master1"].plugins.enable(name=PLUGIN_MEMBER_OF)
+ # topology_m1c1.ms["master1"].plugins.enable(name=PLUGIN_REFER_INTEGRITY)
+ topology_m1c1.ms["master1"].stop(timeout=10)
+ topology_m1c1.ms["master1"].start(timeout=10)
- topology.master.log.info("test_ticket47871_init topology %r" % (topology))
+ topology_m1c1.ms["master1"].log.info("test_ticket47871_init topology_m1c1 %r" % (topology_m1c1))
# the test case will check if a warning message is logged in the
# error log of the supplier
- topology.master.errorlog_file = open(topology.master.errlog, "r")
+ topology_m1c1.ms["master1"].errorlog_file = open(topology_m1c1.ms["master1"].errlog, "r")
-def test_ticket47871_1(topology):
+def test_ticket47871_1(topology_m1c1):
'''
ADD entries and check they are all in the retrocl
'''
# add dummy entries
for cpt in range(MAX_OTHERS):
name = "%s%d" % (OTHER_NAME, cpt)
- topology.master.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
- 'objectclass': "top person".split(),
- 'sn': name,
- 'cn': name})))
+ topology_m1c1.ms["master1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
+ 'objectclass': "top person".split(),
+ 'sn': name,
+ 'cn': name})))
- topology.master.log.info("test_ticket47871_init: %d entries ADDed %s[0..%d]" % (MAX_OTHERS, OTHER_NAME, MAX_OTHERS - 1))
+ topology_m1c1.ms["master1"].log.info(
+ "test_ticket47871_init: %d entries ADDed %s[0..%d]" % (MAX_OTHERS, OTHER_NAME, MAX_OTHERS - 1))
# Check the number of entries in the retro changelog
time.sleep(1)
- ents = topology.master.search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)")
+ ents = topology_m1c1.ms["master1"].search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)")
assert len(ents) == MAX_OTHERS
- topology.master.log.info("Added entries are")
+ topology_m1c1.ms["master1"].log.info("Added entries are")
for ent in ents:
- topology.master.log.info("%s" % ent.dn)
+ topology_m1c1.ms["master1"].log.info("%s" % ent.dn)
-def test_ticket47871_2(topology):
+def test_ticket47871_2(topology_m1c1):
'''
Wait until there is just a last entries
'''
@@ -178,11 +83,11 @@ def test_ticket47871_2(topology):
TRY_NO = 1
while TRY_NO <= MAX_TRIES:
time.sleep(6) # at least 1 trimming occurred
- ents = topology.master.search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)")
+ ents = topology_m1c1.ms["master1"].search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)")
assert len(ents) <= MAX_OTHERS
- topology.master.log.info("\nTry no %d it remains %d entries" % (TRY_NO, len(ents)))
+ topology_m1c1.ms["master1"].log.info("\nTry no %d it remains %d entries" % (TRY_NO, len(ents)))
for ent in ents:
- topology.master.log.info("%s" % ent.dn)
+ topology_m1c1.ms["master1"].log.info("%s" % ent.dn)
if len(ents) > 1:
TRY_NO += 1
else:
diff --git a/dirsrvtests/tests/tickets/ticket47900_test.py b/dirsrvtests/tests/tickets/ticket47900_test.py
index 1265eea..ae3da98 100644
--- a/dirsrvtests/tests/tickets/ticket47900_test.py
+++ b/dirsrvtests/tests/tickets/ticket47900_test.py
@@ -6,70 +6,26 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
import logging
+
+import ldap
import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
-CONFIG_DN = 'cn=config'
+CONFIG_DN = 'cn=config'
ADMIN_NAME = 'passwd_admin'
-ADMIN_DN = 'cn=%s,%s' % (ADMIN_NAME, SUFFIX)
-ADMIN_PWD = 'adminPassword_1'
+ADMIN_DN = 'cn=%s,%s' % (ADMIN_NAME, SUFFIX)
+ADMIN_PWD = 'adminPassword_1'
ENTRY_NAME = 'Joe Schmo'
-ENTRY_DN = 'cn=%s,%s' % (ENTRY_NAME, SUFFIX)
+ENTRY_DN = 'cn=%s,%s' % (ENTRY_NAME, SUFFIX)
INVALID_PWDS = ('2_Short', 'No_Number', 'N0Special', '{SSHA}bBy8UdtPZwu8uZna9QOYG3Pr41RpIRVDl8wddw==')
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-
-def test_ticket47900(topology):
+def test_ticket47900(topology_st):
"""
Test that password administrators/root DN can
bypass password syntax/policy.
@@ -90,54 +46,54 @@ def test_ticket47900(topology):
entry.setValues('cn', ADMIN_NAME)
entry.setValues('userpassword', ADMIN_PWD)
- topology.standalone.log.info("Creating Password Administator entry %s..." % ADMIN_DN)
+ topology_st.standalone.log.info("Creating Password Administator entry %s..." % ADMIN_DN)
try:
- topology.standalone.add_s(entry)
+ topology_st.standalone.add_s(entry)
except ldap.LDAPError as e:
- topology.standalone.log.error('Unexpected result ' + e.message['desc'])
+ topology_st.standalone.log.error('Unexpected result ' + e.message['desc'])
assert False
- topology.standalone.log.error("Failed to add Password Administator %s, error: %s "
- % (ADMIN_DN, e.message['desc']))
+ topology_st.standalone.log.error("Failed to add Password Administator %s, error: %s "
+ % (ADMIN_DN, e.message['desc']))
assert False
- topology.standalone.log.info("Configuring password policy...")
+ topology_st.standalone.log.info("Configuring password policy...")
try:
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local' , 'on'),
- (ldap.MOD_REPLACE, 'passwordCheckSyntax', 'on'),
- (ldap.MOD_REPLACE, 'passwordMinCategories' , '1'),
- (ldap.MOD_REPLACE, 'passwordMinTokenLength' , '1'),
- (ldap.MOD_REPLACE, 'passwordExp' , 'on'),
- (ldap.MOD_REPLACE, 'passwordMinDigits' , '1'),
- (ldap.MOD_REPLACE, 'passwordMinSpecials' , '1')])
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on'),
+ (ldap.MOD_REPLACE, 'passwordCheckSyntax', 'on'),
+ (ldap.MOD_REPLACE, 'passwordMinCategories', '1'),
+ (ldap.MOD_REPLACE, 'passwordMinTokenLength', '1'),
+ (ldap.MOD_REPLACE, 'passwordExp', 'on'),
+ (ldap.MOD_REPLACE, 'passwordMinDigits', '1'),
+ (ldap.MOD_REPLACE, 'passwordMinSpecials', '1')])
except ldap.LDAPError as e:
- topology.standalone.log.error('Failed configure password policy: ' + e.message['desc'])
+ topology_st.standalone.log.error('Failed configure password policy: ' + e.message['desc'])
assert False
#
# Add an aci to allow everyone all access (just makes things easier)
#
- topology.standalone.log.info("Add aci to allow password admin to add/update entries...")
+ topology_st.standalone.log.info("Add aci to allow password admin to add/update entries...")
- ACI_TARGET = "(target = \"ldap:///%s\")" % SUFFIX
- ACI_TARGETATTR = "(targetattr = *)"
- ACI_ALLOW = "(version 3.0; acl \"Password Admin Access\"; allow (all) "
- ACI_SUBJECT = "(userdn = \"ldap:///anyone\");)"
- ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT
+ ACI_TARGET = "(target = \"ldap:///%s\")" % SUFFIX
+ ACI_TARGETATTR = "(targetattr = *)"
+ ACI_ALLOW = "(version 3.0; acl \"Password Admin Access\"; allow (all) "
+ ACI_SUBJECT = "(userdn = \"ldap:///anyone\");)"
+ ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT
mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)]
try:
- topology.standalone.modify_s(SUFFIX, mod)
+ topology_st.standalone.modify_s(SUFFIX, mod)
except ldap.LDAPError as e:
- topology.standalone.log.error('Failed to add aci for password admin: ' + e.message['desc'])
+ topology_st.standalone.log.error('Failed to add aci for password admin: ' + e.message['desc'])
assert False
#
# Bind as the Password Admin
#
- topology.standalone.log.info("Bind as the Password Administator (before activating)...")
+ topology_st.standalone.log.info("Bind as the Password Administator (before activating)...")
try:
- topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
+ topology_st.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
except ldap.LDAPError as e:
- topology.standalone.log.error('Failed to bind as the Password Admin: ' + e.message['desc'])
+ topology_st.standalone.log.error('Failed to bind as the Password Admin: ' + e.message['desc'])
assert False
#
@@ -151,49 +107,49 @@ def test_ticket47900(topology):
#
# Start by attempting to add an entry with an invalid password
#
- topology.standalone.log.info("Attempt to add entries with invalid passwords, these adds should fail...")
+ topology_st.standalone.log.info("Attempt to add entries with invalid passwords, these adds should fail...")
for passwd in INVALID_PWDS:
failed_as_expected = False
entry.setValues('userpassword', passwd)
- topology.standalone.log.info("Create a regular user entry %s with password (%s)..." % (ENTRY_DN, passwd))
+ topology_st.standalone.log.info("Create a regular user entry %s with password (%s)..." % (ENTRY_DN, passwd))
try:
- topology.standalone.add_s(entry)
+ topology_st.standalone.add_s(entry)
except ldap.LDAPError as e:
# We failed as expected
failed_as_expected = True
- topology.standalone.log.info('Add failed as expected: password (%s) result (%s)'
- % (passwd, e.message['desc']))
+ topology_st.standalone.log.info('Add failed as expected: password (%s) result (%s)'
+ % (passwd, e.message['desc']))
if not failed_as_expected:
- topology.standalone.log.error("We were incorrectly able to add an entry " +
- "with an invalid password (%s)" % (passwd))
+ topology_st.standalone.log.error("We were incorrectly able to add an entry " +
+ "with an invalid password (%s)" % (passwd))
assert False
#
# Now activate a password administator, bind as root dn to do the config
# update, then rebind as the password admin
#
- topology.standalone.log.info("Activate the Password Administator...")
+ topology_st.standalone.log.info("Activate the Password Administator...")
# Bind as Root DN
try:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
except ldap.LDAPError as e:
- topology.standalone.log.error('Root DN failed to authenticate: ' + e.message['desc'])
+ topology_st.standalone.log.error('Root DN failed to authenticate: ' + e.message['desc'])
assert False
# Update config
try:
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ADMIN_DN)])
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ADMIN_DN)])
except ldap.LDAPError as e:
- topology.standalone.log.error('Failed to add password admin to config: ' + e.message['desc'])
+ topology_st.standalone.log.error('Failed to add password admin to config: ' + e.message['desc'])
assert False
# Bind as Password Admin
try:
- topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
+ topology_st.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
except ldap.LDAPError as e:
- topology.standalone.log.error('Failed to bind as the Password Admin: ' + e.message['desc'])
+ topology_st.standalone.log.error('Failed to bind as the Password Admin: ' + e.message['desc'])
assert False
#
@@ -201,21 +157,21 @@ def test_ticket47900(topology):
#
for passwd in INVALID_PWDS:
entry.setValues('userpassword', passwd)
- topology.standalone.log.info("Create a regular user entry %s with password (%s)..." % (ENTRY_DN, passwd))
+ topology_st.standalone.log.info("Create a regular user entry %s with password (%s)..." % (ENTRY_DN, passwd))
try:
- topology.standalone.add_s(entry)
+ topology_st.standalone.add_s(entry)
except ldap.LDAPError as e:
- topology.standalone.log.error('Failed to add entry with password (%s) result (%s)'
- % (passwd, e.message['desc']))
+ topology_st.standalone.log.error('Failed to add entry with password (%s) result (%s)'
+ % (passwd, e.message['desc']))
assert False
- topology.standalone.log.info('Succesfully added entry (%s)' % ENTRY_DN)
+ topology_st.standalone.log.info('Succesfully added entry (%s)' % ENTRY_DN)
# Delete entry for the next pass
try:
- topology.standalone.delete_s(ENTRY_DN)
+ topology_st.standalone.delete_s(ENTRY_DN)
except ldap.LDAPError as e:
- topology.standalone.log.error('Failed to delete entry: %s' % (e.message['desc']))
+ topology_st.standalone.log.error('Failed to delete entry: %s' % (e.message['desc']))
assert False
#
@@ -223,36 +179,36 @@ def test_ticket47900(topology):
#
entry.setValues('userpassword', ADMIN_PWD)
try:
- topology.standalone.add_s(entry)
+ topology_st.standalone.add_s(entry)
except ldap.LDAPError as e:
- topology.standalone.log.error('Failed to add entry with valid password (%s) result (%s)'
- % (passwd, e.message['desc']))
+ topology_st.standalone.log.error('Failed to add entry with valid password (%s) result (%s)'
+ % (passwd, e.message['desc']))
assert False
#
# Deactivate the password admin and make sure invalid password updates fail
#
- topology.standalone.log.info("Deactivate Password Administator and try invalid password updates...")
+ topology_st.standalone.log.info("Deactivate Password Administator and try invalid password updates...")
# Bind as root DN
try:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
except ldap.LDAPError as e:
- topology.standalone.log.error('Root DN failed to authenticate: ' + e.message['desc'])
+ topology_st.standalone.log.error('Root DN failed to authenticate: ' + e.message['desc'])
assert False
# Update config
try:
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_DELETE, 'passwordAdminDN', None)])
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_DELETE, 'passwordAdminDN', None)])
except ldap.LDAPError as e:
- topology.standalone.log.error('Failed to remove password admin from config: ' + e.message['desc'])
+ topology_st.standalone.log.error('Failed to remove password admin from config: ' + e.message['desc'])
assert False
# Bind as Password Admin
try:
- topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
+ topology_st.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
except ldap.LDAPError as e:
- topology.standalone.log.error('Failed to bind as the Password Admin: ' + e.message['desc'])
+ topology_st.standalone.log.error('Failed to bind as the Password Admin: ' + e.message['desc'])
assert False
#
@@ -262,42 +218,42 @@ def test_ticket47900(topology):
failed_as_expected = False
entry.setValues('userpassword', passwd)
try:
- topology.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', passwd)])
+ topology_st.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', passwd)])
except ldap.LDAPError as e:
# We failed as expected
failed_as_expected = True
- topology.standalone.log.info('Password update failed as expected: password (%s) result (%s)'
- % (passwd, e.message['desc']))
+ topology_st.standalone.log.info('Password update failed as expected: password (%s) result (%s)'
+ % (passwd, e.message['desc']))
if not failed_as_expected:
- topology.standalone.log.error("We were incorrectly able to add an invalid password (%s)"
- % (passwd))
+ topology_st.standalone.log.error("We were incorrectly able to add an invalid password (%s)"
+ % (passwd))
assert False
#
# Now activate a password administator
#
- topology.standalone.log.info("Activate Password Administator and try updates again...")
+ topology_st.standalone.log.info("Activate Password Administator and try updates again...")
# Bind as root DN
try:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
except ldap.LDAPError as e:
- topology.standalone.log.error('Root DN failed to authenticate: ' + e.message['desc'])
+ topology_st.standalone.log.error('Root DN failed to authenticate: ' + e.message['desc'])
assert False
# Update config
try:
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ADMIN_DN)])
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ADMIN_DN)])
except ldap.LDAPError as e:
- topology.standalone.log.error('Failed to add password admin to config: ' + e.message['desc'])
+ topology_st.standalone.log.error('Failed to add password admin to config: ' + e.message['desc'])
assert False
# Bind as Password Admin
try:
- topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
+ topology_st.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
except ldap.LDAPError as e:
- topology.standalone.log.error('Failed to bind as the Password Admin: ' + e.message['desc'])
+ topology_st.standalone.log.error('Failed to bind as the Password Admin: ' + e.message['desc'])
assert False
#
@@ -306,12 +262,12 @@ def test_ticket47900(topology):
for passwd in INVALID_PWDS:
entry.setValues('userpassword', passwd)
try:
- topology.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', passwd)])
+ topology_st.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', passwd)])
except ldap.LDAPError as e:
- topology.standalone.log.error('Password update failed unexpectedly: password (%s) result (%s)'
- % (passwd, e.message['desc']))
+ topology_st.standalone.log.error('Password update failed unexpectedly: password (%s) result (%s)'
+ % (passwd, e.message['desc']))
assert False
- topology.standalone.log.info('Password update succeeded (%s)' % passwd)
+ topology_st.standalone.log.info('Password update succeeded (%s)' % passwd)
if __name__ == '__main__':
diff --git a/dirsrvtests/tests/tickets/ticket47910_test.py b/dirsrvtests/tests/tickets/ticket47910_test.py
index b2986ea..b51118c 100644
--- a/dirsrvtests/tests/tickets/ticket47910_test.py
+++ b/dirsrvtests/tests/tickets/ticket47910_test.py
@@ -5,66 +5,20 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
import logging
-import pytest
-import re
import subprocess
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
from datetime import datetime, timedelta
+import pytest
+from lib389.tasks import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-@pytest.fixture(scope="module")
-def log_dir(topology):
+def log_dir(topology_st):
'''
Do a search operation
and disable access log buffering
@@ -72,15 +26,15 @@ def log_dir(topology):
'''
log.info("Diable access log buffering")
- topology.standalone.setAccessLogBuffering(False)
+ topology_st.standalone.setAccessLogBuffering(False)
log.info("Do a ldapsearch operation")
- topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=*)")
+ topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=*)")
log.info("sleep for sometime so that access log file get generated")
- time.sleep( 1 )
+ time.sleep(1)
- return topology.standalone.accesslog
+ return topology_st.standalone.accesslog
def format_time(local_datetime):
@@ -106,7 +60,7 @@ def execute_logconv(inst, start_time_stamp, end_time_stamp, access_log):
return proc.returncode
-def test_ticket47910_logconv_start_end_positive(topology, log_dir):
+def test_ticket47910_logconv_start_end_positive(topology_st, log_dir):
'''
Execute logconv.pl with -S and -E(endtime) with random time stamp
This is execute successfully
@@ -125,11 +79,11 @@ def test_ticket47910_logconv_start_end_positive(topology, log_dir):
formatted_end_time_stamp = format_time(end_time_stamp)
log.info("Executing logconv.pl with -S and -E")
- result = execute_logconv(topology.standalone, formatted_start_time_stamp, formatted_end_time_stamp, log_dir)
+ result = execute_logconv(topology_st.standalone, formatted_start_time_stamp, formatted_end_time_stamp, log_dir)
assert result == 0
-def test_ticket47910_logconv_start_end_negative(topology, log_dir):
+def test_ticket47910_logconv_start_end_negative(topology_st, log_dir):
'''
Execute logconv.pl with -S and -E(endtime) with random time stamp
This is a negative test case, where endtime will be lesser than the
@@ -151,11 +105,11 @@ def test_ticket47910_logconv_start_end_negative(topology, log_dir):
formatted_end_time_stamp = format_time(end_time_stamp)
log.info("Executing logconv.pl with -S and -E")
- result = execute_logconv(topology.standalone, formatted_start_time_stamp, formatted_end_time_stamp, log_dir)
+ result = execute_logconv(topology_st.standalone, formatted_start_time_stamp, formatted_end_time_stamp, log_dir)
assert result == 1
-def test_ticket47910_logconv_start_end_invalid(topology, log_dir):
+def test_ticket47910_logconv_start_end_invalid(topology_st, log_dir):
'''
Execute logconv.pl with -S and -E(endtime) with invalid time stamp
This is a negative test case, where it should give error message
@@ -169,12 +123,11 @@ def test_ticket47910_logconv_start_end_invalid(topology, log_dir):
end_time_stamp = "invalid"
log.info("Executing logconv.pl with -S and -E")
- result = execute_logconv(topology.standalone, start_time_stamp, end_time_stamp, log_dir)
+ result = execute_logconv(topology_st.standalone, start_time_stamp, end_time_stamp, log_dir)
assert result == 1
-def test_ticket47910_logconv_noaccesslogs(topology, log_dir):
-
+def test_ticket47910_logconv_noaccesslogs(topology_st, log_dir):
'''
Execute logconv.pl -S(starttime) without specify
access logs location
@@ -189,7 +142,7 @@ def test_ticket47910_logconv_noaccesslogs(topology, log_dir):
time_stamp = (datetime.now() - timedelta(minutes=2))
formatted_time_stamp = format_time(time_stamp)
log.info("Executing logconv.pl with -S current time")
- cmd = [os.path.join(topology.standalone.get_bin_dir(), 'logconv.pl'), '-S', formatted_time_stamp]
+ cmd = [os.path.join(topology_st.standalone.get_bin_dir(), 'logconv.pl'), '-S', formatted_time_stamp]
log.info(" ".join(cmd))
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
diff --git a/dirsrvtests/tests/tickets/ticket47920_test.py b/dirsrvtests/tests/tickets/ticket47920_test.py
index 301d6a6..cd4b7f4 100644
--- a/dirsrvtests/tests/tickets/ticket47920_test.py
+++ b/dirsrvtests/tests/tickets/ticket47920_test.py
@@ -6,33 +6,29 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
import logging
+
+import ldap
import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from ldap.controls.readentry import PostReadControl
+from lib389 import Entry
from lib389._constants import *
-from lib389.properties import *
-from ldap.controls.readentry import PreReadControl,PostReadControl
-
+from lib389.topologies import topology_st
-SCOPE_IN_CN = 'in'
+SCOPE_IN_CN = 'in'
SCOPE_OUT_CN = 'out'
-SCOPE_IN_DN = 'cn=%s,%s' % (SCOPE_IN_CN, SUFFIX)
+SCOPE_IN_DN = 'cn=%s,%s' % (SCOPE_IN_CN, SUFFIX)
SCOPE_OUT_DN = 'cn=%s,%s' % (SCOPE_OUT_CN, SUFFIX)
PROVISIONING_CN = "provisioning"
PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SCOPE_IN_DN)
ACTIVE_CN = "accounts"
-STAGE_CN = "staged users"
+STAGE_CN = "staged users"
DELETE_CN = "deleted users"
ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SCOPE_IN_DN)
-STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN)
-DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN)
+STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN)
+DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN)
STAGE_USER_CN = "stage guy"
STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN)
@@ -60,103 +56,65 @@ FINAL_DESC = "final description"
log = logging.getLogger(__name__)
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-def _header(topology, label):
- topology.standalone.log.info("\n\n###############################################")
- topology.standalone.log.info("#######")
- topology.standalone.log.info("####### %s" % label)
- topology.standalone.log.info("#######")
- topology.standalone.log.info("###############################################")
+def _header(topology_st, label):
+ topology_st.standalone.log.info("\n\n###############################################")
+ topology_st.standalone.log.info("#######")
+ topology_st.standalone.log.info("####### %s" % label)
+ topology_st.standalone.log.info("#######")
+ topology_st.standalone.log.info("###############################################")
-def _add_user(topology, type='active'):
+def _add_user(topology_st, type='active'):
if type == 'active':
- topology.standalone.add_s(Entry((ACTIVE_USER_DN, {
- 'objectclass': "top person inetuser".split(),
- 'sn': ACTIVE_USER_CN,
- 'cn': ACTIVE_USER_CN,
- 'description': INITIAL_DESC})))
+ topology_st.standalone.add_s(Entry((ACTIVE_USER_DN, {
+ 'objectclass': "top person inetuser".split(),
+ 'sn': ACTIVE_USER_CN,
+ 'cn': ACTIVE_USER_CN,
+ 'description': INITIAL_DESC})))
elif type == 'stage':
- topology.standalone.add_s(Entry((STAGE_USER_DN, {
- 'objectclass': "top person inetuser".split(),
- 'sn': STAGE_USER_CN,
- 'cn': STAGE_USER_CN})))
+ topology_st.standalone.add_s(Entry((STAGE_USER_DN, {
+ 'objectclass': "top person inetuser".split(),
+ 'sn': STAGE_USER_CN,
+ 'cn': STAGE_USER_CN})))
else:
- topology.standalone.add_s(Entry((OUT_USER_DN, {
- 'objectclass': "top person inetuser".split(),
- 'sn': OUT_USER_CN,
- 'cn': OUT_USER_CN})))
+ topology_st.standalone.add_s(Entry((OUT_USER_DN, {
+ 'objectclass': "top person inetuser".split(),
+ 'sn': OUT_USER_CN,
+ 'cn': OUT_USER_CN})))
-def test_ticket47920_init(topology):
- topology.standalone.add_s(Entry((SCOPE_IN_DN, {
- 'objectclass': "top nscontainer".split(),
- 'cn': SCOPE_IN_DN})))
- topology.standalone.add_s(Entry((ACTIVE_DN, {
- 'objectclass': "top nscontainer".split(),
- 'cn': ACTIVE_CN})))
+def test_ticket47920_init(topology_st):
+ topology_st.standalone.add_s(Entry((SCOPE_IN_DN, {
+ 'objectclass': "top nscontainer".split(),
+ 'cn': SCOPE_IN_DN})))
+ topology_st.standalone.add_s(Entry((ACTIVE_DN, {
+ 'objectclass': "top nscontainer".split(),
+ 'cn': ACTIVE_CN})))
# add users
- _add_user(topology, 'active')
+ _add_user(topology_st, 'active')
-def test_ticket47920_mod_readentry_ctrl(topology):
- _header(topology, 'MOD: with a readentry control')
+def test_ticket47920_mod_readentry_ctrl(topology_st):
+ _header(topology_st, 'MOD: with a readentry control')
- topology.standalone.log.info("Check the initial value of the entry")
- ent = topology.standalone.getEntry(ACTIVE_USER_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
+ topology_st.standalone.log.info("Check the initial value of the entry")
+ ent = topology_st.standalone.getEntry(ACTIVE_USER_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
assert ent.hasAttr('description')
assert ent.getValue('description') == INITIAL_DESC
pr = PostReadControl(criticality=True, attrList=['cn', 'description'])
- _, _, _, resp_ctrls = topology.standalone.modify_ext_s(ACTIVE_USER_DN, [(ldap.MOD_REPLACE, 'description', [FINAL_DESC])], serverctrls=[pr])
+ _, _, _, resp_ctrls = topology_st.standalone.modify_ext_s(ACTIVE_USER_DN,
+ [(ldap.MOD_REPLACE, 'description', [FINAL_DESC])],
+ serverctrls=[pr])
assert resp_ctrls[0].dn == ACTIVE_USER_DN
assert 'description' in resp_ctrls[0].entry
assert 'cn' in resp_ctrls[0].entry
print(resp_ctrls[0].entry['description'])
- ent = topology.standalone.getEntry(ACTIVE_USER_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
+ ent = topology_st.standalone.getEntry(ACTIVE_USER_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
assert ent.hasAttr('description')
assert ent.getValue('description') == FINAL_DESC
diff --git a/dirsrvtests/tests/tickets/ticket47921_test.py b/dirsrvtests/tests/tickets/ticket47921_test.py
index e46e996..2d2d35d 100644
--- a/dirsrvtests/tests/tickets/ticket47921_test.py
+++ b/dirsrvtests/tests/tickets/ticket47921_test.py
@@ -6,59 +6,16 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_ticket47921(topology):
+def test_ticket47921(topology_st):
'''
Test that indirect cos reflects the current value of the indirect entry
'''
@@ -69,45 +26,46 @@ def test_ticket47921(topology):
# Add COS definition
try:
- topology.standalone.add_s(Entry((INDIRECT_COS_DN,
- {'objectclass': 'top cosSuperDefinition cosIndirectDefinition ldapSubEntry'.split(),
- 'cosIndirectSpecifier': 'manager',
- 'cosAttribute': 'roomnumber'
- })))
+ topology_st.standalone.add_s(Entry((INDIRECT_COS_DN,
+ {
+ 'objectclass': 'top cosSuperDefinition cosIndirectDefinition ldapSubEntry'.split(),
+ 'cosIndirectSpecifier': 'manager',
+ 'cosAttribute': 'roomnumber'
+ })))
except ldap.LDAPError as e:
log.fatal('Failed to add cos defintion, error: ' + e.message['desc'])
assert False
# Add manager entry
try:
- topology.standalone.add_s(Entry((MANAGER_DN,
- {'objectclass': 'top extensibleObject'.split(),
- 'uid': 'my manager',
- 'roomnumber': '1'
- })))
+ topology_st.standalone.add_s(Entry((MANAGER_DN,
+ {'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'my manager',
+ 'roomnumber': '1'
+ })))
except ldap.LDAPError as e:
log.fatal('Failed to add manager entry, error: ' + e.message['desc'])
assert False
# Add user entry
try:
- topology.standalone.add_s(Entry((USER_DN,
- {'objectclass': 'top person organizationalPerson inetorgperson'.split(),
- 'sn': 'last',
- 'cn': 'full',
- 'givenname': 'mark',
- 'uid': 'user',
- 'manager': MANAGER_DN
- })))
+ topology_st.standalone.add_s(Entry((USER_DN,
+ {'objectclass': 'top person organizationalPerson inetorgperson'.split(),
+ 'sn': 'last',
+ 'cn': 'full',
+ 'givenname': 'mark',
+ 'uid': 'user',
+ 'manager': MANAGER_DN
+ })))
except ldap.LDAPError as e:
log.fatal('Failed to add manager entry, error: ' + e.message['desc'])
assert False
# Test COS is working
try:
- entry = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
- "uid=user",
- ['roomnumber'])
+ entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
+ "uid=user",
+ ['roomnumber'])
if entry:
if entry[0].getValue('roomnumber') != '1':
log.fatal('COS is not working.')
@@ -121,16 +79,16 @@ def test_ticket47921(topology):
# Modify manager entry
try:
- topology.standalone.modify_s(MANAGER_DN, [(ldap.MOD_REPLACE, 'roomnumber', '2')])
+ topology_st.standalone.modify_s(MANAGER_DN, [(ldap.MOD_REPLACE, 'roomnumber', '2')])
except ldap.LDAPError as e:
log.error('Failed to modify manager entry: ' + e.message['desc'])
assert False
# Confirm COS is returning the new value
try:
- entry = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
- "uid=user",
- ['roomnumber'])
+ entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
+ "uid=user",
+ ['roomnumber'])
if entry:
if entry[0].getValue('roomnumber') != '2':
log.fatal('COS is not working after manager update.')
diff --git a/dirsrvtests/tests/tickets/ticket47927_test.py b/dirsrvtests/tests/tickets/ticket47927_test.py
index 7e19ae0..1cb20d2 100644
--- a/dirsrvtests/tests/tickets/ticket47927_test.py
+++ b/dirsrvtests/tests/tickets/ticket47927_test.py
@@ -6,24 +6,14 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
EXCLUDED_CONTAINER_CN = "excluded_container"
EXCLUDED_CONTAINER_DN = "cn=%s,%s" % (EXCLUDED_CONTAINER_CN, SUFFIX)
@@ -43,210 +33,183 @@ USER_4_CN = "test_4"
USER_4_DN = "cn=%s,%s" % (USER_4_CN, EXCLUDED_BIS_CONTAINER_DN)
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_ticket47927_init(topology):
- topology.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS)
+def test_ticket47927_init(topology_st):
+ topology_st.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS)
try:
- topology.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
- [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'telephonenumber'),
- (ldap.MOD_REPLACE, 'uniqueness-subtrees', DEFAULT_SUFFIX),
- ])
+ topology_st.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
+ [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'telephonenumber'),
+ (ldap.MOD_REPLACE, 'uniqueness-subtrees', DEFAULT_SUFFIX),
+ ])
except ldap.LDAPError as e:
log.fatal('test_ticket47927: Failed to configure plugin for "telephonenumber": error ' + e.message['desc'])
assert False
- topology.standalone.restart(timeout=120)
-
- topology.standalone.add_s(Entry((EXCLUDED_CONTAINER_DN, {'objectclass': "top nscontainer".split(),
- 'cn': EXCLUDED_CONTAINER_CN})))
- topology.standalone.add_s(Entry((EXCLUDED_BIS_CONTAINER_DN, {'objectclass': "top nscontainer".split(),
- 'cn': EXCLUDED_BIS_CONTAINER_CN})))
- topology.standalone.add_s(Entry((ENFORCED_CONTAINER_DN, {'objectclass': "top nscontainer".split(),
- 'cn': ENFORCED_CONTAINER_CN})))
-
- # adding an entry on a stage with a different 'cn'
- topology.standalone.add_s(Entry((USER_1_DN, {
- 'objectclass': "top person".split(),
- 'sn': USER_1_CN,
- 'cn': USER_1_CN})))
- # adding an entry on a stage with a different 'cn'
- topology.standalone.add_s(Entry((USER_2_DN, {
- 'objectclass': "top person".split(),
- 'sn': USER_2_CN,
- 'cn': USER_2_CN})))
- topology.standalone.add_s(Entry((USER_3_DN, {
- 'objectclass': "top person".split(),
- 'sn': USER_3_CN,
- 'cn': USER_3_CN})))
- topology.standalone.add_s(Entry((USER_4_DN, {
- 'objectclass': "top person".split(),
- 'sn': USER_4_CN,
- 'cn': USER_4_CN})))
-
-
-def test_ticket47927_one(topology):
+ topology_st.standalone.restart(timeout=120)
+
+ topology_st.standalone.add_s(Entry((EXCLUDED_CONTAINER_DN, {'objectclass': "top nscontainer".split(),
+ 'cn': EXCLUDED_CONTAINER_CN})))
+ topology_st.standalone.add_s(Entry((EXCLUDED_BIS_CONTAINER_DN, {'objectclass': "top nscontainer".split(),
+ 'cn': EXCLUDED_BIS_CONTAINER_CN})))
+ topology_st.standalone.add_s(Entry((ENFORCED_CONTAINER_DN, {'objectclass': "top nscontainer".split(),
+ 'cn': ENFORCED_CONTAINER_CN})))
+
+ # adding an entry on a stage with a different 'cn'
+ topology_st.standalone.add_s(Entry((USER_1_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': USER_1_CN,
+ 'cn': USER_1_CN})))
+ # adding an entry on a stage with a different 'cn'
+ topology_st.standalone.add_s(Entry((USER_2_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': USER_2_CN,
+ 'cn': USER_2_CN})))
+ topology_st.standalone.add_s(Entry((USER_3_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': USER_3_CN,
+ 'cn': USER_3_CN})))
+ topology_st.standalone.add_s(Entry((USER_4_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': USER_4_CN,
+ 'cn': USER_4_CN})))
+
+
+def test_ticket47927_one(topology_st):
'''
Check that uniqueness is enforce on all SUFFIX
'''
- UNIQUE_VALUE='1234'
+ UNIQUE_VALUE = '1234'
try:
- topology.standalone.modify_s(USER_1_DN,
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+ topology_st.standalone.modify_s(USER_1_DN,
+ [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
except ldap.LDAPError as e:
log.fatal('test_ticket47927_one: Failed to set the telephonenumber for %s: %s' % (USER_1_DN, e.message['desc']))
assert False
# we expect to fail because user1 is in the scope of the plugin
try:
- topology.standalone.modify_s(USER_2_DN,
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+ topology_st.standalone.modify_s(USER_2_DN,
+ [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
log.fatal('test_ticket47927_one: unexpected success to set the telephonenumber for %s' % (USER_2_DN))
assert False
except ldap.LDAPError as e:
- log.fatal('test_ticket47927_one: Failed (expected) to set the telephonenumber for %s: %s' % (USER_2_DN, e.message['desc']))
+ log.fatal('test_ticket47927_one: Failed (expected) to set the telephonenumber for %s: %s' % (
+ USER_2_DN, e.message['desc']))
pass
-
# we expect to fail because user1 is in the scope of the plugin
try:
- topology.standalone.modify_s(USER_3_DN,
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+ topology_st.standalone.modify_s(USER_3_DN,
+ [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
log.fatal('test_ticket47927_one: unexpected success to set the telephonenumber for %s' % (USER_3_DN))
assert False
except ldap.LDAPError as e:
- log.fatal('test_ticket47927_one: Failed (expected) to set the telephonenumber for %s: %s' % (USER_3_DN, e.message['desc']))
+ log.fatal('test_ticket47927_one: Failed (expected) to set the telephonenumber for %s: %s' % (
+ USER_3_DN, e.message['desc']))
pass
-def test_ticket47927_two(topology):
+def test_ticket47927_two(topology_st):
'''
Exclude the EXCLUDED_CONTAINER_DN from the uniqueness plugin
'''
try:
- topology.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
- [(ldap.MOD_REPLACE, 'uniqueness-exclude-subtrees', EXCLUDED_CONTAINER_DN)])
+ topology_st.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
+ [(ldap.MOD_REPLACE, 'uniqueness-exclude-subtrees', EXCLUDED_CONTAINER_DN)])
except ldap.LDAPError as e:
- log.fatal('test_ticket47927_two: Failed to configure plugin for to exclude %s: error %s' % (EXCLUDED_CONTAINER_DN, e.message['desc']))
+ log.fatal('test_ticket47927_two: Failed to configure plugin for to exclude %s: error %s' % (
+ EXCLUDED_CONTAINER_DN, e.message['desc']))
assert False
- topology.standalone.restart(timeout=120)
+ topology_st.standalone.restart(timeout=120)
-def test_ticket47927_three(topology):
+def test_ticket47927_three(topology_st):
'''
Check that uniqueness is enforced on full SUFFIX except EXCLUDED_CONTAINER_DN
First case: it exists an entry (with the same attribute value) in the scope
of the plugin and we set the value in an entry that is in an excluded scope
'''
- UNIQUE_VALUE='9876'
+ UNIQUE_VALUE = '9876'
try:
- topology.standalone.modify_s(USER_1_DN,
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+ topology_st.standalone.modify_s(USER_1_DN,
+ [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
except ldap.LDAPError as e:
log.fatal('test_ticket47927_three: Failed to set the telephonenumber ' + e.message['desc'])
assert False
# we should not be allowed to set this value (because user1 is in the scope)
try:
- topology.standalone.modify_s(USER_2_DN,
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+ topology_st.standalone.modify_s(USER_2_DN,
+ [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
log.fatal('test_ticket47927_three: unexpected success to set the telephonenumber for %s' % (USER_2_DN))
assert False
except ldap.LDAPError as e:
- log.fatal('test_ticket47927_three: Failed (expected) to set the telephonenumber for %s: %s' % (USER_2_DN , e.message['desc']))
-
+ log.fatal('test_ticket47927_three: Failed (expected) to set the telephonenumber for %s: %s' % (
+ USER_2_DN, e.message['desc']))
# USER_3_DN is in EXCLUDED_CONTAINER_DN so update should be successful
try:
- topology.standalone.modify_s(USER_3_DN,
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+ topology_st.standalone.modify_s(USER_3_DN,
+ [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
log.fatal('test_ticket47927_three: success to set the telephonenumber for %s' % (USER_3_DN))
except ldap.LDAPError as e:
- log.fatal('test_ticket47927_three: Failed (unexpected) to set the telephonenumber for %s: %s' % (USER_3_DN, e.message['desc']))
+ log.fatal('test_ticket47927_three: Failed (unexpected) to set the telephonenumber for %s: %s' % (
+ USER_3_DN, e.message['desc']))
assert False
-def test_ticket47927_four(topology):
+def test_ticket47927_four(topology_st):
'''
Check that uniqueness is enforced on full SUFFIX except EXCLUDED_CONTAINER_DN
Second case: it exists an entry (with the same attribute value) in an excluded scope
of the plugin and we set the value in an entry is in the scope
'''
- UNIQUE_VALUE='1111'
+ UNIQUE_VALUE = '1111'
# USER_3_DN is in EXCLUDED_CONTAINER_DN so update should be successful
try:
- topology.standalone.modify_s(USER_3_DN,
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+ topology_st.standalone.modify_s(USER_3_DN,
+ [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
log.fatal('test_ticket47927_four: success to set the telephonenumber for %s' % USER_3_DN)
except ldap.LDAPError as e:
- log.fatal('test_ticket47927_four: Failed (unexpected) to set the telephonenumber for %s: %s' % (USER_3_DN, e.message['desc']))
+ log.fatal('test_ticket47927_four: Failed (unexpected) to set the telephonenumber for %s: %s' % (
+ USER_3_DN, e.message['desc']))
assert False
-
# we should be allowed to set this value (because user3 is excluded from scope)
try:
- topology.standalone.modify_s(USER_1_DN,
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+ topology_st.standalone.modify_s(USER_1_DN,
+ [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
except ldap.LDAPError as e:
- log.fatal('test_ticket47927_four: Failed to set the telephonenumber for %s: %s' % (USER_1_DN, e.message['desc']))
+ log.fatal(
+ 'test_ticket47927_four: Failed to set the telephonenumber for %s: %s' % (USER_1_DN, e.message['desc']))
assert False
# we should not be allowed to set this value (because user1 is in the scope)
try:
- topology.standalone.modify_s(USER_2_DN,
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+ topology_st.standalone.modify_s(USER_2_DN,
+ [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
log.fatal('test_ticket47927_four: unexpected success to set the telephonenumber %s' % USER_2_DN)
assert False
except ldap.LDAPError as e:
- log.fatal('test_ticket47927_four: Failed (expected) to set the telephonenumber for %s: %s' % (USER_2_DN, e.message['desc']))
+ log.fatal('test_ticket47927_four: Failed (expected) to set the telephonenumber for %s: %s' % (
+ USER_2_DN, e.message['desc']))
pass
-def test_ticket47927_five(topology):
+def test_ticket47927_five(topology_st):
'''
Exclude the EXCLUDED_BIS_CONTAINER_DN from the uniqueness plugin
'''
try:
- topology.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
- [(ldap.MOD_ADD, 'uniqueness-exclude-subtrees', EXCLUDED_BIS_CONTAINER_DN)])
+ topology_st.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
+ [(ldap.MOD_ADD, 'uniqueness-exclude-subtrees', EXCLUDED_BIS_CONTAINER_DN)])
except ldap.LDAPError as e:
- log.fatal('test_ticket47927_five: Failed to configure plugin for to exclude %s: error %s' % (EXCLUDED_BIS_CONTAINER_DN, e.message['desc']))
+ log.fatal('test_ticket47927_five: Failed to configure plugin for to exclude %s: error %s' % (
+ EXCLUDED_BIS_CONTAINER_DN, e.message['desc']))
assert False
- topology.standalone.restart(timeout=120)
- topology.standalone.getEntry('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', ldap.SCOPE_BASE)
+ topology_st.standalone.restart(timeout=120)
+ topology_st.standalone.getEntry('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', ldap.SCOPE_BASE)
-def test_ticket47927_six(topology):
+def test_ticket47927_six(topology_st):
'''
Check that uniqueness is enforced on full SUFFIX except EXCLUDED_CONTAINER_DN
and EXCLUDED_BIS_CONTAINER_DN
@@ -255,37 +218,39 @@ def test_ticket47927_six(topology):
'''
UNIQUE_VALUE = '222'
try:
- topology.standalone.modify_s(USER_1_DN,
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+ topology_st.standalone.modify_s(USER_1_DN,
+ [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
except ldap.LDAPError as e:
log.fatal('test_ticket47927_six: Failed to set the telephonenumber ' + e.message['desc'])
assert False
# we should not be allowed to set this value (because user1 is in the scope)
try:
- topology.standalone.modify_s(USER_2_DN,
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+ topology_st.standalone.modify_s(USER_2_DN,
+ [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
log.fatal('test_ticket47927_six: unexpected success to set the telephonenumber for %s' % (USER_2_DN))
assert False
except ldap.LDAPError as e:
- log.fatal('test_ticket47927_six: Failed (expected) to set the telephonenumber for %s: %s' % (USER_2_DN , e.message['desc']))
-
+ log.fatal('test_ticket47927_six: Failed (expected) to set the telephonenumber for %s: %s' % (
+ USER_2_DN, e.message['desc']))
# USER_3_DN is in EXCLUDED_CONTAINER_DN so update should be successful
try:
- topology.standalone.modify_s(USER_3_DN,
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+ topology_st.standalone.modify_s(USER_3_DN,
+ [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
log.fatal('test_ticket47927_six: success to set the telephonenumber for %s' % (USER_3_DN))
except ldap.LDAPError as e:
- log.fatal('test_ticket47927_six: Failed (unexpected) to set the telephonenumber for %s: %s' % (USER_3_DN, e.message['desc']))
+ log.fatal('test_ticket47927_six: Failed (unexpected) to set the telephonenumber for %s: %s' % (
+ USER_3_DN, e.message['desc']))
assert False
# USER_4_DN is in EXCLUDED_CONTAINER_DN so update should be successful
try:
- topology.standalone.modify_s(USER_4_DN,
- [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+ topology_st.standalone.modify_s(USER_4_DN,
+ [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
log.fatal('test_ticket47927_six: success to set the telephonenumber for %s' % (USER_4_DN))
except ldap.LDAPError as e:
- log.fatal('test_ticket47927_six: Failed (unexpected) to set the telephonenumber for %s: %s' % (USER_4_DN, e.message['desc']))
+ log.fatal('test_ticket47927_six: Failed (unexpected) to set the telephonenumber for %s: %s' % (
+ USER_4_DN, e.message['desc']))
assert False
diff --git a/dirsrvtests/tests/tickets/ticket47931_test.py b/dirsrvtests/tests/tickets/ticket47931_test.py
index 9aa54fc..7e4964a 100644
--- a/dirsrvtests/tests/tickets/ticket47931_test.py
+++ b/dirsrvtests/tests/tickets/ticket47931_test.py
@@ -1,16 +1,9 @@
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
import threading
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
+
+import pytest
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
@@ -25,12 +18,6 @@ MEMBER_DN_COMP = "uid=member"
TIME_OUT = 5
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
class modifySecondBackendThread(threading.Thread):
def __init__(self, inst, timeout):
threading.Thread.__init__(self)
@@ -57,38 +44,7 @@ class modifySecondBackendThread(threading.Thread):
log.info('Finished modifying second suffix')
-(a)pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- return TopologyStandalone(standalone)
-
-
-def test_ticket47931(topology):
+def test_ticket47931(topology_st):
"""Test Retro Changelog and MemberOf deadlock fix.
Verification steps:
- Enable retro cl and memberOf.
@@ -105,56 +61,56 @@ def test_ticket47931(topology):
# Enable dynamic plugins to make plugin configuration easier
try:
- topology.standalone.modify_s(DN_CONFIG,
- [(ldap.MOD_REPLACE,
- 'nsslapd-dynamic-plugins',
- 'on')])
+ topology_st.standalone.modify_s(DN_CONFIG,
+ [(ldap.MOD_REPLACE,
+ 'nsslapd-dynamic-plugins',
+ 'on')])
except ldap.LDAPError as e:
ldap.error('Failed to enable dynamic plugins! ' + e.message['desc'])
assert False
# Enable the plugins
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
- topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
+ topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
# Create second backend
- topology.standalone.backend.create(SECOND_SUFFIX, {BACKEND_NAME: SECOND_BACKEND})
- topology.standalone.mappingtree.create(SECOND_SUFFIX, bename=SECOND_BACKEND)
+ topology_st.standalone.backend.create(SECOND_SUFFIX, {BACKEND_NAME: SECOND_BACKEND})
+ topology_st.standalone.mappingtree.create(SECOND_SUFFIX, bename=SECOND_BACKEND)
# Create the root node of the second backend
try:
- topology.standalone.add_s(Entry((SECOND_SUFFIX,
- {'objectclass': 'top domain'.split(),
- 'dc': 'deadlock'})))
+ topology_st.standalone.add_s(Entry((SECOND_SUFFIX,
+ {'objectclass': 'top domain'.split(),
+ 'dc': 'deadlock'})))
except ldap.LDAPError as e:
log.fatal('Failed to create suffix entry: error ' + e.message['desc'])
assert False
# Configure retrocl scope
try:
- topology.standalone.modify_s(RETROCL_PLUGIN_DN,
- [(ldap.MOD_REPLACE,
- 'nsslapd-include-suffix',
- DEFAULT_SUFFIX)])
+ topology_st.standalone.modify_s(RETROCL_PLUGIN_DN,
+ [(ldap.MOD_REPLACE,
+ 'nsslapd-include-suffix',
+ DEFAULT_SUFFIX)])
except ldap.LDAPError as e:
ldap.error('Failed to configure retrocl plugin: ' + e.message['desc'])
assert False
# Configure memberOf group attribute
try:
- topology.standalone.modify_s(MEMBEROF_PLUGIN_DN,
- [(ldap.MOD_REPLACE,
- 'memberofgroupattr',
- 'uniquemember')])
+ topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN,
+ [(ldap.MOD_REPLACE,
+ 'memberofgroupattr',
+ 'uniquemember')])
except ldap.LDAPError as e:
log.fatal('Failed to configure memberOf plugin: error ' + e.message['desc'])
assert False
# Create group
try:
- topology.standalone.add_s(Entry((GROUP_DN,
- {'objectclass': 'top extensibleObject'.split(),
- 'cn': 'group'})))
+ topology_st.standalone.add_s(Entry((GROUP_DN,
+ {'objectclass': 'top extensibleObject'.split(),
+ 'cn': 'group'})))
except ldap.LDAPError as e:
log.fatal('Failed to add grouo: error ' + e.message['desc'])
assert False
@@ -163,27 +119,27 @@ def test_ticket47931(topology):
for idx in range(1, 1500):
try:
USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
- topology.standalone.add_s(Entry((USER_DN,
- {'objectclass': 'top extensibleObject'.split(),
- 'uid': 'member%d' % (x)})))
+ topology_st.standalone.add_s(Entry((USER_DN,
+ {'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'member%d' % (x)})))
except ldap.LDAPError as e:
log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.message['desc']))
assert False
# Modify second backend (separate thread)
- mod_backend_thrd = modifySecondBackendThread(topology.standalone, TIME_OUT)
+ mod_backend_thrd = modifySecondBackendThread(topology_st.standalone, TIME_OUT)
mod_backend_thrd.start()
# Add members to the group - set timeout
log.info('Adding members to the group...')
- topology.standalone.set_option(ldap.OPT_TIMEOUT, TIME_OUT)
+ topology_st.standalone.set_option(ldap.OPT_TIMEOUT, TIME_OUT)
for idx in range(1, 1500):
try:
MEMBER_VAL = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
- topology.standalone.modify_s(GROUP_DN,
- [(ldap.MOD_ADD,
- 'uniquemember',
- MEMBER_VAL)])
+ topology_st.standalone.modify_s(GROUP_DN,
+ [(ldap.MOD_ADD,
+ 'uniquemember',
+ MEMBER_VAL)])
except ldap.TIMEOUT:
log.fatal('Deadlock! Bug verification failed.')
assert False
@@ -204,4 +160,4 @@ if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
\ No newline at end of file
+ pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/tickets/ticket47937_test.py b/dirsrvtests/tests/tickets/ticket47937_test.py
index 71a6ef1..5af6eec 100644
--- a/dirsrvtests/tests/tickets/ticket47937_test.py
+++ b/dirsrvtests/tests/tickets/ticket47937_test.py
@@ -6,72 +6,29 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
+import logging
import time
+
import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-
-def test_ticket47937(topology):
+def test_ticket47937(topology_st):
"""
Test that DNA plugin only accepts valid attributes for "dnaType"
"""
log.info("Creating \"ou=people\"...")
try:
- topology.standalone.add_s(Entry(('ou=people,' + SUFFIX, {
- 'objectclass': 'top organizationalunit'.split(),
- 'ou': 'people'
- })))
+ topology_st.standalone.add_s(Entry(('ou=people,' + SUFFIX, {
+ 'objectclass': 'top organizationalunit'.split(),
+ 'ou': 'people'
+ })))
except ldap.ALREADY_EXISTS:
pass
@@ -81,10 +38,10 @@ def test_ticket47937(topology):
log.info("Creating \"ou=ranges\"...")
try:
- topology.standalone.add_s(Entry(('ou=ranges,' + SUFFIX, {
- 'objectclass': 'top organizationalunit'.split(),
- 'ou': 'ranges'
- })))
+ topology_st.standalone.add_s(Entry(('ou=ranges,' + SUFFIX, {
+ 'objectclass': 'top organizationalunit'.split(),
+ 'ou': 'ranges'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add ou=ranges org unit: error ' + e.message['desc'])
@@ -92,10 +49,10 @@ def test_ticket47937(topology):
log.info("Creating \"cn=entry\"...")
try:
- topology.standalone.add_s(Entry(('cn=entry,ou=people,' + SUFFIX, {
- 'objectclass': 'top groupofuniquenames'.split(),
- 'cn': 'entry'
- })))
+ topology_st.standalone.add_s(Entry(('cn=entry,ou=people,' + SUFFIX, {
+ 'objectclass': 'top groupofuniquenames'.split(),
+ 'cn': 'entry'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add test entry: error ' + e.message['desc'])
@@ -103,13 +60,13 @@ def test_ticket47937(topology):
log.info("Creating DNA shared config entry...")
try:
- topology.standalone.add_s(Entry(('dnaHostname=localhost.localdomain+dnaPortNum=389,ou=ranges,%s' % SUFFIX, {
- 'objectclass': 'top dnaSharedConfig'.split(),
- 'dnaHostname': 'localhost.localdomain',
- 'dnaPortNum': '389',
- 'dnaSecurePortNum': '636',
- 'dnaRemainingValues': '9501'
- })))
+ topology_st.standalone.add_s(Entry(('dnaHostname=localhost.localdomain+dnaPortNum=389,ou=ranges,%s' % SUFFIX, {
+ 'objectclass': 'top dnaSharedConfig'.split(),
+ 'dnaHostname': 'localhost.localdomain',
+ 'dnaPortNum': '389',
+ 'dnaSecurePortNum': '636',
+ 'dnaRemainingValues': '9501'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add shared config entry: error ' + e.message['desc'])
@@ -117,16 +74,17 @@ def test_ticket47937(topology):
log.info("Add dna plugin config entry...")
try:
- topology.standalone.add_s(Entry(('cn=dna config,cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config', {
- 'objectclass': 'top dnaPluginConfig'.split(),
- 'dnaType': 'description',
- 'dnaMaxValue': '10000',
- 'dnaMagicRegen': '0',
- 'dnaFilter': '(objectclass=top)',
- 'dnaScope': 'ou=people,%s' % SUFFIX,
- 'dnaNextValue': '500',
- 'dnaSharedCfgDN': 'ou=ranges,%s' % SUFFIX
- })))
+ topology_st.standalone.add_s(
+ Entry(('cn=dna config,cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config', {
+ 'objectclass': 'top dnaPluginConfig'.split(),
+ 'dnaType': 'description',
+ 'dnaMaxValue': '10000',
+ 'dnaMagicRegen': '0',
+ 'dnaFilter': '(objectclass=top)',
+ 'dnaScope': 'ou=people,%s' % SUFFIX,
+ 'dnaNextValue': '500',
+ 'dnaSharedCfgDN': 'ou=ranges,%s' % SUFFIX
+ })))
except ldap.LDAPError as e:
log.error('Failed to add DNA config entry: error ' + e.message['desc'])
@@ -134,22 +92,22 @@ def test_ticket47937(topology):
log.info("Enable the DNA plugin...")
try:
- topology.standalone.plugins.enable(name=PLUGIN_DNA)
+ topology_st.standalone.plugins.enable(name=PLUGIN_DNA)
except e:
log.error("Failed to enable DNA Plugin: error " + e.message['desc'])
assert False
log.info("Restarting the server...")
- topology.standalone.stop(timeout=120)
+ topology_st.standalone.stop(timeout=120)
time.sleep(1)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
time.sleep(3)
log.info("Apply an invalid attribute to the DNA config(dnaType: foo)...")
try:
- topology.standalone.modify_s('cn=dna config,cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config',
- [(ldap.MOD_REPLACE, 'dnaType', 'foo')])
+ topology_st.standalone.modify_s('cn=dna config,cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config',
+ [(ldap.MOD_REPLACE, 'dnaType', 'foo')])
except ldap.LDAPError as e:
log.info('Operation failed as expected (error: %s)' % e.message['desc'])
else:
diff --git a/dirsrvtests/tests/tickets/ticket47950_test.py b/dirsrvtests/tests/tickets/ticket47950_test.py
index fc3975b..d9bfec3 100644
--- a/dirsrvtests/tests/tickets/ticket47950_test.py
+++ b/dirsrvtests/tests/tickets/ticket47950_test.py
@@ -6,17 +6,11 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
import logging
+
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
@@ -24,48 +18,7 @@ USER1_DN = "uid=user1,%s" % DEFAULT_SUFFIX
USER2_DN = "uid=user2,%s" % DEFAULT_SUFFIX
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-
-def test_ticket47950(topology):
+def test_ticket47950(topology_st):
"""
Testing nsslapd-plugin-binddn-tracking does not cause issues around
access control and reconfiguring replication/repl agmt.
@@ -77,7 +30,7 @@ def test_ticket47950(topology):
# Turn on bind dn tracking
#
try:
- topology.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'nsslapd-plugin-binddn-tracking', 'on')])
+ topology_st.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'nsslapd-plugin-binddn-tracking', 'on')])
log.info('nsslapd-plugin-binddn-tracking enabled.')
except ldap.LDAPError as e:
log.error('Failed to enable bind dn tracking: ' + e.message['desc'])
@@ -87,21 +40,21 @@ def test_ticket47950(topology):
# Add two users
#
try:
- topology.standalone.add_s(Entry((USER1_DN, {
- 'objectclass': "top person inetuser".split(),
- 'userpassword': "password",
- 'sn': "1",
- 'cn': "user 1"})))
+ topology_st.standalone.add_s(Entry((USER1_DN, {
+ 'objectclass': "top person inetuser".split(),
+ 'userpassword': "password",
+ 'sn': "1",
+ 'cn': "user 1"})))
log.info('Added test user %s' % USER1_DN)
except ldap.LDAPError as e:
log.error('Failed to add %s: %s' % (USER1_DN, e.message['desc']))
assert False
try:
- topology.standalone.add_s(Entry((USER2_DN, {
- 'objectclass': "top person inetuser".split(),
- 'sn': "2",
- 'cn': "user 2"})))
+ topology_st.standalone.add_s(Entry((USER2_DN, {
+ 'objectclass': "top person inetuser".split(),
+ 'sn': "2",
+ 'cn': "user 2"})))
log.info('Added test user %s' % USER2_DN)
except ldap.LDAPError as e:
log.error('Failed to add user1: ' + e.message['desc'])
@@ -112,9 +65,9 @@ def test_ticket47950(topology):
#
try:
acival = '(targetattr ="cn")(version 3.0;acl "Test bind dn tracking"' + \
- ';allow (all) (userdn = "ldap:///%s");)' % USER1_DN
+ ';allow (all) (userdn = "ldap:///%s");)' % USER1_DN
- topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', acival)])
+ topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', acival)])
log.info('Added aci')
except ldap.LDAPError as e:
log.error('Failed to add aci: ' + e.message['desc'])
@@ -124,14 +77,14 @@ def test_ticket47950(topology):
# Make modification as user
#
try:
- topology.standalone.simple_bind_s(USER1_DN, "password")
+ topology_st.standalone.simple_bind_s(USER1_DN, "password")
log.info('Bind as user %s successful' % USER1_DN)
except ldap.LDAPError as e:
log.error('Failed to bind as user1: ' + e.message['desc'])
assert False
try:
- topology.standalone.modify_s(USER2_DN, [(ldap.MOD_REPLACE, 'cn', 'new value')])
+ topology_st.standalone.modify_s(USER2_DN, [(ldap.MOD_REPLACE, 'cn', 'new value')])
log.info('%s successfully modified user %s' % (USER1_DN, USER2_DN))
except ldap.LDAPError as e:
log.error('Failed to update user2: ' + e.message['desc'])
@@ -141,15 +94,15 @@ def test_ticket47950(topology):
# Setup replica and create a repl agmt
#
try:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
log.info('Bind as %s successful' % DN_DM)
except ldap.LDAPError as e:
log.error('Failed to bind as rootDN: ' + e.message['desc'])
assert False
try:
- topology.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER,
- replicaId=REPLICAID_MASTER_1)
+ topology_st.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER,
+ replicaId=REPLICAID_MASTER_1)
log.info('Successfully enabled replication.')
except ValueError:
log.error('Failed to enable replication')
@@ -162,8 +115,8 @@ def test_ticket47950(topology):
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
try:
- repl_agreement = topology.standalone.agreement.create(suffix=DEFAULT_SUFFIX, host="127.0.0.1",
- port="7777", properties=properties)
+ repl_agreement = topology_st.standalone.agreement.create(suffix=DEFAULT_SUFFIX, host="127.0.0.1",
+ port="7777", properties=properties)
log.info('Successfully created replication agreement')
except InvalidArgumentError as e:
log.error('Failed to create replication agreement: ' + e.message['desc'])
@@ -174,7 +127,7 @@ def test_ticket47950(topology):
#
try:
properties = {REPLICA_ID: "7"}
- topology.standalone.replica.setProperties(DEFAULT_SUFFIX, None, None, properties)
+ topology_st.standalone.replica.setProperties(DEFAULT_SUFFIX, None, None, properties)
log.info('Successfully modified replica')
except ldap.LDAPError as e:
log.error('Failed to update replica config: ' + e.message['desc'])
@@ -185,7 +138,7 @@ def test_ticket47950(topology):
#
try:
properties = {RA_CONSUMER_PORT: "8888"}
- topology.standalone.agreement.setProperties(None, repl_agreement, None, properties)
+ topology_st.standalone.agreement.setProperties(None, repl_agreement, None, properties)
log.info('Successfully modified replication agreement')
except ValueError:
log.error('Failed to update replica agreement: ' + repl_agreement)
diff --git a/dirsrvtests/tests/tickets/ticket47953_test.py b/dirsrvtests/tests/tickets/ticket47953_test.py
index 69c57e2..5e90df0 100644
--- a/dirsrvtests/tests/tickets/ticket47953_test.py
+++ b/dirsrvtests/tests/tickets/ticket47953_test.py
@@ -6,63 +6,17 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
import logging
-import pytest
import shutil
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
+
+import pytest
from lib389.tasks import *
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- #request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-def test_ticket47953(topology):
+def test_ticket47953(topology_st):
"""
Test that we can delete an aci that has an invalid syntax.
Sart by importing an ldif with a "bad" aci, then simply try
@@ -74,16 +28,16 @@ def test_ticket47953(topology):
#
# Import an invalid ldif
#
- ldif_file = (topology.standalone.getDir(__file__, DATA_DIR) +
+ ldif_file = (topology_st.standalone.getDir(__file__, DATA_DIR) +
"ticket47953/ticket47953.ldif")
try:
- ldif_dir = topology.standalone.get_ldif_dir()
+ ldif_dir = topology_st.standalone.get_ldif_dir()
shutil.copy(ldif_file, ldif_dir)
ldif_file = ldif_dir + '/ticket47953.ldif'
except:
log.fatal('Failed to copy ldif to instance ldif dir')
assert False
- importTask = Tasks(topology.standalone)
+ importTask = Tasks(topology_st.standalone)
args = {TASK_WAIT: True}
try:
importTask.importLDIF(DEFAULT_SUFFIX, None, ldif_file, args)
@@ -100,7 +54,7 @@ def test_ticket47953(topology):
log.info('Attempting to remove invalid aci...')
try:
- topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', acival)])
+ topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', acival)])
log.info('Removed invalid aci.')
except ldap.LDAPError as e:
log.error('Failed to remove invalid aci: ' + e.message['desc'])
diff --git a/dirsrvtests/tests/tickets/ticket47963_test.py b/dirsrvtests/tests/tickets/ticket47963_test.py
index 0200198..4a4fc39 100644
--- a/dirsrvtests/tests/tickets/ticket47963_test.py
+++ b/dirsrvtests/tests/tickets/ticket47963_test.py
@@ -6,58 +6,17 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
import logging
+
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-def test_ticket47963(topology):
+def test_ticket47963(topology_st):
'''
Test that the memberOf plugin works correctly after setting:
@@ -73,53 +32,53 @@ def test_ticket47963(topology):
#
# Enable the plugin and configure the skiop nest attribute, then restart the server
#
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
try:
- topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofskipnested', 'on')])
+ topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofskipnested', 'on')])
except ldap.LDAPError as e:
log.error('test_automember: Failed to modify config entry: error ' + e.message['desc'])
assert False
- topology.standalone.restart(timeout=10)
+ topology_st.standalone.restart(timeout=10)
#
# Add our groups, users, memberships, etc
#
try:
- topology.standalone.add_s(Entry((USER_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'test_user'
- })))
+ topology_st.standalone.add_s(Entry((USER_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'test_user'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add teset user: error ' + e.message['desc'])
assert False
try:
- topology.standalone.add_s(Entry((GROUP_DN1, {
- 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
- 'cn': 'group1',
- 'member': USER_DN
- })))
+ topology_st.standalone.add_s(Entry((GROUP_DN1, {
+ 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
+ 'cn': 'group1',
+ 'member': USER_DN
+ })))
except ldap.LDAPError as e:
log.error('Failed to add group1: error ' + e.message['desc'])
assert False
try:
- topology.standalone.add_s(Entry((GROUP_DN2, {
- 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
- 'cn': 'group2',
- 'member': USER_DN
- })))
+ topology_st.standalone.add_s(Entry((GROUP_DN2, {
+ 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
+ 'cn': 'group2',
+ 'member': USER_DN
+ })))
except ldap.LDAPError as e:
log.error('Failed to add group2: error ' + e.message['desc'])
assert False
# Add group with no member(yet)
try:
- topology.standalone.add_s(Entry((GROUP_DN3, {
- 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
- 'cn': 'group'
- })))
+ topology_st.standalone.add_s(Entry((GROUP_DN3, {
+ 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
+ 'cn': 'group'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add group3: error ' + e.message['desc'])
assert False
@@ -130,7 +89,7 @@ def test_ticket47963(topology):
#
try:
member_filter = ('(&(memberOf=' + GROUP_DN1 + ')(memberOf=' + GROUP_DN2 + '))')
- entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, member_filter)
+ entries = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_BASE, member_filter)
if not entries:
log.fatal('User is missing expected memberOf attrs')
assert False
@@ -140,7 +99,7 @@ def test_ticket47963(topology):
# Add the user to the group
try:
- topology.standalone.modify_s(GROUP_DN3, [(ldap.MOD_ADD, 'member', USER_DN)])
+ topology_st.standalone.modify_s(GROUP_DN3, [(ldap.MOD_ADD, 'member', USER_DN)])
except ldap.LDAPError as e:
log.error('Failed to member to group: error ' + e.message['desc'])
assert False
@@ -149,8 +108,8 @@ def test_ticket47963(topology):
# Check that the test user is a "memberOf" all three groups
try:
member_filter = ('(&(memberOf=' + GROUP_DN1 + ')(memberOf=' + GROUP_DN2 +
- ')(memberOf=' + GROUP_DN3 + '))')
- entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, member_filter)
+ ')(memberOf=' + GROUP_DN3 + '))')
+ entries = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_BASE, member_filter)
if not entries:
log.fatal('User is missing expected memberOf attrs')
assert False
@@ -162,7 +121,7 @@ def test_ticket47963(topology):
# Delete group2, and check memberOf values in the user entry
#
try:
- topology.standalone.delete_s(GROUP_DN2)
+ topology_st.standalone.delete_s(GROUP_DN2)
except ldap.LDAPError as e:
log.error('Failed to delete test group2: ' + e.message['desc'])
assert False
@@ -170,7 +129,7 @@ def test_ticket47963(topology):
try:
member_filter = ('(&(memberOf=' + GROUP_DN1 + ')(memberOf=' + GROUP_DN3 + '))')
- entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, member_filter)
+ entries = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_BASE, member_filter)
if not entries:
log.fatal('User incorrect memberOf attrs')
assert False
diff --git a/dirsrvtests/tests/tickets/ticket47966_test.py b/dirsrvtests/tests/tickets/ticket47966_test.py
index 4748c12..0152de0 100644
--- a/dirsrvtests/tests/tickets/ticket47966_test.py
+++ b/dirsrvtests/tests/tickets/ticket47966_test.py
@@ -6,128 +6,24 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_m2
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-m1_m2_agmt = ""
-
-
-class TopologyReplication(object):
- def __init__(self, master1, master2):
- master1.open()
- self.master1 = master1
- master2.open()
- self.master2 = master2
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating master 1...
- master1 = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_MASTER_1
- args_instance[SER_PORT] = PORT_MASTER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master1.allocate(args_master)
- instance_master1 = master1.exists()
- if instance_master1:
- master1.delete()
- master1.create()
- master1.open()
- master1.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-
- # Creating master 2...
- master2 = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_MASTER_2
- args_instance[SER_PORT] = PORT_MASTER_2
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master2.allocate(args_master)
- instance_master2 = master2.exists()
- if instance_master2:
- master2.delete()
- master2.create()
- master2.open()
- master2.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
- #
- # Create all the agreements
- #
- # Creating agreement from master 1 to master 2
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- global m1_m2_agmt
- m1_m2_agmt = master1.agreement.create(suffix=DEFAULT_SUFFIX, host=master2.host, port=master2.port, properties=properties)
- if not m1_m2_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m1_m2_agmt)
-
- # Creating agreement from master 2 to master 1
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m2_m1_agmt = master2.agreement.create(suffix=DEFAULT_SUFFIX, host=master1.host, port=master1.port, properties=properties)
- if not m2_m1_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m2_m1_agmt)
-
- # Allow the replicas to get situated with the new agreements...
- time.sleep(5)
-
- #
- # Initialize all the agreements
- #
- master1.agreement.init(DEFAULT_SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
- master1.waitForReplInit(m1_m2_agmt)
-
- # Check replication is working...
- if master1.testReplication(DEFAULT_SUFFIX, master2):
- log.info('Replication is working.')
- else:
- log.fatal('Replication is not working.')
- assert False
-
- def fin():
- master1.delete()
- master2.delete()
- request.addfinalizer(fin)
-
- return TopologyReplication(master1, master2)
-def test_ticket47966(topology):
+def test_ticket47966(topology_m2):
'''
Testing bulk import when the backend with VLV was recreated.
If the test passes without the server crash, 47966 is verified.
'''
log.info('Testing Ticket 47966 - [VLV] slapd crashes during Dogtag clone reinstallation')
- M1 = topology.master1
- M2 = topology.master2
+ M1 = topology_m2.ms["master1"]
+ M2 = topology_m2.ms["master2"]
+ m1_m2_agmt = topology_m2.ms["master1_agmts"]["m1_m2"]
log.info('0. Create a VLV index on Master 2.')
# get the backend entry
diff --git a/dirsrvtests/tests/tickets/ticket47970_test.py b/dirsrvtests/tests/tickets/ticket47970_test.py
index 5eb426d..86d67e3 100644
--- a/dirsrvtests/tests/tickets/ticket47970_test.py
+++ b/dirsrvtests/tests/tickets/ticket47970_test.py
@@ -6,18 +6,12 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
-import ldap.sasl
import logging
+
+import ldap.sasl
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
@@ -25,48 +19,7 @@ USER1_DN = "uid=user1,%s" % DEFAULT_SUFFIX
USER2_DN = "uid=user2,%s" % DEFAULT_SUFFIX
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-
-def test_ticket47970(topology):
+def test_ticket47970(topology_st):
"""
Testing that a failed SASL bind does not trigger account lockout -
which would attempt to update the passwordRetryCount on the root dse entry
@@ -78,14 +31,14 @@ def test_ticket47970(topology):
# Enable account lockout
#
try:
- topology.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordLockout', 'on')])
+ topology_st.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordLockout', 'on')])
log.info('account lockout enabled.')
except ldap.LDAPError as e:
log.error('Failed to enable account lockout: ' + e.message['desc'])
assert False
try:
- topology.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordMaxFailure', '5')])
+ topology_st.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordMaxFailure', '5')])
log.info('passwordMaxFailure set.')
except ldap.LDAPError as e:
log.error('Failed to to set passwordMaxFailure: ' + e.message['desc'])
@@ -99,7 +52,7 @@ def test_ticket47970(topology):
user_name = "mark"
pw = "secret"
auth_tokens = ldap.sasl.digest_md5(user_name, pw)
- topology.standalone.sasl_interactive_bind_s("", auth_tokens)
+ topology_st.standalone.sasl_interactive_bind_s("", auth_tokens)
except ldap.INVALID_CREDENTIALS as e:
log.info("SASL Bind failed as expected")
failed_as_expected = True
@@ -112,9 +65,9 @@ def test_ticket47970(topology):
# Check that passwordRetryCount was not set on the root dse entry
#
try:
- entry = topology.standalone.search_s("", ldap.SCOPE_BASE,
- "passwordRetryCount=*",
- ['passwordRetryCount'])
+ entry = topology_st.standalone.search_s("", ldap.SCOPE_BASE,
+ "passwordRetryCount=*",
+ ['passwordRetryCount'])
except ldap.LDAPError as e:
log.error('Failed to search Root DSE entry: ' + e.message['desc'])
assert False
diff --git a/dirsrvtests/tests/tickets/ticket47973_test.py b/dirsrvtests/tests/tickets/ticket47973_test.py
index 9116246..c333ddb 100644
--- a/dirsrvtests/tests/tickets/ticket47973_test.py
+++ b/dirsrvtests/tests/tickets/ticket47973_test.py
@@ -6,15 +6,12 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import time
-import ldap
-import ldap.sasl
import logging
+
+import ldap.sasl
import pytest
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
@@ -22,47 +19,6 @@ USER_DN = 'uid=user1,%s' % (DEFAULT_SUFFIX)
SCHEMA_RELOAD_COUNT = 10
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-
def task_complete(conn, task_dn):
finished = False
@@ -81,7 +37,7 @@ def task_complete(conn, task_dn):
return finished
-def test_ticket47973(topology):
+def test_ticket47973(topology_st):
"""
During the schema reload task there is a small window where the new schema is not loaded
into the asi hashtables - this results in searches not returning entries.
@@ -93,10 +49,10 @@ def test_ticket47973(topology):
# Add a user
#
try:
- topology.standalone.add_s(Entry((USER_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'user1'
- })))
+ topology_st.standalone.add_s(Entry((USER_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user1'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add user1: error ' + e.message['desc'])
assert False
@@ -113,10 +69,10 @@ def test_ticket47973(topology):
TASK_DN = 'cn=task-' + str(task_count) + ',cn=schema reload task, cn=tasks, cn=config'
try:
- topology.standalone.add_s(Entry((TASK_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'cn': 'task-' + str(task_count)
- })))
+ topology_st.standalone.add_s(Entry((TASK_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'cn': 'task-' + str(task_count)
+ })))
except ldap.LDAPError as e:
log.error('Failed to add task entry: error ' + e.message['desc'])
assert False
@@ -130,9 +86,9 @@ def test_ticket47973(topology):
# Now check the user is still being returned
#
try:
- entries = topology.standalone.search_s(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- '(uid=user1)')
+ entries = topology_st.standalone.search_s(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ '(uid=user1)')
if not entries or not entries[0]:
log.fatal('User was not returned from search!')
assert False
@@ -143,7 +99,7 @@ def test_ticket47973(topology):
#
# Check if task is complete
#
- if task_complete(topology.standalone, TASK_DN):
+ if task_complete(topology_st.standalone, TASK_DN):
break
search_count += 1
diff --git a/dirsrvtests/tests/tickets/ticket47976_test.py b/dirsrvtests/tests/tickets/ticket47976_test.py
index df4891d..408912b 100644
--- a/dirsrvtests/tests/tickets/ticket47976_test.py
+++ b/dirsrvtests/tests/tickets/ticket47976_test.py
@@ -1,162 +1,115 @@
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
-PEOPLE_OU='people'
+PEOPLE_OU = 'people'
PEOPLE_DN = "ou=%s,%s" % (PEOPLE_OU, SUFFIX)
-GROUPS_OU='groups'
+GROUPS_OU = 'groups'
GROUPS_DN = "ou=%s,%s" % (GROUPS_OU, SUFFIX)
-DEFINITIONS_CN='definitions'
+DEFINITIONS_CN = 'definitions'
DEFINITIONS_DN = "cn=%s,%s" % (DEFINITIONS_CN, SUFFIX)
-TEMPLATES_CN='templates'
+TEMPLATES_CN = 'templates'
TEMPLATES_DN = "cn=%s,%s" % (TEMPLATES_CN, SUFFIX)
-MANAGED_GROUP_TEMPLATES_CN='managed group templates'
-MANAGED_GROUP_TEMPLATES_DN='cn=%s,%s' % (MANAGED_GROUP_TEMPLATES_CN, TEMPLATES_DN)
-MANAGED_GROUP_MEP_TMPL_CN='UPG'
-MANAGED_GROUP_MEP_TMPL_DN='cn=%s,%s' % (MANAGED_GROUP_MEP_TMPL_CN, MANAGED_GROUP_TEMPLATES_DN)
-MANAGED_GROUP_DEF_CN='managed group definition'
-MANAGED_GROUP_DEF_DN='cn=%s,%s' % (MANAGED_GROUP_DEF_CN, DEFINITIONS_DN)
+MANAGED_GROUP_TEMPLATES_CN = 'managed group templates'
+MANAGED_GROUP_TEMPLATES_DN = 'cn=%s,%s' % (MANAGED_GROUP_TEMPLATES_CN, TEMPLATES_DN)
+MANAGED_GROUP_MEP_TMPL_CN = 'UPG'
+MANAGED_GROUP_MEP_TMPL_DN = 'cn=%s,%s' % (MANAGED_GROUP_MEP_TMPL_CN, MANAGED_GROUP_TEMPLATES_DN)
+MANAGED_GROUP_DEF_CN = 'managed group definition'
+MANAGED_GROUP_DEF_DN = 'cn=%s,%s' % (MANAGED_GROUP_DEF_CN, DEFINITIONS_DN)
-MAX_ACCOUNTS=2
+MAX_ACCOUNTS = 2
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_ticket47976_init(topology):
+def test_ticket47976_init(topology_st):
"""Create mep definitions and templates"""
try:
- topology.standalone.add_s(Entry((PEOPLE_DN, {
- 'objectclass': "top extensibleObject".split(),
- 'ou': 'people'})))
+ topology_st.standalone.add_s(Entry((PEOPLE_DN, {
+ 'objectclass': "top extensibleObject".split(),
+ 'ou': 'people'})))
except ldap.ALREADY_EXISTS:
pass
try:
- topology.standalone.add_s(Entry((GROUPS_DN, {
- 'objectclass': "top extensibleObject".split(),
- 'ou': GROUPS_OU})))
+ topology_st.standalone.add_s(Entry((GROUPS_DN, {
+ 'objectclass': "top extensibleObject".split(),
+ 'ou': GROUPS_OU})))
except ldap.ALREADY_EXISTS:
pass
- topology.standalone.add_s(Entry((DEFINITIONS_DN, {
- 'objectclass': "top nsContainer".split(),
- 'cn': DEFINITIONS_CN})))
- topology.standalone.add_s(Entry((TEMPLATES_DN, {
- 'objectclass': "top nsContainer".split(),
- 'cn': TEMPLATES_CN})))
- topology.standalone.add_s(Entry((MANAGED_GROUP_DEF_DN, {
- 'objectclass': "top extensibleObject".split(),
- 'cn': MANAGED_GROUP_DEF_CN,
- 'originScope': PEOPLE_DN,
- 'originFilter': '(objectclass=posixAccount)',
- 'managedBase': GROUPS_DN,
- 'managedTemplate': MANAGED_GROUP_MEP_TMPL_DN})))
-
- topology.standalone.add_s(Entry((MANAGED_GROUP_TEMPLATES_DN, {
- 'objectclass': "top nsContainer".split(),
- 'cn': MANAGED_GROUP_TEMPLATES_CN})))
-
- topology.standalone.add_s(Entry((MANAGED_GROUP_MEP_TMPL_DN, {
- 'objectclass': "top mepTemplateEntry".split(),
- 'cn': MANAGED_GROUP_MEP_TMPL_CN,
- 'mepRDNAttr': 'cn',
- 'mepStaticAttr': ['objectclass: posixGroup',
- 'objectclass: extensibleObject'],
- 'mepMappedAttr': ['cn: $cn|uid: $cn',
- 'gidNumber: $uidNumber']})))
-
-
- topology.standalone.plugins.enable(name=PLUGIN_MANAGED_ENTRY)
- topology.standalone.restart(timeout=10)
-
-
-def test_ticket47976_1(topology):
+ topology_st.standalone.add_s(Entry((DEFINITIONS_DN, {
+ 'objectclass': "top nsContainer".split(),
+ 'cn': DEFINITIONS_CN})))
+ topology_st.standalone.add_s(Entry((TEMPLATES_DN, {
+ 'objectclass': "top nsContainer".split(),
+ 'cn': TEMPLATES_CN})))
+ topology_st.standalone.add_s(Entry((MANAGED_GROUP_DEF_DN, {
+ 'objectclass': "top extensibleObject".split(),
+ 'cn': MANAGED_GROUP_DEF_CN,
+ 'originScope': PEOPLE_DN,
+ 'originFilter': '(objectclass=posixAccount)',
+ 'managedBase': GROUPS_DN,
+ 'managedTemplate': MANAGED_GROUP_MEP_TMPL_DN})))
+
+ topology_st.standalone.add_s(Entry((MANAGED_GROUP_TEMPLATES_DN, {
+ 'objectclass': "top nsContainer".split(),
+ 'cn': MANAGED_GROUP_TEMPLATES_CN})))
+
+ topology_st.standalone.add_s(Entry((MANAGED_GROUP_MEP_TMPL_DN, {
+ 'objectclass': "top mepTemplateEntry".split(),
+ 'cn': MANAGED_GROUP_MEP_TMPL_CN,
+ 'mepRDNAttr': 'cn',
+ 'mepStaticAttr': ['objectclass: posixGroup',
+ 'objectclass: extensibleObject'],
+ 'mepMappedAttr': ['cn: $cn|uid: $cn',
+ 'gidNumber: $uidNumber']})))
+
+ topology_st.standalone.plugins.enable(name=PLUGIN_MANAGED_ENTRY)
+ topology_st.standalone.restart(timeout=10)
+
+
+def test_ticket47976_1(topology_st):
mod = [(ldap.MOD_REPLACE, 'nsslapd-pluginConfigArea', DEFINITIONS_DN)]
- topology.standalone.modify_s('cn=%s,cn=plugins,cn=config' % PLUGIN_MANAGED_ENTRY, mod)
- topology.standalone.stop(timeout=10)
- topology.standalone.start(timeout=10)
+ topology_st.standalone.modify_s('cn=%s,cn=plugins,cn=config' % PLUGIN_MANAGED_ENTRY, mod)
+ topology_st.standalone.stop(timeout=10)
+ topology_st.standalone.start(timeout=10)
for cpt in range(MAX_ACCOUNTS):
name = "user%d" % (cpt)
- topology.standalone.add_s(Entry(("uid=%s,%s" %(name, PEOPLE_DN), {
- 'objectclass': 'top posixAccount extensibleObject'.split(),
- 'uid': name,
- 'cn': name,
- 'uidNumber': '1',
- 'gidNumber': '1',
- 'homeDirectory': '/home/%s' % name
- })))
+ topology_st.standalone.add_s(Entry(("uid=%s,%s" % (name, PEOPLE_DN), {
+ 'objectclass': 'top posixAccount extensibleObject'.split(),
+ 'uid': name,
+ 'cn': name,
+ 'uidNumber': '1',
+ 'gidNumber': '1',
+ 'homeDirectory': '/home/%s' % name
+ })))
-def test_ticket47976_2(topology):
+def test_ticket47976_2(topology_st):
"""It reimports the database with a very large page size
so all the entries (user and its private group).
"""
log.info('Test complete')
- mod = [(ldap.MOD_REPLACE, 'nsslapd-db-page-size', str(128*1024))]
- topology.standalone.modify_s(DN_LDBM, mod)
+ mod = [(ldap.MOD_REPLACE, 'nsslapd-db-page-size', str(128 * 1024))]
+ topology_st.standalone.modify_s(DN_LDBM, mod)
# Get the the full path and name for our LDIF we will be exporting
log.info('Export LDIF file...')
- ldif_dir = topology.standalone.get_ldif_dir()
+ ldif_dir = topology_st.standalone.get_ldif_dir()
ldif_file = ldif_dir + "/export.ldif"
args = {EXPORT_REPL_INFO: False,
TASK_WAIT: True}
- exportTask = Tasks(topology.standalone)
+ exportTask = Tasks(topology_st.standalone)
try:
exportTask.exportLDIF(DEFAULT_SUFFIX, None, ldif_file, args)
except ValueError:
assert False
# import the new ldif file
log.info('Import LDIF file...')
- importTask = Tasks(topology.standalone)
+ importTask = Tasks(topology_st.standalone)
args = {TASK_WAIT: True}
try:
importTask.importLDIF(DEFAULT_SUFFIX, None, ldif_file, args)
@@ -166,19 +119,19 @@ def test_ticket47976_2(topology):
assert False
-def test_ticket47976_3(topology):
+def test_ticket47976_3(topology_st):
"""A single delete of a user should hit 47976, because mep post op will
delete its related group.
"""
log.info('Testing if the delete will hang or not')
- #log.info("\n\nAttach\n\n debugger")
- #time.sleep(60)
- topology.standalone.set_option(ldap.OPT_TIMEOUT, 5)
+ # log.info("\n\nAttach\n\n debugger")
+ # time.sleep(60)
+ topology_st.standalone.set_option(ldap.OPT_TIMEOUT, 5)
try:
for cpt in range(MAX_ACCOUNTS):
name = "user%d" % (cpt)
- topology.standalone.delete_s("uid=%s,%s" %(name, PEOPLE_DN))
+ topology_st.standalone.delete_s("uid=%s,%s" % (name, PEOPLE_DN))
except ldap.TIMEOUT as e:
log.fatal('Timeout... likely it hangs (47976)')
assert False
@@ -187,13 +140,13 @@ def test_ticket47976_3(topology):
for cpt in range(MAX_ACCOUNTS):
try:
name = "user%d" % (cpt)
- topology.standalone.getEntry("uid=%s,%s" %(name, PEOPLE_DN), ldap.SCOPE_BASE, 'objectclass=*')
+ topology_st.standalone.getEntry("uid=%s,%s" % (name, PEOPLE_DN), ldap.SCOPE_BASE, 'objectclass=*')
assert False
except ldap.NO_SUCH_OBJECT:
log.info('%s was correctly deleted' % name)
pass
- assert cpt == (MAX_ACCOUNTS -1)
+ assert cpt == (MAX_ACCOUNTS - 1)
if __name__ == '__main__':
diff --git a/dirsrvtests/tests/tickets/ticket47980_test.py b/dirsrvtests/tests/tickets/ticket47980_test.py
index eefc103..8a17a4b 100644
--- a/dirsrvtests/tests/tickets/ticket47980_test.py
+++ b/dirsrvtests/tests/tickets/ticket47980_test.py
@@ -6,18 +6,12 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
-import ldap.sasl
import logging
+
+import ldap.sasl
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
@@ -36,89 +30,48 @@ USER6_DN = 'uid=user6,%s' % (BRANCH6)
BRANCH1_CONTAINER = 'cn=nsPwPolicyContainer,ou=level1,dc=example,dc=com'
BRANCH1_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \
- 'cn=nsPwPolicyContainer,ou=level1,dc=example,dc=com'
+ 'cn=nsPwPolicyContainer,ou=level1,dc=example,dc=com'
BRANCH1_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \
- 'cn=nsPwPolicyContainer,ou=level1,dc=example,dc=com'
+ 'cn=nsPwPolicyContainer,ou=level1,dc=example,dc=com'
BRANCH1_COS_DEF = 'cn=nsPwPolicy_CoS,ou=level1,dc=example,dc=com'
BRANCH2_CONTAINER = 'cn=nsPwPolicyContainer,ou=level2,ou=level1,dc=example,dc=com'
BRANCH2_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlevel2\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \
- 'cn=nsPwPolicyContainer,ou=level2,ou=level1,dc=example,dc=com'
+ 'cn=nsPwPolicyContainer,ou=level2,ou=level1,dc=example,dc=com'
BRANCH2_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlevel2\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \
- 'cn=nsPwPolicyContainer,ou=level2,ou=level1,dc=example,dc=com'
+ 'cn=nsPwPolicyContainer,ou=level2,ou=level1,dc=example,dc=com'
BRANCH2_COS_DEF = 'cn=nsPwPolicy_CoS,ou=level2,ou=level1,dc=example,dc=com'
BRANCH3_CONTAINER = 'cn=nsPwPolicyContainer,ou=level3,ou=level2,ou=level1,dc=example,dc=com'
BRANCH3_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlevel3\2Cou\3Dlevel2\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \
- 'cn=nsPwPolicyContainer,ou=level3,ou=level2,ou=level1,dc=example,dc=com'
+ 'cn=nsPwPolicyContainer,ou=level3,ou=level2,ou=level1,dc=example,dc=com'
BRANCH3_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlevel3\2Cou\3Dlevel2\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \
- 'cn=nsPwPolicyContainer,ou=level3,ou=level2,ou=level1,dc=example,dc=com'
+ 'cn=nsPwPolicyContainer,ou=level3,ou=level2,ou=level1,dc=example,dc=com'
BRANCH3_COS_DEF = 'cn=nsPwPolicy_CoS,ou=level3,ou=level2,ou=level1,dc=example,dc=com'
BRANCH4_CONTAINER = 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com'
BRANCH4_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \
- 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com'
+ 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com'
BRANCH4_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \
- 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com'
+ 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com'
BRANCH4_COS_DEF = 'cn=nsPwPolicy_CoS,ou=people,dc=example,dc=com'
BRANCH5_CONTAINER = 'cn=nsPwPolicyContainer,ou=lower,ou=people,dc=example,dc=com'
BRANCH5_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlower\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \
- 'cn=nsPwPolicyContainer,ou=lower,ou=People,dc=example,dc=com'
+ 'cn=nsPwPolicyContainer,ou=lower,ou=People,dc=example,dc=com'
BRANCH5_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlower\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \
- 'cn=nsPwPolicyContainer,ou=lower,ou=People,dc=example,dc=com'
+ 'cn=nsPwPolicyContainer,ou=lower,ou=People,dc=example,dc=com'
BRANCH5_COS_DEF = 'cn=nsPwPolicy_CoS,ou=lower,ou=People,dc=example,dc=com'
BRANCH6_CONTAINER = 'cn=nsPwPolicyContainer,ou=lower,ou=lower,ou=People,dc=example,dc=com'
BRANCH6_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlower\2Cou\3Dlower\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \
- 'cn=nsPwPolicyContainer,ou=lower,ou=lower,ou=People,dc=example,dc=com'
+ 'cn=nsPwPolicyContainer,ou=lower,ou=lower,ou=People,dc=example,dc=com'
BRANCH6_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlower\2Cou\3Dlower\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \
- 'cn=nsPwPolicyContainer,ou=lower,ou=lower,ou=People,dc=example,dc=com'
+ 'cn=nsPwPolicyContainer,ou=lower,ou=lower,ou=People,dc=example,dc=com'
BRANCH6_COS_DEF = 'cn=nsPwPolicy_CoS,ou=lower,ou=lower,ou=People,dc=example,dc=com'
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-
-def test_ticket47980(topology):
+def test_ticket47980(topology_st):
"""
Multiple COS pointer definitions that use the same attribute are not correctly ordered.
The cos plugin was incorrectly sorting the attribute indexes based on subtree, which lead
@@ -129,38 +82,38 @@ def test_ticket47980(topology):
# Add our nested branches
try:
- topology.standalone.add_s(Entry((BRANCH1, {
- 'objectclass': 'top extensibleObject'.split(),
- 'ou': 'level1'
- })))
+ topology_st.standalone.add_s(Entry((BRANCH1, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'ou': 'level1'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add level1: error ' + e.message['desc'])
assert False
try:
- topology.standalone.add_s(Entry((BRANCH2, {
- 'objectclass': 'top extensibleObject'.split(),
- 'ou': 'level2'
- })))
+ topology_st.standalone.add_s(Entry((BRANCH2, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'ou': 'level2'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add level2: error ' + e.message['desc'])
assert False
try:
- topology.standalone.add_s(Entry((BRANCH3, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'level3'
- })))
+ topology_st.standalone.add_s(Entry((BRANCH3, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'level3'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add level3: error ' + e.message['desc'])
assert False
# People branch, might already exist
try:
- topology.standalone.add_s(Entry((BRANCH4, {
- 'objectclass': 'top extensibleObject'.split(),
- 'ou': 'level4'
- })))
+ topology_st.standalone.add_s(Entry((BRANCH4, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'ou': 'level4'
+ })))
except ldap.ALREADY_EXISTS:
pass
except ldap.LDAPError as e:
@@ -168,81 +121,81 @@ def test_ticket47980(topology):
assert False
try:
- topology.standalone.add_s(Entry((BRANCH5, {
- 'objectclass': 'top extensibleObject'.split(),
- 'ou': 'level5'
- })))
+ topology_st.standalone.add_s(Entry((BRANCH5, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'ou': 'level5'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add level5: error ' + e.message['desc'])
assert False
try:
- topology.standalone.add_s(Entry((BRANCH6, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'level6'
- })))
+ topology_st.standalone.add_s(Entry((BRANCH6, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'level6'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add level6: error ' + e.message['desc'])
assert False
# Add users to each branch
try:
- topology.standalone.add_s(Entry((USER1_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'user1'
- })))
+ topology_st.standalone.add_s(Entry((USER1_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user1'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add user1: error ' + e.message['desc'])
assert False
try:
- topology.standalone.add_s(Entry((USER2_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'user2'
- })))
+ topology_st.standalone.add_s(Entry((USER2_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user2'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add user2: error ' + e.message['desc'])
assert False
try:
- topology.standalone.add_s(Entry((USER3_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'user3'
- })))
+ topology_st.standalone.add_s(Entry((USER3_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user3'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add user3: error ' + e.message['desc'])
assert False
try:
- topology.standalone.add_s(Entry((USER4_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'user4'
- })))
+ topology_st.standalone.add_s(Entry((USER4_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user4'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add user4: error ' + e.message['desc'])
assert False
try:
- topology.standalone.add_s(Entry((USER5_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'user5'
- })))
+ topology_st.standalone.add_s(Entry((USER5_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user5'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add user5: error ' + e.message['desc'])
assert False
try:
- topology.standalone.add_s(Entry((USER6_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'user6'
- })))
+ topology_st.standalone.add_s(Entry((USER6_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user6'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add user6: error ' + e.message['desc'])
assert False
# Enable password policy
try:
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')])
+ topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')])
except ldap.LDAPError as e:
log.error('Failed to set pwpolicy-local: error ' + e.message['desc'])
assert False
@@ -252,51 +205,51 @@ def test_ticket47980(topology):
#
# Add the container
try:
- topology.standalone.add_s(Entry((BRANCH1_CONTAINER, {
- 'objectclass': 'top nsContainer'.split(),
- 'cn': 'nsPwPolicyContainer'
- })))
+ topology_st.standalone.add_s(Entry((BRANCH1_CONTAINER, {
+ 'objectclass': 'top nsContainer'.split(),
+ 'cn': 'nsPwPolicyContainer'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add subtree container for level1: error ' + e.message['desc'])
assert False
# Add the password policy subentry
try:
- topology.standalone.add_s(Entry((BRANCH1_PWP, {
- 'objectclass': 'top ldapsubentry passwordpolicy'.split(),
- 'cn': 'cn=nsPwPolicyEntry,ou=level1,dc=example,dc=com',
- 'passwordMustChange': 'off',
- 'passwordExp': 'off',
- 'passwordHistory': 'off',
- 'passwordMinAge': '0',
- 'passwordChange': 'off',
- 'passwordStorageScheme': 'ssha'
- })))
+ topology_st.standalone.add_s(Entry((BRANCH1_PWP, {
+ 'objectclass': 'top ldapsubentry passwordpolicy'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=level1,dc=example,dc=com',
+ 'passwordMustChange': 'off',
+ 'passwordExp': 'off',
+ 'passwordHistory': 'off',
+ 'passwordMinAge': '0',
+ 'passwordChange': 'off',
+ 'passwordStorageScheme': 'ssha'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add passwordpolicy for level1: error ' + e.message['desc'])
assert False
# Add the COS template
try:
- topology.standalone.add_s(Entry((BRANCH1_COS_TMPL, {
- 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
- 'cn': 'cn=nsPwPolicyEntry,ou=level1,dc=example,dc=com',
- 'cosPriority': '1',
- 'cn': 'cn=nsPwTemplateEntry,ou=level1,dc=example,dc=com',
- 'pwdpolicysubentry': BRANCH1_PWP
- })))
+ topology_st.standalone.add_s(Entry((BRANCH1_COS_TMPL, {
+ 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=level1,dc=example,dc=com',
+ 'cosPriority': '1',
+ 'cn': 'cn=nsPwTemplateEntry,ou=level1,dc=example,dc=com',
+ 'pwdpolicysubentry': BRANCH1_PWP
+ })))
except ldap.LDAPError as e:
log.error('Failed to add COS template for level1: error ' + e.message['desc'])
assert False
# Add the COS definition
try:
- topology.standalone.add_s(Entry((BRANCH1_COS_DEF, {
- 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
- 'cn': 'cn=nsPwPolicyEntry,ou=level1,dc=example,dc=com',
- 'costemplatedn': BRANCH1_COS_TMPL,
- 'cosAttribute': 'pwdpolicysubentry default operational-default'
- })))
+ topology_st.standalone.add_s(Entry((BRANCH1_COS_DEF, {
+ 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=level1,dc=example,dc=com',
+ 'costemplatedn': BRANCH1_COS_TMPL,
+ 'cosAttribute': 'pwdpolicysubentry default operational-default'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add COS def for level1: error ' + e.message['desc'])
assert False
@@ -306,51 +259,51 @@ def test_ticket47980(topology):
#
# Add the container
try:
- topology.standalone.add_s(Entry((BRANCH2_CONTAINER, {
- 'objectclass': 'top nsContainer'.split(),
- 'cn': 'nsPwPolicyContainer'
- })))
+ topology_st.standalone.add_s(Entry((BRANCH2_CONTAINER, {
+ 'objectclass': 'top nsContainer'.split(),
+ 'cn': 'nsPwPolicyContainer'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add subtree container for level2: error ' + e.message['desc'])
assert False
# Add the password policy subentry
try:
- topology.standalone.add_s(Entry((BRANCH2_PWP, {
- 'objectclass': 'top ldapsubentry passwordpolicy'.split(),
- 'cn': 'cn=nsPwPolicyEntry,ou=level2,dc=example,dc=com',
- 'passwordMustChange': 'off',
- 'passwordExp': 'off',
- 'passwordHistory': 'off',
- 'passwordMinAge': '0',
- 'passwordChange': 'off',
- 'passwordStorageScheme': 'ssha'
- })))
+ topology_st.standalone.add_s(Entry((BRANCH2_PWP, {
+ 'objectclass': 'top ldapsubentry passwordpolicy'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=level2,dc=example,dc=com',
+ 'passwordMustChange': 'off',
+ 'passwordExp': 'off',
+ 'passwordHistory': 'off',
+ 'passwordMinAge': '0',
+ 'passwordChange': 'off',
+ 'passwordStorageScheme': 'ssha'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add passwordpolicy for level2: error ' + e.message['desc'])
assert False
# Add the COS template
try:
- topology.standalone.add_s(Entry((BRANCH2_COS_TMPL, {
- 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
- 'cn': 'cn=nsPwPolicyEntry,ou=level2,dc=example,dc=com',
- 'cosPriority': '1',
- 'cn': 'cn=nsPwTemplateEntry,ou=level2,dc=example,dc=com',
- 'pwdpolicysubentry': BRANCH2_PWP
- })))
+ topology_st.standalone.add_s(Entry((BRANCH2_COS_TMPL, {
+ 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=level2,dc=example,dc=com',
+ 'cosPriority': '1',
+ 'cn': 'cn=nsPwTemplateEntry,ou=level2,dc=example,dc=com',
+ 'pwdpolicysubentry': BRANCH2_PWP
+ })))
except ldap.LDAPError as e:
log.error('Failed to add COS template for level2: error ' + e.message['desc'])
assert False
# Add the COS definition
try:
- topology.standalone.add_s(Entry((BRANCH2_COS_DEF, {
- 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
- 'cn': 'cn=nsPwPolicyEntry,ou=level2,dc=example,dc=com',
- 'costemplatedn': BRANCH2_COS_TMPL,
- 'cosAttribute': 'pwdpolicysubentry default operational-default'
- })))
+ topology_st.standalone.add_s(Entry((BRANCH2_COS_DEF, {
+ 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=level2,dc=example,dc=com',
+ 'costemplatedn': BRANCH2_COS_TMPL,
+ 'cosAttribute': 'pwdpolicysubentry default operational-default'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add COS def for level2: error ' + e.message['desc'])
assert False
@@ -360,51 +313,51 @@ def test_ticket47980(topology):
#
# Add the container
try:
- topology.standalone.add_s(Entry((BRANCH3_CONTAINER, {
- 'objectclass': 'top nsContainer'.split(),
- 'cn': 'nsPwPolicyContainer'
- })))
+ topology_st.standalone.add_s(Entry((BRANCH3_CONTAINER, {
+ 'objectclass': 'top nsContainer'.split(),
+ 'cn': 'nsPwPolicyContainer'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add subtree container for level3: error ' + e.message['desc'])
assert False
# Add the password policy subentry
try:
- topology.standalone.add_s(Entry((BRANCH3_PWP, {
- 'objectclass': 'top ldapsubentry passwordpolicy'.split(),
- 'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com',
- 'passwordMustChange': 'off',
- 'passwordExp': 'off',
- 'passwordHistory': 'off',
- 'passwordMinAge': '0',
- 'passwordChange': 'off',
- 'passwordStorageScheme': 'ssha'
- })))
+ topology_st.standalone.add_s(Entry((BRANCH3_PWP, {
+ 'objectclass': 'top ldapsubentry passwordpolicy'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com',
+ 'passwordMustChange': 'off',
+ 'passwordExp': 'off',
+ 'passwordHistory': 'off',
+ 'passwordMinAge': '0',
+ 'passwordChange': 'off',
+ 'passwordStorageScheme': 'ssha'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add passwordpolicy for level3: error ' + e.message['desc'])
assert False
# Add the COS template
try:
- topology.standalone.add_s(Entry((BRANCH3_COS_TMPL, {
- 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
- 'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com',
- 'cosPriority': '1',
- 'cn': 'cn=nsPwTemplateEntry,ou=level3,dc=example,dc=com',
- 'pwdpolicysubentry': BRANCH3_PWP
- })))
+ topology_st.standalone.add_s(Entry((BRANCH3_COS_TMPL, {
+ 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com',
+ 'cosPriority': '1',
+ 'cn': 'cn=nsPwTemplateEntry,ou=level3,dc=example,dc=com',
+ 'pwdpolicysubentry': BRANCH3_PWP
+ })))
except ldap.LDAPError as e:
log.error('Failed to add COS template for level3: error ' + e.message['desc'])
assert False
# Add the COS definition
try:
- topology.standalone.add_s(Entry((BRANCH3_COS_DEF, {
- 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
- 'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com',
- 'costemplatedn': BRANCH3_COS_TMPL,
- 'cosAttribute': 'pwdpolicysubentry default operational-default'
- })))
+ topology_st.standalone.add_s(Entry((BRANCH3_COS_DEF, {
+ 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com',
+ 'costemplatedn': BRANCH3_COS_TMPL,
+ 'cosAttribute': 'pwdpolicysubentry default operational-default'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add COS def for level3: error ' + e.message['desc'])
assert False
@@ -414,51 +367,51 @@ def test_ticket47980(topology):
#
# Add the container
try:
- topology.standalone.add_s(Entry((BRANCH4_CONTAINER, {
- 'objectclass': 'top nsContainer'.split(),
- 'cn': 'nsPwPolicyContainer'
- })))
+ topology_st.standalone.add_s(Entry((BRANCH4_CONTAINER, {
+ 'objectclass': 'top nsContainer'.split(),
+ 'cn': 'nsPwPolicyContainer'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add subtree container for level3: error ' + e.message['desc'])
assert False
# Add the password policy subentry
try:
- topology.standalone.add_s(Entry((BRANCH4_PWP, {
- 'objectclass': 'top ldapsubentry passwordpolicy'.split(),
- 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
- 'passwordMustChange': 'off',
- 'passwordExp': 'off',
- 'passwordHistory': 'off',
- 'passwordMinAge': '0',
- 'passwordChange': 'off',
- 'passwordStorageScheme': 'ssha'
- })))
+ topology_st.standalone.add_s(Entry((BRANCH4_PWP, {
+ 'objectclass': 'top ldapsubentry passwordpolicy'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
+ 'passwordMustChange': 'off',
+ 'passwordExp': 'off',
+ 'passwordHistory': 'off',
+ 'passwordMinAge': '0',
+ 'passwordChange': 'off',
+ 'passwordStorageScheme': 'ssha'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add passwordpolicy for branch4: error ' + e.message['desc'])
assert False
# Add the COS template
try:
- topology.standalone.add_s(Entry((BRANCH4_COS_TMPL, {
- 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
- 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
- 'cosPriority': '1',
- 'cn': 'cn=nsPwTemplateEntry,ou=people,dc=example,dc=com',
- 'pwdpolicysubentry': BRANCH4_PWP
- })))
+ topology_st.standalone.add_s(Entry((BRANCH4_COS_TMPL, {
+ 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
+ 'cosPriority': '1',
+ 'cn': 'cn=nsPwTemplateEntry,ou=people,dc=example,dc=com',
+ 'pwdpolicysubentry': BRANCH4_PWP
+ })))
except ldap.LDAPError as e:
log.error('Failed to add COS template for level3: error ' + e.message['desc'])
assert False
# Add the COS definition
try:
- topology.standalone.add_s(Entry((BRANCH4_COS_DEF, {
- 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
- 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
- 'costemplatedn': BRANCH4_COS_TMPL,
- 'cosAttribute': 'pwdpolicysubentry default operational-default'
- })))
+ topology_st.standalone.add_s(Entry((BRANCH4_COS_DEF, {
+ 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
+ 'costemplatedn': BRANCH4_COS_TMPL,
+ 'cosAttribute': 'pwdpolicysubentry default operational-default'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add COS def for branch4: error ' + e.message['desc'])
assert False
@@ -468,51 +421,51 @@ def test_ticket47980(topology):
#
# Add the container
try:
- topology.standalone.add_s(Entry((BRANCH5_CONTAINER, {
- 'objectclass': 'top nsContainer'.split(),
- 'cn': 'nsPwPolicyContainer'
- })))
+ topology_st.standalone.add_s(Entry((BRANCH5_CONTAINER, {
+ 'objectclass': 'top nsContainer'.split(),
+ 'cn': 'nsPwPolicyContainer'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add subtree container for branch5: error ' + e.message['desc'])
assert False
# Add the password policy subentry
try:
- topology.standalone.add_s(Entry((BRANCH5_PWP, {
- 'objectclass': 'top ldapsubentry passwordpolicy'.split(),
- 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=people,dc=example,dc=com',
- 'passwordMustChange': 'off',
- 'passwordExp': 'off',
- 'passwordHistory': 'off',
- 'passwordMinAge': '0',
- 'passwordChange': 'off',
- 'passwordStorageScheme': 'ssha'
- })))
+ topology_st.standalone.add_s(Entry((BRANCH5_PWP, {
+ 'objectclass': 'top ldapsubentry passwordpolicy'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=people,dc=example,dc=com',
+ 'passwordMustChange': 'off',
+ 'passwordExp': 'off',
+ 'passwordHistory': 'off',
+ 'passwordMinAge': '0',
+ 'passwordChange': 'off',
+ 'passwordStorageScheme': 'ssha'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add passwordpolicy for branch5: error ' + e.message['desc'])
assert False
# Add the COS template
try:
- topology.standalone.add_s(Entry((BRANCH5_COS_TMPL, {
- 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
- 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=people,dc=example,dc=com',
- 'cosPriority': '1',
- 'cn': 'cn=nsPwTemplateEntry,ou=lower,ou=people,dc=example,dc=com',
- 'pwdpolicysubentry': BRANCH5_PWP
- })))
+ topology_st.standalone.add_s(Entry((BRANCH5_COS_TMPL, {
+ 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=people,dc=example,dc=com',
+ 'cosPriority': '1',
+ 'cn': 'cn=nsPwTemplateEntry,ou=lower,ou=people,dc=example,dc=com',
+ 'pwdpolicysubentry': BRANCH5_PWP
+ })))
except ldap.LDAPError as e:
log.error('Failed to add COS template for branch5: error ' + e.message['desc'])
assert False
# Add the COS definition
try:
- topology.standalone.add_s(Entry((BRANCH5_COS_DEF, {
- 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
- 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=people,dc=example,dc=com',
- 'costemplatedn': BRANCH5_COS_TMPL,
- 'cosAttribute': 'pwdpolicysubentry default operational-default'
- })))
+ topology_st.standalone.add_s(Entry((BRANCH5_COS_DEF, {
+ 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=people,dc=example,dc=com',
+ 'costemplatedn': BRANCH5_COS_TMPL,
+ 'cosAttribute': 'pwdpolicysubentry default operational-default'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add COS def for level3: error ' + e.message['desc'])
assert False
@@ -522,51 +475,51 @@ def test_ticket47980(topology):
#
# Add the container
try:
- topology.standalone.add_s(Entry((BRANCH6_CONTAINER, {
- 'objectclass': 'top nsContainer'.split(),
- 'cn': 'nsPwPolicyContainer'
- })))
+ topology_st.standalone.add_s(Entry((BRANCH6_CONTAINER, {
+ 'objectclass': 'top nsContainer'.split(),
+ 'cn': 'nsPwPolicyContainer'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add subtree container for branch6: error ' + e.message['desc'])
assert False
# Add the password policy subentry
try:
- topology.standalone.add_s(Entry((BRANCH6_PWP, {
- 'objectclass': 'top ldapsubentry passwordpolicy'.split(),
- 'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com',
- 'passwordMustChange': 'off',
- 'passwordExp': 'off',
- 'passwordHistory': 'off',
- 'passwordMinAge': '0',
- 'passwordChange': 'off',
- 'passwordStorageScheme': 'ssha'
- })))
+ topology_st.standalone.add_s(Entry((BRANCH6_PWP, {
+ 'objectclass': 'top ldapsubentry passwordpolicy'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com',
+ 'passwordMustChange': 'off',
+ 'passwordExp': 'off',
+ 'passwordHistory': 'off',
+ 'passwordMinAge': '0',
+ 'passwordChange': 'off',
+ 'passwordStorageScheme': 'ssha'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add passwordpolicy for branch6: error ' + e.message['desc'])
assert False
# Add the COS template
try:
- topology.standalone.add_s(Entry((BRANCH6_COS_TMPL, {
- 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
- 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=lower,ou=people,dc=example,dc=com',
- 'cosPriority': '1',
- 'cn': 'cn=nsPwTemplateEntry,ou=lower,ou=lower,ou=people,dc=example,dc=com',
- 'pwdpolicysubentry': BRANCH6_PWP
- })))
+ topology_st.standalone.add_s(Entry((BRANCH6_COS_TMPL, {
+ 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=lower,ou=people,dc=example,dc=com',
+ 'cosPriority': '1',
+ 'cn': 'cn=nsPwTemplateEntry,ou=lower,ou=lower,ou=people,dc=example,dc=com',
+ 'pwdpolicysubentry': BRANCH6_PWP
+ })))
except ldap.LDAPError as e:
log.error('Failed to add COS template for branch6: error ' + e.message['desc'])
assert False
# Add the COS definition
try:
- topology.standalone.add_s(Entry((BRANCH6_COS_DEF, {
- 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
- 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=lower,ou=people,dc=example,dc=com',
- 'costemplatedn': BRANCH6_COS_TMPL,
- 'cosAttribute': 'pwdpolicysubentry default operational-default'
- })))
+ topology_st.standalone.add_s(Entry((BRANCH6_COS_DEF, {
+ 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=lower,ou=people,dc=example,dc=com',
+ 'costemplatedn': BRANCH6_COS_TMPL,
+ 'cosAttribute': 'pwdpolicysubentry default operational-default'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add COS def for branch6: error ' + e.message['desc'])
assert False
@@ -577,7 +530,7 @@ def test_ticket47980(topology):
# Now check that each user has its expected passwordPolicy subentry
#
try:
- entries = topology.standalone.search_s(USER1_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
+ entries = topology_st.standalone.search_s(USER1_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
if not entries[0].hasValue('pwdpolicysubentry', BRANCH1_PWP):
log.fatal('User %s does not have expected pwdpolicysubentry!')
assert False
@@ -586,7 +539,7 @@ def test_ticket47980(topology):
assert False
try:
- entries = topology.standalone.search_s(USER2_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
+ entries = topology_st.standalone.search_s(USER2_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
if not entries[0].hasValue('pwdpolicysubentry', BRANCH2_PWP):
log.fatal('User %s does not have expected pwdpolicysubentry!' % USER2_DN)
assert False
@@ -595,7 +548,7 @@ def test_ticket47980(topology):
assert False
try:
- entries = topology.standalone.search_s(USER3_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
+ entries = topology_st.standalone.search_s(USER3_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
if not entries[0].hasValue('pwdpolicysubentry', BRANCH3_PWP):
log.fatal('User %s does not have expected pwdpolicysubentry!' % USER3_DN)
assert False
@@ -604,7 +557,7 @@ def test_ticket47980(topology):
assert False
try:
- entries = topology.standalone.search_s(USER4_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
+ entries = topology_st.standalone.search_s(USER4_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
if not entries[0].hasValue('pwdpolicysubentry', BRANCH4_PWP):
log.fatal('User %s does not have expected pwdpolicysubentry!' % USER4_DN)
assert False
@@ -613,7 +566,7 @@ def test_ticket47980(topology):
assert False
try:
- entries = topology.standalone.search_s(USER5_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
+ entries = topology_st.standalone.search_s(USER5_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
if not entries[0].hasValue('pwdpolicysubentry', BRANCH5_PWP):
log.fatal('User %s does not have expected pwdpolicysubentry!' % USER5_DN)
assert False
@@ -622,7 +575,7 @@ def test_ticket47980(topology):
assert False
try:
- entries = topology.standalone.search_s(USER6_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
+ entries = topology_st.standalone.search_s(USER6_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
if not entries[0].hasValue('pwdpolicysubentry', BRANCH6_PWP):
log.fatal('User %s does not have expected pwdpolicysubentry!' % USER6_DN)
assert False
diff --git a/dirsrvtests/tests/tickets/ticket47981_test.py b/dirsrvtests/tests/tickets/ticket47981_test.py
index 07761fc..90cd7ff 100644
--- a/dirsrvtests/tests/tickets/ticket47981_test.py
+++ b/dirsrvtests/tests/tickets/ticket47981_test.py
@@ -6,18 +6,12 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
-import ldap.sasl
import logging
+
+import ldap.sasl
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
@@ -33,56 +27,15 @@ SECOND_SUFFIX = 'o=netscaperoot'
BE_NAME = 'netscaperoot'
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-
def addSubtreePwPolicy(inst):
#
# Add subtree policy to the people branch
#
try:
inst.add_s(Entry((BRANCH_CONTAINER, {
- 'objectclass': 'top nsContainer'.split(),
- 'cn': 'nsPwPolicyContainer'
- })))
+ 'objectclass': 'top nsContainer'.split(),
+ 'cn': 'nsPwPolicyContainer'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add subtree container for ou=people: error ' + e.message['desc'])
assert False
@@ -90,15 +43,15 @@ def addSubtreePwPolicy(inst):
# Add the password policy subentry
try:
inst.add_s(Entry((BRANCH_PWP, {
- 'objectclass': 'top ldapsubentry passwordpolicy'.split(),
- 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
- 'passwordMustChange': 'off',
- 'passwordExp': 'off',
- 'passwordHistory': 'off',
- 'passwordMinAge': '0',
- 'passwordChange': 'off',
- 'passwordStorageScheme': 'ssha'
- })))
+ 'objectclass': 'top ldapsubentry passwordpolicy'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
+ 'passwordMustChange': 'off',
+ 'passwordExp': 'off',
+ 'passwordHistory': 'off',
+ 'passwordMinAge': '0',
+ 'passwordChange': 'off',
+ 'passwordStorageScheme': 'ssha'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add passwordpolicy: error ' + e.message['desc'])
assert False
@@ -106,12 +59,12 @@ def addSubtreePwPolicy(inst):
# Add the COS template
try:
inst.add_s(Entry((BRANCH_COS_TMPL, {
- 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
- 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
- 'cosPriority': '1',
- 'cn': 'cn=nsPwTemplateEntry,ou=people,dc=example,dc=com',
- 'pwdpolicysubentry': BRANCH_PWP
- })))
+ 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
+ 'cosPriority': '1',
+ 'cn': 'cn=nsPwTemplateEntry,ou=people,dc=example,dc=com',
+ 'pwdpolicysubentry': BRANCH_PWP
+ })))
except ldap.LDAPError as e:
log.error('Failed to add COS template: error ' + e.message['desc'])
assert False
@@ -119,11 +72,11 @@ def addSubtreePwPolicy(inst):
# Add the COS definition
try:
inst.add_s(Entry((BRANCH_COS_DEF, {
- 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
- 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
- 'costemplatedn': BRANCH_COS_TMPL,
- 'cosAttribute': 'pwdpolicysubentry default operational-default'
- })))
+ 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
+ 'costemplatedn': BRANCH_COS_TMPL,
+ 'cosAttribute': 'pwdpolicysubentry default operational-default'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add COS def: error ' + e.message['desc'])
assert False
@@ -157,7 +110,7 @@ def delSubtreePwPolicy(inst):
time.sleep(0.5)
-def test_ticket47981(topology):
+def test_ticket47981(topology_st):
"""
If there are multiple suffixes, and the last suffix checked does not contain any COS entries,
while other suffixes do, then the vattr cache is not invalidated as it should be. Then any
@@ -171,12 +124,12 @@ def test_ticket47981(topology):
#
log.info('Adding second suffix that will not contain any COS entries...\n')
- topology.standalone.backend.create(SECOND_SUFFIX, {BACKEND_NAME: BE_NAME})
- topology.standalone.mappingtree.create(SECOND_SUFFIX, bename=BE_NAME)
+ topology_st.standalone.backend.create(SECOND_SUFFIX, {BACKEND_NAME: BE_NAME})
+ topology_st.standalone.mappingtree.create(SECOND_SUFFIX, bename=BE_NAME)
try:
- topology.standalone.add_s(Entry((SECOND_SUFFIX, {
- 'objectclass': 'top organization'.split(),
- 'o': BE_NAME})))
+ topology_st.standalone.add_s(Entry((SECOND_SUFFIX, {
+ 'objectclass': 'top organization'.split(),
+ 'o': BE_NAME})))
except ldap.ALREADY_EXISTS:
pass
except ldap.LDAPError as e:
@@ -189,10 +142,10 @@ def test_ticket47981(topology):
log.info('Add our test entries to the default suffix, and proceed with the test...')
try:
- topology.standalone.add_s(Entry((BRANCH, {
- 'objectclass': 'top extensibleObject'.split(),
- 'ou': 'level4'
- })))
+ topology_st.standalone.add_s(Entry((BRANCH, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'ou': 'level4'
+ })))
except ldap.ALREADY_EXISTS:
pass
except ldap.LDAPError as e:
@@ -203,10 +156,10 @@ def test_ticket47981(topology):
# Add a user to the branch
#
try:
- topology.standalone.add_s(Entry((USER_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'user1'
- })))
+ topology_st.standalone.add_s(Entry((USER_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user1'
+ })))
except ldap.LDAPError as e:
log.error('Failed to add user1: error ' + e.message['desc'])
assert False
@@ -215,21 +168,21 @@ def test_ticket47981(topology):
# Enable password policy and add the subtree policy
#
try:
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')])
+ topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')])
except ldap.LDAPError as e:
log.error('Failed to set pwpolicy-local: error ' + e.message['desc'])
assert False
- addSubtreePwPolicy(topology.standalone)
+ addSubtreePwPolicy(topology_st.standalone)
#
# Now check the user has its expected passwordPolicy subentry
#
try:
- entries = topology.standalone.search_s(USER_DN,
- ldap.SCOPE_BASE,
- '(objectclass=top)',
- ['pwdpolicysubentry', 'dn'])
+ entries = topology_st.standalone.search_s(USER_DN,
+ ldap.SCOPE_BASE,
+ '(objectclass=top)',
+ ['pwdpolicysubentry', 'dn'])
if not entries[0].hasAttr('pwdpolicysubentry'):
log.fatal('User does not have expected pwdpolicysubentry!')
assert False
@@ -240,9 +193,9 @@ def test_ticket47981(topology):
#
# Delete the password policy and make sure it is removed from the same user
#
- delSubtreePwPolicy(topology.standalone)
+ delSubtreePwPolicy(topology_st.standalone)
try:
- entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
+ entries = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
if entries[0].hasAttr('pwdpolicysubentry'):
log.fatal('User unexpectedly does have the pwdpolicysubentry!')
assert False
@@ -253,9 +206,9 @@ def test_ticket47981(topology):
#
# Add the subtree policvy back and see if the user now has it
#
- addSubtreePwPolicy(topology.standalone)
+ addSubtreePwPolicy(topology_st.standalone)
try:
- entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
+ entries = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
if not entries[0].hasAttr('pwdpolicysubentry'):
log.fatal('User does not have expected pwdpolicysubentry!')
assert False
diff --git a/dirsrvtests/tests/tickets/ticket47988_test.py b/dirsrvtests/tests/tickets/ticket47988_test.py
index 0e975e4..ee5d16e 100644
--- a/dirsrvtests/tests/tickets/ticket47988_test.py
+++ b/dirsrvtests/tests/tickets/ticket47988_test.py
@@ -11,51 +11,43 @@ Created on Nov 7, 2013
@author: tbordaz
'''
-import os
-import sys
-import time
-import ldap
import logging
-import pytest
-import tarfile
-import stat
import shutil
+import stat
+import tarfile
+import time
from random import randint
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
+import ldap
+import pytest
+from lib389 import Entry
+from lib389._constants import *
+from lib389.topologies import topology_m2
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-#
-# important part. We can deploy Master1 and Master2 on different versions
-#
-installation1_prefix = None
-installation2_prefix = None
-
TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX
OC_NAME = 'OCticket47988'
MUST = "(postalAddress $ postalCode)"
-MAY = "(member $ street)"
+MAY = "(member $ street)"
OTHER_NAME = 'other_entry'
MAX_OTHERS = 10
-BIND_NAME = 'bind_entry'
-BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
-BIND_PW = 'password'
+BIND_NAME = 'bind_entry'
+BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
+BIND_PW = 'password'
ENTRY_NAME = 'test_entry'
-ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
-ENTRY_OC = "top person %s" % OC_NAME
+ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
+ENTRY_OC = "top person %s" % OC_NAME
+
def _oc_definition(oid_ext, name, must=None, may=None):
- oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext
+ oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext
desc = 'To test ticket 47490'
- sup = 'person'
+ sup = 'person'
if not must:
must = MUST
if not may:
@@ -63,120 +55,14 @@ def _oc_definition(oid_ext, name, must=None, may=None):
new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may)
return new_oc
-class TopologyMaster1Master2(object):
- def __init__(self, master1, master2):
- master1.open()
- self.master1 = master1
-
- master2.open()
- self.master2 = master2
-@pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to create a replicated topology for the 'module'.
- The replicated topology is MASTER1 <-> Master2.
- '''
- global installation1_prefix
- global installation2_prefix
-
- #os.environ['USE_VALGRIND'] = '1'
-
- # allocate master1 on a given deployement
- master1 = DirSrv(verbose=False)
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Args for the master1 instance
- args_instance[SER_HOST] = HOST_MASTER_1
- args_instance[SER_PORT] = PORT_MASTER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
- args_master = args_instance.copy()
- master1.allocate(args_master)
-
- # allocate master1 on a given deployement
- master2 = DirSrv(verbose=False)
- if installation2_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation2_prefix
-
- # Args for the consumer instance
- args_instance[SER_HOST] = HOST_MASTER_2
- args_instance[SER_PORT] = PORT_MASTER_2
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
- args_master = args_instance.copy()
- master2.allocate(args_master)
-
- # Get the status of the instance and restart it if it exists
- instance_master1 = master1.exists()
- instance_master2 = master2.exists()
-
- # Remove all the instances
- if instance_master1:
- master1.delete()
- if instance_master2:
- master2.delete()
-
- # Create the instances
- master1.create()
- master1.open()
- master2.create()
- master2.open()
-
- def fin():
- master1.delete()
- master2.delete()
- request.addfinalizer(fin)
-
- #
- # Now prepare the Master-Consumer topology
- #
- # First Enable replication
- master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
- master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
- # Initialize the supplier->consumer
-
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
-
- if not repl_agreement:
- log.fatal("Fail to create a replica agreement")
- sys.exit(1)
-
- log.debug("%s created" % repl_agreement)
-
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
-
- master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
- master1.waitForReplInit(repl_agreement)
-
- # Check replication is working fine
- if master1.testReplication(DEFAULT_SUFFIX, master2):
- log.info('Replication is working.')
- else:
- log.fatal('Replication is not working.')
- assert False
-
- # Here we have two instances master and consumer
- return TopologyMaster1Master2(master1, master2)
-
-
-def _header(topology, label):
- topology.master1.log.info("\n\n###############################################")
- topology.master1.log.info("#######")
- topology.master1.log.info("####### %s" % label)
- topology.master1.log.info("#######")
- topology.master1.log.info("###################################################")
+def _header(topology_m2, label):
+ topology_m2.ms["master1"].log.info("\n\n###############################################")
+ topology_m2.ms["master1"].log.info("#######")
+ topology_m2.ms["master1"].log.info("####### %s" % label)
+ topology_m2.ms["master1"].log.info("#######")
+ topology_m2.ms["master1"].log.info("###################################################")
def _install_schema(server, tarFile):
@@ -216,7 +102,7 @@ def _install_schema(server, tarFile):
os.chmod(server.schemadir, st.st_mode | stat.S_IRUSR | stat.S_IRGRP)
-def test_ticket47988_init(topology):
+def test_ticket47988_init(topology_m2):
"""
It adds
- Objectclass with MAY 'member'
@@ -225,48 +111,48 @@ def test_ticket47988_init(topology):
"""
- _header(topology, 'test_ticket47988_init')
+ _header(topology_m2, 'test_ticket47988_init')
# enable acl error logging
mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(8192))] # REPL
- topology.master1.modify_s(DN_CONFIG, mod)
- topology.master2.modify_s(DN_CONFIG, mod)
+ topology_m2.ms["master1"].modify_s(DN_CONFIG, mod)
+ topology_m2.ms["master2"].modify_s(DN_CONFIG, mod)
mod = [(ldap.MOD_REPLACE, 'nsslapd-accesslog-level', str(260))] # Internal op
- topology.master1.modify_s(DN_CONFIG, mod)
- topology.master2.modify_s(DN_CONFIG, mod)
+ topology_m2.ms["master1"].modify_s(DN_CONFIG, mod)
+ topology_m2.ms["master2"].modify_s(DN_CONFIG, mod)
# add dummy entries
for cpt in range(MAX_OTHERS):
name = "%s%d" % (OTHER_NAME, cpt)
- topology.master1.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
- 'objectclass': "top person".split(),
- 'sn': name,
- 'cn': name})))
+ topology_m2.ms["master1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
+ 'objectclass': "top person".split(),
+ 'sn': name,
+ 'cn': name})))
# check that entry 0 is replicated before
loop = 0
entryDN = "cn=%s0,%s" % (OTHER_NAME, SUFFIX)
while loop <= 10:
try:
- ent = topology.master2.getEntry(entryDN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber'])
+ ent = topology_m2.ms["master2"].getEntry(entryDN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber'])
break
except ldap.NO_SUCH_OBJECT:
time.sleep(1)
loop += 1
assert (loop <= 10)
- topology.master1.stop(timeout=10)
- topology.master2.stop(timeout=10)
+ topology_m2.ms["master1"].stop(timeout=10)
+ topology_m2.ms["master2"].stop(timeout=10)
- #install the specific schema M1: ipa3.3, M2: ipa4.1
- schema_file = os.path.join(topology.master1.getDir(__file__, DATA_DIR), "ticket47988/schema_ipa3.3.tar.gz")
- _install_schema(topology.master1, schema_file)
- schema_file = os.path.join(topology.master1.getDir(__file__, DATA_DIR), "ticket47988/schema_ipa4.1.tar.gz")
- _install_schema(topology.master2, schema_file)
+ # install the specific schema M1: ipa3.3, M2: ipa4.1
+ schema_file = os.path.join(topology_m2.ms["master1"].getDir(__file__, DATA_DIR), "ticket47988/schema_ipa3.3.tar.gz")
+ _install_schema(topology_m2.ms["master1"], schema_file)
+ schema_file = os.path.join(topology_m2.ms["master1"].getDir(__file__, DATA_DIR), "ticket47988/schema_ipa4.1.tar.gz")
+ _install_schema(topology_m2.ms["master2"], schema_file)
- topology.master1.start(timeout=10)
- topology.master2.start(timeout=10)
+ topology_m2.ms["master1"].start(timeout=10)
+ topology_m2.ms["master2"].start(timeout=10)
def _do_update_schema(server, range=3999):
@@ -276,7 +162,8 @@ def _do_update_schema(server, range=3999):
postfix = str(randint(range, range + 1000))
OID = '2.16.840.1.113730.3.8.12.%s' % postfix
NAME = 'thierry%s' % postfix
- value = '( %s NAME \'%s\' DESC \'Override for Group Attributes\' STRUCTURAL MUST ( cn ) MAY sn X-ORIGIN ( \'IPA v4.1.2\' \'user defined\' ) )' % (OID, NAME)
+ value = '( %s NAME \'%s\' DESC \'Override for Group Attributes\' STRUCTURAL MUST ( cn ) MAY sn X-ORIGIN ( \'IPA v4.1.2\' \'user defined\' ) )' % (
+ OID, NAME)
mod = [(ldap.MOD_ADD, 'objectclasses', value)]
server.modify_s('cn=schema', mod)
@@ -286,8 +173,8 @@ def _do_update_entry(supplier=None, consumer=None, attempts=10):
This is doing an update on M2 (IPA4.1) and checks the update has been
propagated to M1 (IPA3.3)
'''
- assert(supplier)
- assert(consumer)
+ assert (supplier)
+ assert (consumer)
entryDN = "cn=%s0,%s" % (OTHER_NAME, SUFFIX)
value = str(randint(100, 200))
mod = [(ldap.MOD_REPLACE, 'telephonenumber', value)]
@@ -306,170 +193,170 @@ def _do_update_entry(supplier=None, consumer=None, attempts=10):
assert (loop <= attempts)
-def _pause_M2_to_M1(topology):
- topology.master1.log.info("\n\n######################### Pause RA M2->M1 ######################\n")
- ents = topology.master2.agreement.list(suffix=SUFFIX)
+def _pause_M2_to_M1(topology_m2):
+ topology_m2.ms["master1"].log.info("\n\n######################### Pause RA M2->M1 ######################\n")
+ ents = topology_m2.ms["master2"].agreement.list(suffix=SUFFIX)
assert len(ents) == 1
- topology.master2.agreement.pause(ents[0].dn)
+ topology_m2.ms["master2"].agreement.pause(ents[0].dn)
-def _resume_M1_to_M2(topology):
- topology.master1.log.info("\n\n######################### resume RA M1->M2 ######################\n")
- ents = topology.master1.agreement.list(suffix=SUFFIX)
+def _resume_M1_to_M2(topology_m2):
+ topology_m2.ms["master1"].log.info("\n\n######################### resume RA M1->M2 ######################\n")
+ ents = topology_m2.ms["master1"].agreement.list(suffix=SUFFIX)
assert len(ents) == 1
- topology.master1.agreement.resume(ents[0].dn)
+ topology_m2.ms["master1"].agreement.resume(ents[0].dn)
-def _pause_M1_to_M2(topology):
- topology.master1.log.info("\n\n######################### Pause RA M1->M2 ######################\n")
- ents = topology.master1.agreement.list(suffix=SUFFIX)
+def _pause_M1_to_M2(topology_m2):
+ topology_m2.ms["master1"].log.info("\n\n######################### Pause RA M1->M2 ######################\n")
+ ents = topology_m2.ms["master1"].agreement.list(suffix=SUFFIX)
assert len(ents) == 1
- topology.master1.agreement.pause(ents[0].dn)
+ topology_m2.ms["master1"].agreement.pause(ents[0].dn)
-def _resume_M2_to_M1(topology):
- topology.master1.log.info("\n\n######################### resume RA M2->M1 ######################\n")
- ents = topology.master2.agreement.list(suffix=SUFFIX)
+def _resume_M2_to_M1(topology_m2):
+ topology_m2.ms["master1"].log.info("\n\n######################### resume RA M2->M1 ######################\n")
+ ents = topology_m2.ms["master2"].agreement.list(suffix=SUFFIX)
assert len(ents) == 1
- topology.master2.agreement.resume(ents[0].dn)
+ topology_m2.ms["master2"].agreement.resume(ents[0].dn)
-def test_ticket47988_1(topology):
+def test_ticket47988_1(topology_m2):
'''
Check that replication is working and pause replication M2->M1
'''
- _header(topology, 'test_ticket47988_1')
+ _header(topology_m2, 'test_ticket47988_1')
- topology.master1.log.debug("\n\nCheck that replication is working and pause replication M2->M1\n")
- _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=5)
- _pause_M2_to_M1(topology)
+ topology_m2.ms["master1"].log.debug("\n\nCheck that replication is working and pause replication M2->M1\n")
+ _do_update_entry(supplier=topology_m2.ms["master2"], consumer=topology_m2.ms["master1"], attempts=5)
+ _pause_M2_to_M1(topology_m2)
-def test_ticket47988_2(topology):
+def test_ticket47988_2(topology_m2):
'''
Update M1 schema and trigger update M1->M2
So M1 should learn new/extended definitions that are in M2 schema
'''
- _header(topology, 'test_ticket47988_2')
+ _header(topology_m2, 'test_ticket47988_2')
- topology.master1.log.debug("\n\nUpdate M1 schema and an entry on M1\n")
- master1_schema_csn = topology.master1.schema.get_schema_csn()
- master2_schema_csn = topology.master2.schema.get_schema_csn()
- topology.master1.log.debug("\nBefore updating the schema on M1\n")
- topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
- topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
+ topology_m2.ms["master1"].log.debug("\n\nUpdate M1 schema and an entry on M1\n")
+ master1_schema_csn = topology_m2.ms["master1"].schema.get_schema_csn()
+ master2_schema_csn = topology_m2.ms["master2"].schema.get_schema_csn()
+ topology_m2.ms["master1"].log.debug("\nBefore updating the schema on M1\n")
+ topology_m2.ms["master1"].log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
+ topology_m2.ms["master1"].log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
# Here M1 should no, should check M2 schema and learn
- _do_update_schema(topology.master1)
- master1_schema_csn = topology.master1.schema.get_schema_csn()
- master2_schema_csn = topology.master2.schema.get_schema_csn()
- topology.master1.log.debug("\nAfter updating the schema on M1\n")
- topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
- topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
+ _do_update_schema(topology_m2.ms["master1"])
+ master1_schema_csn = topology_m2.ms["master1"].schema.get_schema_csn()
+ master2_schema_csn = topology_m2.ms["master2"].schema.get_schema_csn()
+ topology_m2.ms["master1"].log.debug("\nAfter updating the schema on M1\n")
+ topology_m2.ms["master1"].log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
+ topology_m2.ms["master1"].log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
assert (master1_schema_csn)
# to avoid linger effect where a replication session is reused without checking the schema
- _pause_M1_to_M2(topology)
- _resume_M1_to_M2(topology)
-
- #topo.master1.log.debug("\n\nSleep.... attach the debugger dse_modify")
- #time.sleep(60)
- _do_update_entry(supplier=topology.master1, consumer=topology.master2, attempts=15)
- master1_schema_csn = topology.master1.schema.get_schema_csn()
- master2_schema_csn = topology.master2.schema.get_schema_csn()
- topology.master1.log.debug("\nAfter a full replication session\n")
- topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
- topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
+ _pause_M1_to_M2(topology_m2)
+ _resume_M1_to_M2(topology_m2)
+
+ # topo.master1.log.debug("\n\nSleep.... attach the debugger dse_modify")
+ # time.sleep(60)
+ _do_update_entry(supplier=topology_m2.ms["master1"], consumer=topology_m2.ms["master2"], attempts=15)
+ master1_schema_csn = topology_m2.ms["master1"].schema.get_schema_csn()
+ master2_schema_csn = topology_m2.ms["master2"].schema.get_schema_csn()
+ topology_m2.ms["master1"].log.debug("\nAfter a full replication session\n")
+ topology_m2.ms["master1"].log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
+ topology_m2.ms["master1"].log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
assert (master1_schema_csn)
assert (master2_schema_csn)
-def test_ticket47988_3(topology):
+def test_ticket47988_3(topology_m2):
'''
Resume replication M2->M1 and check replication is still working
'''
- _header(topology, 'test_ticket47988_3')
+ _header(topology_m2, 'test_ticket47988_3')
- _resume_M2_to_M1(topology)
- _do_update_entry(supplier=topology.master1, consumer=topology.master2, attempts=5)
- _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=5)
+ _resume_M2_to_M1(topology_m2)
+ _do_update_entry(supplier=topology_m2.ms["master1"], consumer=topology_m2.ms["master2"], attempts=5)
+ _do_update_entry(supplier=topology_m2.ms["master2"], consumer=topology_m2.ms["master1"], attempts=5)
-def test_ticket47988_4(topology):
+def test_ticket47988_4(topology_m2):
'''
Check schemaCSN is identical on both server
And save the nsschemaCSN to later check they do not change unexpectedly
'''
- _header(topology, 'test_ticket47988_4')
+ _header(topology_m2, 'test_ticket47988_4')
- master1_schema_csn = topology.master1.schema.get_schema_csn()
- master2_schema_csn = topology.master2.schema.get_schema_csn()
- topology.master1.log.debug("\n\nMaster1 nsschemaCSN: %s" % master1_schema_csn)
- topology.master1.log.debug("\n\nMaster2 nsschemaCSN: %s" % master2_schema_csn)
+ master1_schema_csn = topology_m2.ms["master1"].schema.get_schema_csn()
+ master2_schema_csn = topology_m2.ms["master2"].schema.get_schema_csn()
+ topology_m2.ms["master1"].log.debug("\n\nMaster1 nsschemaCSN: %s" % master1_schema_csn)
+ topology_m2.ms["master1"].log.debug("\n\nMaster2 nsschemaCSN: %s" % master2_schema_csn)
assert (master1_schema_csn)
assert (master2_schema_csn)
assert (master1_schema_csn == master2_schema_csn)
- topology.master1.saved_schema_csn = master1_schema_csn
- topology.master2.saved_schema_csn = master2_schema_csn
+ topology_m2.ms["master1"].saved_schema_csn = master1_schema_csn
+ topology_m2.ms["master2"].saved_schema_csn = master2_schema_csn
-def test_ticket47988_5(topology):
+def test_ticket47988_5(topology_m2):
'''
Check schemaCSN do not change unexpectedly
'''
- _header(topology, 'test_ticket47988_5')
-
- _do_update_entry(supplier=topology.master1, consumer=topology.master2, attempts=5)
- _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=5)
- master1_schema_csn = topology.master1.schema.get_schema_csn()
- master2_schema_csn = topology.master2.schema.get_schema_csn()
- topology.master1.log.debug("\n\nMaster1 nsschemaCSN: %s" % master1_schema_csn)
- topology.master1.log.debug("\n\nMaster2 nsschemaCSN: %s" % master2_schema_csn)
+ _header(topology_m2, 'test_ticket47988_5')
+
+ _do_update_entry(supplier=topology_m2.ms["master1"], consumer=topology_m2.ms["master2"], attempts=5)
+ _do_update_entry(supplier=topology_m2.ms["master2"], consumer=topology_m2.ms["master1"], attempts=5)
+ master1_schema_csn = topology_m2.ms["master1"].schema.get_schema_csn()
+ master2_schema_csn = topology_m2.ms["master2"].schema.get_schema_csn()
+ topology_m2.ms["master1"].log.debug("\n\nMaster1 nsschemaCSN: %s" % master1_schema_csn)
+ topology_m2.ms["master1"].log.debug("\n\nMaster2 nsschemaCSN: %s" % master2_schema_csn)
assert (master1_schema_csn)
assert (master2_schema_csn)
assert (master1_schema_csn == master2_schema_csn)
- assert (topology.master1.saved_schema_csn == master1_schema_csn)
- assert (topology.master2.saved_schema_csn == master2_schema_csn)
+ assert (topology_m2.ms["master1"].saved_schema_csn == master1_schema_csn)
+ assert (topology_m2.ms["master2"].saved_schema_csn == master2_schema_csn)
-def test_ticket47988_6(topology):
+def test_ticket47988_6(topology_m2):
'''
Update M1 schema and trigger update M2->M1
So M2 should learn new/extended definitions that are in M1 schema
'''
- _header(topology, 'test_ticket47988_6')
+ _header(topology_m2, 'test_ticket47988_6')
- topology.master1.log.debug("\n\nUpdate M1 schema and an entry on M1\n")
- master1_schema_csn = topology.master1.schema.get_schema_csn()
- master2_schema_csn = topology.master2.schema.get_schema_csn()
- topology.master1.log.debug("\nBefore updating the schema on M1\n")
- topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
- topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
+ topology_m2.ms["master1"].log.debug("\n\nUpdate M1 schema and an entry on M1\n")
+ master1_schema_csn = topology_m2.ms["master1"].schema.get_schema_csn()
+ master2_schema_csn = topology_m2.ms["master2"].schema.get_schema_csn()
+ topology_m2.ms["master1"].log.debug("\nBefore updating the schema on M1\n")
+ topology_m2.ms["master1"].log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
+ topology_m2.ms["master1"].log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
# Here M1 should no, should check M2 schema and learn
- _do_update_schema(topology.master1, range=5999)
- master1_schema_csn = topology.master1.schema.get_schema_csn()
- master2_schema_csn = topology.master2.schema.get_schema_csn()
- topology.master1.log.debug("\nAfter updating the schema on M1\n")
- topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
- topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
+ _do_update_schema(topology_m2.ms["master1"], range=5999)
+ master1_schema_csn = topology_m2.ms["master1"].schema.get_schema_csn()
+ master2_schema_csn = topology_m2.ms["master2"].schema.get_schema_csn()
+ topology_m2.ms["master1"].log.debug("\nAfter updating the schema on M1\n")
+ topology_m2.ms["master1"].log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
+ topology_m2.ms["master1"].log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
assert (master1_schema_csn)
# to avoid linger effect where a replication session is reused without checking the schema
- _pause_M1_to_M2(topology)
- _resume_M1_to_M2(topology)
-
- #topo.master1.log.debug("\n\nSleep.... attach the debugger dse_modify")
- #time.sleep(60)
- _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=15)
- master1_schema_csn = topology.master1.schema.get_schema_csn()
- master2_schema_csn = topology.master2.schema.get_schema_csn()
- topology.master1.log.debug("\nAfter a full replication session\n")
- topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
- topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
+ _pause_M1_to_M2(topology_m2)
+ _resume_M1_to_M2(topology_m2)
+
+ # topo.master1.log.debug("\n\nSleep.... attach the debugger dse_modify")
+ # time.sleep(60)
+ _do_update_entry(supplier=topology_m2.ms["master2"], consumer=topology_m2.ms["master1"], attempts=15)
+ master1_schema_csn = topology_m2.ms["master1"].schema.get_schema_csn()
+ master2_schema_csn = topology_m2.ms["master2"].schema.get_schema_csn()
+ topology_m2.ms["master1"].log.debug("\nAfter a full replication session\n")
+ topology_m2.ms["master1"].log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
+ topology_m2.ms["master1"].log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
assert (master1_schema_csn)
assert (master2_schema_csn)
diff --git a/dirsrvtests/tests/tickets/ticket48005_test.py b/dirsrvtests/tests/tickets/ticket48005_test.py
index 7463a84..cd3aa20 100644
--- a/dirsrvtests/tests/tickets/ticket48005_test.py
+++ b/dirsrvtests/tests/tickets/ticket48005_test.py
@@ -6,68 +6,29 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import time
-import ldap
import logging
-import pytest
import re
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
+
+import pytest
from lib389.tasks import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- #request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-def test_ticket48005_setup(topology):
+def test_ticket48005_setup(topology_st):
'''
allow dump core
generate a test ldif file using dbgen.pl
import the ldif
'''
log.info("Ticket 48005 setup...")
- if hasattr(topology.standalone, 'prefix'):
- prefix = topology.standalone.prefix
+ if hasattr(topology_st.standalone, 'prefix'):
+ prefix = topology_st.standalone.prefix
else:
prefix = None
- sysconfig_dirsrv = os.path.join(topology.standalone.get_initconfig_dir(), 'dirsrv')
+ sysconfig_dirsrv = os.path.join(topology_st.standalone.get_initconfig_dir(), 'dirsrv')
cmdline = 'egrep "ulimit -c unlimited" %s' % sysconfig_dirsrv
p = os.popen(cmdline, "r")
ulimitc = p.readline()
@@ -85,13 +46,13 @@ def test_ticket48005_setup(topology):
log.info('Adding it')
cmdline = 'echo LimitCORE=infinity >> %s' % sysconfig_dirsrv_systemd
- topology.standalone.restart(timeout=10)
+ topology_st.standalone.restart(timeout=10)
- ldif_file = topology.standalone.get_ldif_dir() + "/ticket48005.ldif"
+ ldif_file = topology_st.standalone.get_ldif_dir() + "/ticket48005.ldif"
os.system('ls %s' % ldif_file)
os.system('rm -f %s' % ldif_file)
- if hasattr(topology.standalone, 'prefix'):
- prefix = topology.standalone.prefix
+ if hasattr(topology_st.standalone, 'prefix'):
+ prefix = topology_st.standalone.prefix
else:
prefix = None
dbgen_prog = prefix + '/bin/dbgen.pl'
@@ -103,13 +64,13 @@ def test_ticket48005_setup(topology):
num = int(dnnumstr)
log.info("We have %d entries.\n", num)
- importTask = Tasks(topology.standalone)
+ importTask = Tasks(topology_st.standalone)
args = {TASK_WAIT: True}
importTask.importLDIF(SUFFIX, None, ldif_file, args)
log.info('Importing %s complete.' % ldif_file)
-def test_ticket48005_memberof(topology):
+def test_ticket48005_memberof(topology_st):
'''
Enable memberof and referint plugin
Run fixmemberof task without waiting
@@ -118,22 +79,22 @@ def test_ticket48005_memberof(topology):
If no core was found, this test case was successful.
'''
log.info("Ticket 48005 memberof test...")
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
- topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
+ topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
- topology.standalone.restart(timeout=10)
+ topology_st.standalone.restart(timeout=10)
try:
# run the fixup task
- topology.standalone.tasks.fixupMemberOf(suffix=SUFFIX, args={TASK_WAIT: False})
+ topology_st.standalone.tasks.fixupMemberOf(suffix=SUFFIX, args={TASK_WAIT: False})
except ValueError:
log.error('Some problem occured with a value that was provided')
assert False
- topology.standalone.stop(timeout=10)
+ topology_st.standalone.stop(timeout=10)
mytmp = '/tmp'
- logdir = re.sub('errors', '', topology.standalone.errlog)
+ logdir = re.sub('errors', '', topology_st.standalone.errlog)
cmdline = 'ls ' + logdir + 'core*'
p = os.popen(cmdline, "r")
lcore = p.readline()
@@ -143,17 +104,17 @@ def test_ticket48005_memberof(topology):
assert False
log.info('No core files are found')
- topology.standalone.start(timeout=10)
+ topology_st.standalone.start(timeout=10)
- topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
- topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
+ topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
- topology.standalone.restart(timeout=10)
+ topology_st.standalone.restart(timeout=10)
log.info("Ticket 48005 memberof test complete")
-def test_ticket48005_automember(topology):
+def test_ticket48005_automember(topology_st):
'''
Enable automember and referint plugin
1. Run automember rebuild membership task without waiting
@@ -170,36 +131,36 @@ def test_ticket48005_automember(topology):
If no core was found, this test case was successful.
'''
log.info("Ticket 48005 automember test...")
- topology.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER)
- topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
+ topology_st.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER)
+ topology_st.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
# configure automember config entry
log.info('Adding automember config')
try:
- topology.standalone.add_s(Entry(('cn=group cfg,cn=Auto Membership Plugin,cn=plugins,cn=config', {
- 'objectclass': 'top autoMemberDefinition'.split(),
- 'autoMemberScope': 'dc=example,dc=com',
- 'autoMemberFilter': 'objectclass=inetorgperson',
- 'autoMemberDefaultGroup': 'cn=group0,dc=example,dc=com',
- 'autoMemberGroupingAttr': 'uniquemember:dn',
- 'cn': 'group cfg'})))
+ topology_st.standalone.add_s(Entry(('cn=group cfg,cn=Auto Membership Plugin,cn=plugins,cn=config', {
+ 'objectclass': 'top autoMemberDefinition'.split(),
+ 'autoMemberScope': 'dc=example,dc=com',
+ 'autoMemberFilter': 'objectclass=inetorgperson',
+ 'autoMemberDefaultGroup': 'cn=group0,dc=example,dc=com',
+ 'autoMemberGroupingAttr': 'uniquemember:dn',
+ 'cn': 'group cfg'})))
except ValueError:
log.error('Failed to add automember config')
assert False
- topology.standalone.restart(timeout=10)
+ topology_st.standalone.restart(timeout=10)
try:
# run the automember rebuild task
- topology.standalone.tasks.automemberRebuild(suffix=SUFFIX, args={TASK_WAIT: False})
+ topology_st.standalone.tasks.automemberRebuild(suffix=SUFFIX, args={TASK_WAIT: False})
except ValueError:
log.error('Automember rebuild task failed.')
assert False
- topology.standalone.stop(timeout=10)
+ topology_st.standalone.stop(timeout=10)
mytmp = '/tmp'
- logdir = re.sub('errors', '', topology.standalone.errlog)
+ logdir = re.sub('errors', '', topology_st.standalone.errlog)
cmdline = 'ls ' + logdir + 'core*'
p = os.popen(cmdline, "r")
lcore = p.readline()
@@ -209,19 +170,19 @@ def test_ticket48005_automember(topology):
assert False
log.info('No core files are found')
- topology.standalone.start(timeout=10)
+ topology_st.standalone.start(timeout=10)
ldif_out_file = mytmp + "/ticket48005_automember_exported.ldif"
try:
# run the automember export task
- topology.standalone.tasks.automemberExport(suffix=SUFFIX, ldif_out=ldif_out_file, args={TASK_WAIT: False})
+ topology_st.standalone.tasks.automemberExport(suffix=SUFFIX, ldif_out=ldif_out_file, args={TASK_WAIT: False})
except ValueError:
log.error('Automember Export task failed.')
assert False
- topology.standalone.stop(timeout=10)
+ topology_st.standalone.stop(timeout=10)
- logdir = re.sub('errors', '', topology.standalone.errlog)
+ logdir = re.sub('errors', '', topology_st.standalone.errlog)
cmdline = 'ls ' + logdir + 'core*'
p = os.popen(cmdline, "r")
lcore = p.readline()
@@ -231,20 +192,21 @@ def test_ticket48005_automember(topology):
assert False
log.info('No core files are found')
- topology.standalone.start(timeout=10)
+ topology_st.standalone.start(timeout=10)
- ldif_in_file = topology.standalone.get_ldif_dir() + "/ticket48005.ldif"
+ ldif_in_file = topology_st.standalone.get_ldif_dir() + "/ticket48005.ldif"
ldif_out_file = mytmp + "/ticket48005_automember_map.ldif"
try:
# run the automember map task
- topology.standalone.tasks.automemberMap(ldif_in=ldif_in_file, ldif_out=ldif_out_file, args={TASK_WAIT: False})
+ topology_st.standalone.tasks.automemberMap(ldif_in=ldif_in_file, ldif_out=ldif_out_file,
+ args={TASK_WAIT: False})
except ValueError:
log.error('Automember Map task failed.')
assert False
- topology.standalone.stop(timeout=10)
+ topology_st.standalone.stop(timeout=10)
- logdir = re.sub('errors', '', topology.standalone.errlog)
+ logdir = re.sub('errors', '', topology_st.standalone.errlog)
cmdline = 'ls ' + logdir + 'core*'
p = os.popen(cmdline, "r")
lcore = p.readline()
@@ -254,17 +216,17 @@ def test_ticket48005_automember(topology):
assert False
log.info('No core files are found')
- topology.standalone.start(timeout=10)
+ topology_st.standalone.start(timeout=10)
- topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
- topology.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER)
+ topology_st.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
+ topology_st.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER)
- topology.standalone.restart(timeout=10)
+ topology_st.standalone.restart(timeout=10)
log.info("Ticket 48005 automember test complete")
-def test_ticket48005_syntaxvalidate(topology):
+def test_ticket48005_syntaxvalidate(topology_st):
'''
Run syntax validate task without waiting
Shutdown the server
@@ -275,15 +237,15 @@ def test_ticket48005_syntaxvalidate(topology):
try:
# run the fixup task
- topology.standalone.tasks.syntaxValidate(suffix=SUFFIX, args={TASK_WAIT: False})
+ topology_st.standalone.tasks.syntaxValidate(suffix=SUFFIX, args={TASK_WAIT: False})
except ValueError:
log.error('Some problem occured with a value that was provided')
assert False
- topology.standalone.stop(timeout=10)
+ topology_st.standalone.stop(timeout=10)
mytmp = '/tmp'
- logdir = re.sub('errors', '', topology.standalone.errlog)
+ logdir = re.sub('errors', '', topology_st.standalone.errlog)
cmdline = 'ls ' + logdir + 'core*'
p = os.popen(cmdline, "r")
lcore = p.readline()
@@ -293,12 +255,12 @@ def test_ticket48005_syntaxvalidate(topology):
assert False
log.info('No core files are found')
- topology.standalone.start(timeout=10)
+ topology_st.standalone.start(timeout=10)
log.info("Ticket 48005 syntax validate test complete")
-def test_ticket48005_usn(topology):
+def test_ticket48005_usn(topology_st):
'''
Enable entryusn
Delete all user entries.
@@ -308,19 +270,19 @@ def test_ticket48005_usn(topology):
If no core was found, this test case was successful.
'''
log.info("Ticket 48005 usn test...")
- topology.standalone.plugins.enable(name=PLUGIN_USN)
+ topology_st.standalone.plugins.enable(name=PLUGIN_USN)
- topology.standalone.restart(timeout=10)
+ topology_st.standalone.restart(timeout=10)
try:
- entries = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=inetorgperson)")
+ entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=inetorgperson)")
if len(entries) == 0:
log.info("No user entries.")
else:
for i in range(len(entries)):
# log.info('Deleting %s' % entries[i].dn)
try:
- topology.standalone.delete_s(entries[i].dn)
+ topology_st.standalone.delete_s(entries[i].dn)
except ValueError:
log.error('delete_s %s failed.' % entries[i].dn)
assert False
@@ -330,15 +292,15 @@ def test_ticket48005_usn(topology):
try:
# run the usn tombstone cleanup
- topology.standalone.tasks.usnTombstoneCleanup(suffix=SUFFIX, bename="userRoot", args={TASK_WAIT: False})
+ topology_st.standalone.tasks.usnTombstoneCleanup(suffix=SUFFIX, bename="userRoot", args={TASK_WAIT: False})
except ValueError:
log.error('Some problem occured with a value that was provided')
assert False
- topology.standalone.stop(timeout=10)
+ topology_st.standalone.stop(timeout=10)
mytmp = '/tmp'
- logdir = re.sub('errors', '', topology.standalone.errlog)
+ logdir = re.sub('errors', '', topology_st.standalone.errlog)
cmdline = 'ls ' + logdir + 'core*'
p = os.popen(cmdline, "r")
lcore = p.readline()
@@ -348,16 +310,16 @@ def test_ticket48005_usn(topology):
assert False
log.info('No core files are found')
- topology.standalone.start(timeout=10)
+ topology_st.standalone.start(timeout=10)
- topology.standalone.plugins.disable(name=PLUGIN_USN)
+ topology_st.standalone.plugins.disable(name=PLUGIN_USN)
- topology.standalone.restart(timeout=10)
+ topology_st.standalone.restart(timeout=10)
log.info("Ticket 48005 usn test complete")
-def test_ticket48005_schemareload(topology):
+def test_ticket48005_schemareload(topology_st):
'''
Run schema reload task without waiting
Shutdown the server
@@ -368,14 +330,14 @@ def test_ticket48005_schemareload(topology):
try:
# run the schema reload task
- topology.standalone.tasks.schemaReload(args={TASK_WAIT: False})
+ topology_st.standalone.tasks.schemaReload(args={TASK_WAIT: False})
except ValueError:
log.error('Schema Reload task failed.')
assert False
- topology.standalone.stop(timeout=10)
+ topology_st.standalone.stop(timeout=10)
- logdir = re.sub('errors', '', topology.standalone.errlog)
+ logdir = re.sub('errors', '', topology_st.standalone.errlog)
cmdline = 'ls ' + logdir + 'core*'
p = os.popen(cmdline, "r")
lcore = p.readline()
@@ -386,7 +348,7 @@ def test_ticket48005_schemareload(topology):
assert False
log.info('No core files are found')
- topology.standalone.start(timeout=10)
+ topology_st.standalone.start(timeout=10)
log.info("Ticket 48005 schema reload test complete")
diff --git a/dirsrvtests/tests/tickets/ticket48013_test.py b/dirsrvtests/tests/tickets/ticket48013_test.py
index 12bfa32..e178a0c 100644
--- a/dirsrvtests/tests/tickets/ticket48013_test.py
+++ b/dirsrvtests/tests/tickets/ticket48013_test.py
@@ -6,31 +6,16 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import time
-import ldap
-import logging
-import pytest
import ldapurl
+import pytest
from ldap.ldapobject import SimpleLDAPObject
from ldap.syncrepl import SyncreplConsumer
-from lib389 import DirSrv
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
class SyncObject(SimpleLDAPObject, SyncreplConsumer):
def __init__(self, uri):
@@ -46,34 +31,7 @@ class SyncObject(SimpleLDAPObject, SyncreplConsumer):
self.syncrepl_poll(all=1)
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_ticket48013(topology):
+def test_ticket48013(topology_st):
'''
Content Synchonization: Test that invalid cookies are caught
'''
@@ -82,16 +40,16 @@ def test_ticket48013(topology):
# Enable dynamic plugins
try:
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
+ topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
except ldap.LDAPError as e:
ldap.error('Failed to enable dynamic plugin!' + e.message['desc'])
assert False
# Enable retro changelog
- topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
+ topology_st.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
# Enbale content sync plugin
- topology.standalone.plugins.enable(name=PLUGIN_REPL_SYNC)
+ topology_st.standalone.plugins.enable(name=PLUGIN_REPL_SYNC)
# Set everything up
ldap_url = ldapurl.LDAPUrl('ldap://%s:%s' % (HOST_STANDALONE,
diff --git a/dirsrvtests/tests/tickets/ticket48026_test.py b/dirsrvtests/tests/tickets/ticket48026_test.py
index 7eae5c8..5af3d90 100644
--- a/dirsrvtests/tests/tickets/ticket48026_test.py
+++ b/dirsrvtests/tests/tickets/ticket48026_test.py
@@ -6,65 +6,24 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX
USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_ticket48026(topology):
+def test_ticket48026(topology_st):
'''
Test that multiple attribute uniqueness works correctly.
'''
# Configure the plugin
- inst = topology.standalone
+ inst = topology_st.standalone
inst.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS)
try:
@@ -73,7 +32,7 @@ def test_ticket48026(topology):
[(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'mail'),
(ldap.MOD_ADD, 'uniqueness-attribute-name',
'mailAlternateAddress'),
- ])
+ ])
except ldap.LDAPError as e:
log.fatal('test_ticket48026: Failed to configure plugin for "mail": error ' + e.message['desc'])
assert False
@@ -87,7 +46,7 @@ def test_ticket48026(topology):
'cn': 'user 1',
'uid': 'user1',
                                         'mail': 'user1@example.com',
-                                     'mailAlternateAddress' : 'user1@alt.example.com',
+                                     'mailAlternateAddress': 'user1@alt.example.com',
'userpassword': 'password'})))
except ldap.LDAPError as e:
log.fatal('test_ticket48026: Failed to add test user' + USER1_DN + ': error ' + e.message['desc'])
@@ -95,11 +54,11 @@ def test_ticket48026(topology):
try:
inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
- 'sn': '2',
- 'cn': 'user 2',
- 'uid': 'user2',
-                                     'mail': 'user1@example.com',
- 'userpassword': 'password'})))
+ 'sn': '2',
+ 'cn': 'user 2',
+ 'uid': 'user2',
+                                     'mail': 'user1@example.com',
+ 'userpassword': 'password'})))
except ldap.CONSTRAINT_VIOLATION:
pass
else:
@@ -108,24 +67,25 @@ def test_ticket48026(topology):
try:
inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
- 'sn': '2',
- 'cn': 'user 2',
- 'uid': 'user2',
-                                     'mailAlternateAddress': 'user1@alt.example.com',
- 'userpassword': 'password'})))
+ 'sn': '2',
+ 'cn': 'user 2',
+ 'uid': 'user2',
+                                     'mailAlternateAddress': 'user1@alt.example.com',
+ 'userpassword': 'password'})))
except ldap.CONSTRAINT_VIOLATION:
pass
else:
- log.error('test_ticket48026: Adding of 2nd entry(mailAlternateAddress v mailAlternateAddress) incorrectly succeeded')
+ log.error(
+ 'test_ticket48026: Adding of 2nd entry(mailAlternateAddress v mailAlternateAddress) incorrectly succeeded')
assert False
try:
inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
- 'sn': '2',
- 'cn': 'user 2',
- 'uid': 'user2',
-                                     'mail': 'user1@alt.example.com',
- 'userpassword': 'password'})))
+ 'sn': '2',
+ 'cn': 'user 2',
+ 'uid': 'user2',
+                                     'mail': 'user1@alt.example.com',
+ 'userpassword': 'password'})))
except ldap.CONSTRAINT_VIOLATION:
pass
else:
@@ -134,11 +94,11 @@ def test_ticket48026(topology):
try:
inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
- 'sn': '2',
- 'cn': 'user 2',
- 'uid': 'user2',
-                                     'mailAlternateAddress': 'user1@example.com',
- 'userpassword': 'password'})))
+ 'sn': '2',
+ 'cn': 'user 2',
+ 'uid': 'user2',
+                                     'mailAlternateAddress': 'user1@example.com',
+ 'userpassword': 'password'})))
except ldap.CONSTRAINT_VIOLATION:
pass
else:
diff --git a/dirsrvtests/tests/tickets/ticket48109_test.py b/dirsrvtests/tests/tickets/ticket48109_test.py
index 85faefe..5327402 100644
--- a/dirsrvtests/tests/tickets/ticket48109_test.py
+++ b/dirsrvtests/tests/tickets/ticket48109_test.py
@@ -6,71 +6,18 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
UID_INDEX = 'cn=uid,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config'
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- return TopologyStandalone(standalone)
-
-
-def test_ticket48109(topology):
+def test_ticket48109(topology_st):
'''
Set SubStr lengths to cn=uid,cn=index,...
objectClass: extensibleObject
@@ -81,54 +28,54 @@ def test_ticket48109(topology):
log.info('Test case 0')
# add substr setting to UID_INDEX
try:
- topology.standalone.modify_s(UID_INDEX,
- [(ldap.MOD_ADD, 'objectClass', 'extensibleObject'),
- (ldap.MOD_ADD, 'nsIndexType', 'sub'),
- (ldap.MOD_ADD, 'nsSubStrBegin', '2'),
- (ldap.MOD_ADD, 'nsSubStrEnd', '2')])
+ topology_st.standalone.modify_s(UID_INDEX,
+ [(ldap.MOD_ADD, 'objectClass', 'extensibleObject'),
+ (ldap.MOD_ADD, 'nsIndexType', 'sub'),
+ (ldap.MOD_ADD, 'nsSubStrBegin', '2'),
+ (ldap.MOD_ADD, 'nsSubStrEnd', '2')])
except ldap.LDAPError as e:
log.error('Failed to add substr lengths: error ' + e.message['desc'])
assert False
# restart the server to apply the indexing
- topology.standalone.restart(timeout=10)
+ topology_st.standalone.restart(timeout=10)
# add a test user
UID = 'auser0'
USER_DN = 'uid=%s,%s' % (UID, SUFFIX)
try:
- topology.standalone.add_s(Entry((USER_DN, {
- 'objectclass': 'top person organizationalPerson inetOrgPerson'.split(),
- 'cn': 'a user0',
- 'sn': 'user0',
- 'givenname': 'a',
- 'mail': UID})))
+ topology_st.standalone.add_s(Entry((USER_DN, {
+ 'objectclass': 'top person organizationalPerson inetOrgPerson'.split(),
+ 'cn': 'a user0',
+ 'sn': 'user0',
+ 'givenname': 'a',
+ 'mail': UID})))
except ldap.LDAPError as e:
log.error('Failed to add ' + USER_DN + ': error ' + e.message['desc'])
assert False
- entries = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=a*)')
+ entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=a*)')
assert len(entries) == 1
# restart the server to check the access log
- topology.standalone.restart(timeout=10)
+ topology_st.standalone.restart(timeout=10)
- cmdline = 'egrep %s %s | egrep "uid=a\*"' % (SUFFIX, topology.standalone.accesslog)
+ cmdline = 'egrep %s %s | egrep "uid=a\*"' % (SUFFIX, topology_st.standalone.accesslog)
p = os.popen(cmdline, "r")
l0 = p.readline()
if l0 == "":
- log.error('Search with "(uid=a*)" is not logged in ' + topology.standalone.accesslog)
+ log.error('Search with "(uid=a*)" is not logged in ' + topology_st.standalone.accesslog)
assert False
else:
- #regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*')
+ # regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*')
regex = re.compile(r'.*\s+(conn=\d+ op=\d+)\s+SRCH .*')
match = regex.match(l0)
log.info('match: %s' % match.group(1))
- cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology.standalone.accesslog)
+ cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology_st.standalone.accesslog)
p = os.popen(cmdline, "r")
l1 = p.readline()
if l1 == "":
- log.error('Search result of "(uid=a*)" is not logged in ' + topology.standalone.accesslog)
+ log.error('Search result of "(uid=a*)" is not logged in ' + topology_st.standalone.accesslog)
assert False
else:
log.info('l1: %s' % l1)
@@ -150,11 +97,11 @@ def test_ticket48109(topology):
# clean up substr setting to UID_INDEX
try:
- topology.standalone.modify_s(UID_INDEX,
- [(ldap.MOD_DELETE, 'objectClass', 'extensibleObject'),
- (ldap.MOD_DELETE, 'nsIndexType', 'sub'),
- (ldap.MOD_DELETE, 'nsSubStrBegin', '2'),
- (ldap.MOD_DELETE, 'nsSubStrEnd', '2')])
+ topology_st.standalone.modify_s(UID_INDEX,
+ [(ldap.MOD_DELETE, 'objectClass', 'extensibleObject'),
+ (ldap.MOD_DELETE, 'nsIndexType', 'sub'),
+ (ldap.MOD_DELETE, 'nsSubStrBegin', '2'),
+ (ldap.MOD_DELETE, 'nsSubStrEnd', '2')])
except ldap.LDAPError as e:
log.error('Failed to delete substr lengths: error ' + e.message['desc'])
assert False
@@ -168,53 +115,53 @@ def test_ticket48109(topology):
log.info('Test case 1')
# add substr setting to UID_INDEX
try:
- topology.standalone.modify_s(UID_INDEX,
- [(ldap.MOD_ADD, 'nsIndexType', 'sub'),
- (ldap.MOD_ADD, 'nsMatchingRule', 'nssubstrbegin=2'),
- (ldap.MOD_ADD, 'nsMatchingRule', 'nssubstrend=2')])
+ topology_st.standalone.modify_s(UID_INDEX,
+ [(ldap.MOD_ADD, 'nsIndexType', 'sub'),
+ (ldap.MOD_ADD, 'nsMatchingRule', 'nssubstrbegin=2'),
+ (ldap.MOD_ADD, 'nsMatchingRule', 'nssubstrend=2')])
except ldap.LDAPError as e:
log.error('Failed to add substr lengths: error ' + e.message['desc'])
assert False
# restart the server to apply the indexing
- topology.standalone.restart(timeout=10)
+ topology_st.standalone.restart(timeout=10)
# add a test user
UID = 'buser1'
USER_DN = 'uid=%s,%s' % (UID, SUFFIX)
try:
- topology.standalone.add_s(Entry((USER_DN, {
- 'objectclass': 'top person organizationalPerson inetOrgPerson'.split(),
- 'cn': 'b user1',
- 'sn': 'user1',
- 'givenname': 'b',
- 'mail': UID})))
+ topology_st.standalone.add_s(Entry((USER_DN, {
+ 'objectclass': 'top person organizationalPerson inetOrgPerson'.split(),
+ 'cn': 'b user1',
+ 'sn': 'user1',
+ 'givenname': 'b',
+ 'mail': UID})))
except ldap.LDAPError as e:
log.error('Failed to add ' + USER_DN + ': error ' + e.message['desc'])
assert False
- entries = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=b*)')
+ entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=b*)')
assert len(entries) == 1
# restart the server to check the access log
- topology.standalone.restart(timeout=10)
+ topology_st.standalone.restart(timeout=10)
- cmdline = 'egrep %s %s | egrep "uid=b\*"' % (SUFFIX, topology.standalone.accesslog)
+ cmdline = 'egrep %s %s | egrep "uid=b\*"' % (SUFFIX, topology_st.standalone.accesslog)
p = os.popen(cmdline, "r")
l0 = p.readline()
if l0 == "":
- log.error('Search with "(uid=b*)" is not logged in ' + topology.standalone.accesslog)
+ log.error('Search with "(uid=b*)" is not logged in ' + topology_st.standalone.accesslog)
assert False
else:
- #regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*')
+ # regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*')
regex = re.compile(r'.*\s+(conn=\d+ op=\d+)\s+SRCH .*')
match = regex.match(l0)
log.info('match: %s' % match.group(1))
- cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology.standalone.accesslog)
+ cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology_st.standalone.accesslog)
p = os.popen(cmdline, "r")
l1 = p.readline()
if l1 == "":
- log.error('Search result of "(uid=*b)" is not logged in ' + topology.standalone.accesslog)
+ log.error('Search result of "(uid=*b)" is not logged in ' + topology_st.standalone.accesslog)
assert False
else:
log.info('l1: %s' % l1)
@@ -236,10 +183,10 @@ def test_ticket48109(topology):
# clean up substr setting to UID_INDEX
try:
- topology.standalone.modify_s(UID_INDEX,
- [(ldap.MOD_DELETE, 'nsIndexType', 'sub'),
- (ldap.MOD_DELETE, 'nsMatchingRule', 'nssubstrbegin=2'),
- (ldap.MOD_DELETE, 'nsMatchingRule', 'nssubstrend=2')])
+ topology_st.standalone.modify_s(UID_INDEX,
+ [(ldap.MOD_DELETE, 'nsIndexType', 'sub'),
+ (ldap.MOD_DELETE, 'nsMatchingRule', 'nssubstrbegin=2'),
+ (ldap.MOD_DELETE, 'nsMatchingRule', 'nssubstrend=2')])
except ldap.LDAPError as e:
log.error('Failed to delete substr lengths: error ' + e.message['desc'])
assert False
@@ -258,59 +205,59 @@ def test_ticket48109(topology):
# add substr setting to UID_INDEX
try:
- topology.standalone.modify_s(UID_INDEX,
- [(ldap.MOD_ADD, 'nsIndexType', 'sub'),
- (ldap.MOD_ADD, 'nsMatchingRule', 'nssubstrbegin=3'),
- (ldap.MOD_ADD, 'nsMatchingRule', 'nssubstrend=3'),
- (ldap.MOD_ADD, 'objectClass', 'extensibleObject'),
- (ldap.MOD_ADD, 'nsSubStrBegin', '2'),
- (ldap.MOD_ADD, 'nsSubStrEnd', '2')])
+ topology_st.standalone.modify_s(UID_INDEX,
+ [(ldap.MOD_ADD, 'nsIndexType', 'sub'),
+ (ldap.MOD_ADD, 'nsMatchingRule', 'nssubstrbegin=3'),
+ (ldap.MOD_ADD, 'nsMatchingRule', 'nssubstrend=3'),
+ (ldap.MOD_ADD, 'objectClass', 'extensibleObject'),
+ (ldap.MOD_ADD, 'nsSubStrBegin', '2'),
+ (ldap.MOD_ADD, 'nsSubStrEnd', '2')])
except ldap.LDAPError as e:
log.error('Failed to add substr lengths: error ' + e.message['desc'])
assert False
# restart the server to apply the indexing
- topology.standalone.restart(timeout=10)
+ topology_st.standalone.restart(timeout=10)
# add a test user
UID = 'cuser2'
USER_DN = 'uid=%s,%s' % (UID, SUFFIX)
try:
- topology.standalone.add_s(Entry((USER_DN, {
- 'objectclass': 'top person organizationalPerson inetOrgPerson'.split(),
- 'cn': 'c user2',
- 'sn': 'user2',
- 'givenname': 'c',
- 'mail': UID})))
+ topology_st.standalone.add_s(Entry((USER_DN, {
+ 'objectclass': 'top person organizationalPerson inetOrgPerson'.split(),
+ 'cn': 'c user2',
+ 'sn': 'user2',
+ 'givenname': 'c',
+ 'mail': UID})))
except ldap.LDAPError as e:
log.error('Failed to add ' + USER_DN + ': error ' + e.message['desc'])
assert False
- entries = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=c*)')
+ entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=c*)')
assert len(entries) == 1
- entries = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*2)')
+ entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*2)')
assert len(entries) == 1
# restart the server to check the access log
- topology.standalone.restart(timeout=10)
+ topology_st.standalone.restart(timeout=10)
- cmdline = 'egrep %s %s | egrep "uid=c\*"' % (SUFFIX, topology.standalone.accesslog)
+ cmdline = 'egrep %s %s | egrep "uid=c\*"' % (SUFFIX, topology_st.standalone.accesslog)
p = os.popen(cmdline, "r")
l0 = p.readline()
if l0 == "":
- log.error('Search with "(uid=c*)" is not logged in ' + topology.standalone.accesslog)
+ log.error('Search with "(uid=c*)" is not logged in ' + topology_st.standalone.accesslog)
assert False
else:
- #regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*')
+ # regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*')
regex = re.compile(r'.*\s+(conn=\d+ op=\d+)\s+SRCH .*')
match = regex.match(l0)
log.info('match: %s' % match.group(1))
- cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology.standalone.accesslog)
+ cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology_st.standalone.accesslog)
p = os.popen(cmdline, "r")
l1 = p.readline()
if l1 == "":
- log.error('Search result of "(uid=c*)" is not logged in ' + topology.standalone.accesslog)
+ log.error('Search result of "(uid=c*)" is not logged in ' + topology_st.standalone.accesslog)
assert False
else:
log.info('l1: %s' % l1)
@@ -330,22 +277,22 @@ def test_ticket48109(topology):
else:
log.info('Test case 2-1 - OK - correct substr index used')
- cmdline = 'egrep %s %s | egrep "uid=\*2"' % (SUFFIX, topology.standalone.accesslog)
+ cmdline = 'egrep %s %s | egrep "uid=\*2"' % (SUFFIX, topology_st.standalone.accesslog)
p = os.popen(cmdline, "r")
l0 = p.readline()
if l0 == "":
- log.error('Search with "(uid=*2)" is not logged in ' + topology.standalone.accesslog)
+ log.error('Search with "(uid=*2)" is not logged in ' + topology_st.standalone.accesslog)
assert False
else:
- #regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*')
+ # regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*')
regex = re.compile(r'.*\s+(conn=\d+ op=\d+)\s+SRCH .*')
match = regex.match(l0)
log.info('match: %s' % match.group(1))
- cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology.standalone.accesslog)
+ cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology_st.standalone.accesslog)
p = os.popen(cmdline, "r")
l1 = p.readline()
if l1 == "":
- log.error('Search result of "(uid=*2)" is not logged in ' + topology.standalone.accesslog)
+ log.error('Search result of "(uid=*2)" is not logged in ' + topology_st.standalone.accesslog)
assert False
else:
log.info('l1: %s' % l1)
@@ -367,13 +314,13 @@ def test_ticket48109(topology):
# clean up substr setting to UID_INDEX
try:
- topology.standalone.modify_s(UID_INDEX,
- [(ldap.MOD_DELETE, 'nsIndexType', 'sub'),
- (ldap.MOD_DELETE, 'nsMatchingRule', 'nssubstrbegin=3'),
- (ldap.MOD_DELETE, 'nsMatchingRule', 'nssubstrend=3'),
- (ldap.MOD_DELETE, 'objectClass', 'extensibleObject'),
- (ldap.MOD_DELETE, 'nsSubStrBegin', '2'),
- (ldap.MOD_DELETE, 'nsSubStrEnd', '2')])
+ topology_st.standalone.modify_s(UID_INDEX,
+ [(ldap.MOD_DELETE, 'nsIndexType', 'sub'),
+ (ldap.MOD_DELETE, 'nsMatchingRule', 'nssubstrbegin=3'),
+ (ldap.MOD_DELETE, 'nsMatchingRule', 'nssubstrend=3'),
+ (ldap.MOD_DELETE, 'objectClass', 'extensibleObject'),
+ (ldap.MOD_DELETE, 'nsSubStrBegin', '2'),
+ (ldap.MOD_DELETE, 'nsSubStrEnd', '2')])
except ldap.LDAPError as e:
log.error('Failed to delete substr lengths: error ' + e.message['desc'])
assert False
diff --git a/dirsrvtests/tests/tickets/ticket48170_test.py b/dirsrvtests/tests/tickets/ticket48170_test.py
index 7cbea4a..adfbcb8 100644
--- a/dirsrvtests/tests/tickets/ticket48170_test.py
+++ b/dirsrvtests/tests/tickets/ticket48170_test.py
@@ -6,59 +6,15 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_ticket48170(topology):
+def test_ticket48170(topology_st):
'''
Attempt to add a nsIndexType wikth an invalid value: "eq,pres"
'''
@@ -66,7 +22,7 @@ def test_ticket48170(topology):
INDEX_DN = 'cn=cn,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config'
REJECTED = False
try:
- topology.standalone.modify_s(INDEX_DN, [(ldap.MOD_ADD, 'nsINdexType', 'eq,pres')])
+ topology_st.standalone.modify_s(INDEX_DN, [(ldap.MOD_ADD, 'nsINdexType', 'eq,pres')])
except ldap.UNWILLING_TO_PERFORM:
log.info('Index update correctly rejected')
REJECTED = True
diff --git a/dirsrvtests/tests/tickets/ticket48194_test.py b/dirsrvtests/tests/tickets/ticket48194_test.py
index 92f4371..773dc95 100644
--- a/dirsrvtests/tests/tickets/ticket48194_test.py
+++ b/dirsrvtests/tests/tickets/ticket48194_test.py
@@ -6,19 +6,15 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
+import logging
import subprocess
import time
+
import ldap
-import logging
import pytest
-import shutil
-from lib389 import DirSrv, Entry, tools
-from lib389 import DirSrvTools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
@@ -34,63 +30,20 @@ plus_all_ecount_noweak = 0
plus_all_dcount_noweak = 0
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-
-def _header(topology, label):
- topology.standalone.log.info("\n\n###############################################")
- topology.standalone.log.info("####### %s" % label)
- topology.standalone.log.info("###############################################")
+def _header(topology_st, label):
+ topology_st.standalone.log.info("\n\n###############################################")
+ topology_st.standalone.log.info("####### %s" % label)
+ topology_st.standalone.log.info("###############################################")
-def test_init(topology):
+def test_init(topology_st):
"""
Generate self signed cert and import it to the DS cert db.
Enable SSL
"""
- _header(topology, 'Testing Ticket 48194 - harden the list of ciphers available by default')
+ _header(topology_st, 'Testing Ticket 48194 - harden the list of ciphers available by default')
- conf_dir = topology.standalone.confdir
+ conf_dir = topology_st.standalone.confdir
log.info("\n######################### Checking existing certs ######################\n")
os.system('certutil -L -d %s -n "CA certificate"' % conf_dir)
@@ -121,8 +74,9 @@ def test_init(topology):
os.system('certutil -G -d %s -z %s -f %s' % (conf_dir, noisefile, pwdfile))
log.info("\n######################### Creating self-signed CA certificate ######################\n")
- os.system('( echo y ; echo ; echo y ) | certutil -S -n "CA certificate" -s "cn=CAcert" -x -t "CT,," -m 1000 -v 120 -d %s -z %s -f %s -2' %
- (conf_dir, noisefile, pwdfile))
+ os.system(
+ '( echo y ; echo ; echo y ) | certutil -S -n "CA certificate" -s "cn=CAcert" -x -t "CT,," -m 1000 -v 120 -d %s -z %s -f %s -2' %
+ (conf_dir, noisefile, pwdfile))
log.info("\n######################### Exporting the CA certificate to cacert.asc ######################\n")
cafile = '%s/cacert.asc' % conf_dir
@@ -138,8 +92,9 @@ def test_init(topology):
log.info("\n######################### Generate the server certificate ######################\n")
ohostname = os.popen('hostname --fqdn', "r")
myhostname = ohostname.readline()
- os.system('certutil -S -n "%s" -s "cn=%s,ou=389 Directory Server" -c "CA certificate" -t "u,u,u" -m 1001 -v 120 -d %s -z %s -f %s' %
- (SERVERCERT, myhostname.rstrip(), conf_dir, noisefile, pwdfile))
+ os.system(
+ 'certutil -S -n "%s" -s "cn=%s,ou=389 Directory Server" -c "CA certificate" -t "u,u,u" -m 1001 -v 120 -d %s -z %s -f %s' %
+ (SERVERCERT, myhostname.rstrip(), conf_dir, noisefile, pwdfile))
log.info("\n######################### create the pin file ######################\n")
pinfile = '%s/pin.txt' % (conf_dir)
@@ -150,25 +105,25 @@ def test_init(topology):
time.sleep(1)
log.info("\n######################### enable SSL in the directory server with all ciphers ######################\n")
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3', 'off'),
- (ldap.MOD_REPLACE, 'nsTLS1', 'on'),
- (ldap.MOD_REPLACE, 'nsSSLClientAuth', 'allowed'),
- (ldap.MOD_REPLACE, 'allowWeakCipher', 'on'),
- (ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+all')])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3', 'off'),
+ (ldap.MOD_REPLACE, 'nsTLS1', 'on'),
+ (ldap.MOD_REPLACE, 'nsSSLClientAuth', 'allowed'),
+ (ldap.MOD_REPLACE, 'allowWeakCipher', 'on'),
+ (ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+all')])
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-security', 'on'),
- (ldap.MOD_REPLACE, 'nsslapd-ssl-check-hostname', 'off'),
- (ldap.MOD_REPLACE, 'nsslapd-secureport', LDAPSPORT)])
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-security', 'on'),
+ (ldap.MOD_REPLACE, 'nsslapd-ssl-check-hostname', 'off'),
+ (ldap.MOD_REPLACE, 'nsslapd-secureport', LDAPSPORT)])
- topology.standalone.add_s(Entry((RSA_DN, {'objectclass': "top nsEncryptionModule".split(),
- 'cn': RSA,
- 'nsSSLPersonalitySSL': SERVERCERT,
- 'nsSSLToken': 'internal (software)',
- 'nsSSLActivation': 'on'})))
+ topology_st.standalone.add_s(Entry((RSA_DN, {'objectclass': "top nsEncryptionModule".split(),
+ 'cn': RSA,
+ 'nsSSLPersonalitySSL': SERVERCERT,
+ 'nsSSLToken': 'internal (software)',
+ 'nsSSLActivation': 'on'})))
-def connectWithOpenssl(topology, cipher, expect):
+def connectWithOpenssl(topology_st, cipher, expect):
"""
Connect with the given cipher
Condition:
@@ -177,7 +132,7 @@ def connectWithOpenssl(topology, cipher, expect):
access log: "Cannot communicate securely with peer:
no common encryption algorithm(s)."
"""
- log.info("Testing %s -- expect to handshake %s", cipher,"successfully" if expect else "failed")
+ log.info("Testing %s -- expect to handshake %s", cipher, "successfully" if expect else "failed")
myurl = 'localhost:%s' % LDAPSPORT
cmdline = ['/usr/bin/openssl', 's_client', '-connect', myurl, '-cipher', cipher]
@@ -211,232 +166,237 @@ def connectWithOpenssl(topology, cipher, expect):
assert False
-def test_run_0(topology):
+def test_run_0(topology_st):
"""
Check nsSSL3Ciphers: +all
All ciphers are enabled except null.
Note: allowWeakCipher: on
"""
- _header(topology, 'Test Case 1 - Check the ciphers availability for "+all"; allowWeakCipher: on')
+ _header(topology_st, 'Test Case 1 - Check the ciphers availability for "+all"; allowWeakCipher: on')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '64')])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '64')])
log.info("\n######################### Restarting the server ######################\n")
- topology.standalone.restart(timeout=120)
+ topology_st.standalone.restart(timeout=120)
- connectWithOpenssl(topology, 'RC4-SHA', True)
- connectWithOpenssl(topology, 'AES256-SHA256', True)
+ connectWithOpenssl(topology_st, 'RC4-SHA', True)
+ connectWithOpenssl(topology_st, 'AES256-SHA256', True)
-def test_run_1(topology):
+def test_run_1(topology_st):
"""
Check nsSSL3Ciphers: +all
All ciphers are enabled except null.
Note: default allowWeakCipher (i.e., off) for +all
"""
- _header(topology, 'Test Case 2 - Check the ciphers availability for "+all" with default allowWeakCiphers')
+ _header(topology_st, 'Test Case 2 - Check the ciphers availability for "+all" with default allowWeakCiphers')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '64')])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '64')])
# Make sure allowWeakCipher is not set.
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'allowWeakCipher', None)])
+ topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'allowWeakCipher', None)])
log.info("\n######################### Restarting the server ######################\n")
- topology.standalone.stop(timeout=10)
- os.system('mv %s %s.48194_0' % (topology.standalone.errlog, topology.standalone.errlog))
- os.system('touch %s' % (topology.standalone.errlog))
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.48194_0' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(2)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
- connectWithOpenssl(topology, 'RC4-SHA', False)
- connectWithOpenssl(topology, 'AES256-SHA256', True)
+ connectWithOpenssl(topology_st, 'RC4-SHA', False)
+ connectWithOpenssl(topology_st, 'AES256-SHA256', True)
-def test_run_2(topology):
+def test_run_2(topology_st):
"""
Check nsSSL3Ciphers: +rsa_aes_128_sha,+rsa_aes_256_sha
rsa_aes_128_sha, tls_rsa_aes_128_sha, rsa_aes_256_sha, tls_rsa_aes_256_sha are enabled.
default allowWeakCipher
"""
- _header(topology, 'Test Case 3 - Check the ciphers availability for "+rsa_aes_128_sha,+rsa_aes_256_sha" with default allowWeakCipher')
+ _header(topology_st,
+ 'Test Case 3 - Check the ciphers availability for "+rsa_aes_128_sha,+rsa_aes_256_sha" with default allowWeakCipher')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+rsa_aes_128_sha,+rsa_aes_256_sha')])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(ENCRYPTION_DN,
+ [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+rsa_aes_128_sha,+rsa_aes_256_sha')])
log.info("\n######################### Restarting the server ######################\n")
- topology.standalone.stop(timeout=10)
- os.system('mv %s %s.48194_1' % (topology.standalone.errlog, topology.standalone.errlog))
- os.system('touch %s' % (topology.standalone.errlog))
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.48194_1' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(2)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
- connectWithOpenssl(topology, 'RC4-SHA', False)
- connectWithOpenssl(topology, 'AES256-SHA256', False)
- connectWithOpenssl(topology, 'AES128-SHA', True)
- connectWithOpenssl(topology, 'AES256-SHA', True)
+ connectWithOpenssl(topology_st, 'RC4-SHA', False)
+ connectWithOpenssl(topology_st, 'AES256-SHA256', False)
+ connectWithOpenssl(topology_st, 'AES128-SHA', True)
+ connectWithOpenssl(topology_st, 'AES256-SHA', True)
-def test_run_3(topology):
+def test_run_3(topology_st):
"""
Check nsSSL3Ciphers: -all
All ciphers are disabled.
default allowWeakCipher
"""
- _header(topology, 'Test Case 4 - Check the ciphers availability for "-all"')
+ _header(topology_st, 'Test Case 4 - Check the ciphers availability for "-all"')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '-all')])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '-all')])
log.info("\n######################### Restarting the server ######################\n")
- topology.standalone.stop(timeout=10)
- os.system('mv %s %s.48194_2' % (topology.standalone.errlog, topology.standalone.errlog))
- os.system('touch %s' % (topology.standalone.errlog))
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.48194_2' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(1)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
- connectWithOpenssl(topology, 'RC4-SHA', False)
- connectWithOpenssl(topology, 'AES256-SHA256', False)
+ connectWithOpenssl(topology_st, 'RC4-SHA', False)
+ connectWithOpenssl(topology_st, 'AES256-SHA256', False)
-def test_run_4(topology):
+def test_run_4(topology_st):
"""
Check no nsSSL3Ciphers
Default ciphers are enabled.
default allowWeakCipher
"""
- _header(topology, 'Test Case 5 - Check no nsSSL3Ciphers (-all) with default allowWeakCipher')
+ _header(topology_st, 'Test Case 5 - Check no nsSSL3Ciphers (-all) with default allowWeakCipher')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'nsSSL3Ciphers', '-all')])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'nsSSL3Ciphers', '-all')])
log.info("\n######################### Restarting the server ######################\n")
- topology.standalone.stop(timeout=10)
- os.system('mv %s %s.48194_3' % (topology.standalone.errlog, topology.standalone.errlog))
- os.system('touch %s' % (topology.standalone.errlog))
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.48194_3' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(2)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
- connectWithOpenssl(topology, 'RC4-SHA', False)
- connectWithOpenssl(topology, 'AES256-SHA256', True)
+ connectWithOpenssl(topology_st, 'RC4-SHA', False)
+ connectWithOpenssl(topology_st, 'AES256-SHA256', True)
-def test_run_5(topology):
+def test_run_5(topology_st):
"""
Check nsSSL3Ciphers: default
Default ciphers are enabled.
default allowWeakCipher
"""
- _header(topology, 'Test Case 6 - Check default nsSSL3Ciphers (default setting) with default allowWeakCipher')
+ _header(topology_st, 'Test Case 6 - Check default nsSSL3Ciphers (default setting) with default allowWeakCipher')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', 'default')])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', 'default')])
log.info("\n######################### Restarting the server ######################\n")
- topology.standalone.stop(timeout=10)
- os.system('mv %s %s.48194_4' % (topology.standalone.errlog, topology.standalone.errlog))
- os.system('touch %s' % (topology.standalone.errlog))
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.48194_4' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(2)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
- connectWithOpenssl(topology, 'RC4-SHA', False)
- connectWithOpenssl(topology, 'AES256-SHA256', True)
+ connectWithOpenssl(topology_st, 'RC4-SHA', False)
+ connectWithOpenssl(topology_st, 'AES256-SHA256', True)
-def test_run_6(topology):
+def test_run_6(topology_st):
"""
Check nsSSL3Ciphers: +all,-TLS_RSA_WITH_AES_256_CBC_SHA256
All ciphers are disabled.
default allowWeakCipher
"""
- _header(topology, 'Test Case 7 - Check nsSSL3Ciphers: +all,-TLS_RSA_WITH_AES_256_CBC_SHA256 with default allowWeakCipher')
+ _header(topology_st,
+ 'Test Case 7 - Check nsSSL3Ciphers: +all,-TLS_RSA_WITH_AES_256_CBC_SHA256 with default allowWeakCipher')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+all,-TLS_RSA_WITH_AES_256_CBC_SHA256')])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(ENCRYPTION_DN,
+ [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+all,-TLS_RSA_WITH_AES_256_CBC_SHA256')])
log.info("\n######################### Restarting the server ######################\n")
- topology.standalone.stop(timeout=10)
- os.system('mv %s %s.48194_5' % (topology.standalone.errlog, topology.standalone.errlog))
- os.system('touch %s' % (topology.standalone.errlog))
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.48194_5' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(2)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
- connectWithOpenssl(topology, 'RC4-SHA', False)
- connectWithOpenssl(topology, 'AES256-SHA256', False)
- connectWithOpenssl(topology, 'AES128-SHA', True)
+ connectWithOpenssl(topology_st, 'RC4-SHA', False)
+ connectWithOpenssl(topology_st, 'AES256-SHA256', False)
+ connectWithOpenssl(topology_st, 'AES128-SHA', True)
-def test_run_7(topology):
+def test_run_7(topology_st):
"""
Check nsSSL3Ciphers: -all,+rsa_rc4_128_md5
All ciphers are disabled.
default allowWeakCipher
"""
- _header(topology, 'Test Case 8 - Check nsSSL3Ciphers: -all,+rsa_rc4_128_md5 with default allowWeakCipher')
+ _header(topology_st, 'Test Case 8 - Check nsSSL3Ciphers: -all,+rsa_rc4_128_md5 with default allowWeakCipher')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '-all,+rsa_rc4_128_md5')])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '-all,+rsa_rc4_128_md5')])
log.info("\n######################### Restarting the server ######################\n")
- topology.standalone.stop(timeout=10)
- os.system('mv %s %s.48194_6' % (topology.standalone.errlog, topology.standalone.errlog))
- os.system('touch %s' % (topology.standalone.errlog))
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.48194_6' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(2)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
- connectWithOpenssl(topology, 'RC4-SHA', False)
- connectWithOpenssl(topology, 'AES256-SHA256', False)
- connectWithOpenssl(topology, 'RC4-MD5', True)
+ connectWithOpenssl(topology_st, 'RC4-SHA', False)
+ connectWithOpenssl(topology_st, 'AES256-SHA256', False)
+ connectWithOpenssl(topology_st, 'RC4-MD5', True)
-def test_run_8(topology):
+def test_run_8(topology_st):
"""
Check nsSSL3Ciphers: default + allowWeakCipher: off
Strong Default ciphers are enabled.
"""
- _header(topology, 'Test Case 9 - Check default nsSSL3Ciphers (default setting + allowWeakCipher: off)')
+ _header(topology_st, 'Test Case 9 - Check default nsSSL3Ciphers (default setting + allowWeakCipher: off)')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', 'default'),
- (ldap.MOD_REPLACE, 'allowWeakCipher', 'off')])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', 'default'),
+ (ldap.MOD_REPLACE, 'allowWeakCipher', 'off')])
log.info("\n######################### Restarting the server ######################\n")
- topology.standalone.stop(timeout=10)
- os.system('mv %s %s.48194_7' % (topology.standalone.errlog, topology.standalone.errlog))
- os.system('touch %s' % (topology.standalone.errlog))
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.48194_7' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(2)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
- connectWithOpenssl(topology, 'RC4-SHA', False)
- connectWithOpenssl(topology, 'AES256-SHA256', True)
+ connectWithOpenssl(topology_st, 'RC4-SHA', False)
+ connectWithOpenssl(topology_st, 'AES256-SHA256', True)
-def test_run_9(topology):
+def test_run_9(topology_st):
"""
Check no nsSSL3Ciphers
Default ciphers are enabled.
allowWeakCipher: on
nsslapd-errorlog-level: 0
"""
- _header(topology, 'Test Case 10 - Check no nsSSL3Ciphers (default setting) with no errorlog-level & allowWeakCipher on')
+ _header(topology_st,
+ 'Test Case 10 - Check no nsSSL3Ciphers (default setting) with no errorlog-level & allowWeakCipher on')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', None),
- (ldap.MOD_REPLACE, 'allowWeakCipher', 'on')])
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', None)])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', None),
+ (ldap.MOD_REPLACE, 'allowWeakCipher', 'on')])
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', None)])
log.info("\n######################### Restarting the server ######################\n")
- topology.standalone.stop(timeout=10)
- os.system('mv %s %s.48194_8' % (topology.standalone.errlog, topology.standalone.errlog))
- os.system('touch %s' % (topology.standalone.errlog))
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.48194_8' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(2)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
- connectWithOpenssl(topology, 'RC4-SHA', True)
- connectWithOpenssl(topology, 'AES256-SHA256', True)
+ connectWithOpenssl(topology_st, 'RC4-SHA', True)
+ connectWithOpenssl(topology_st, 'AES256-SHA256', True)
-def test_run_10(topology):
+def test_run_10(topology_st):
"""
Check nsSSL3Ciphers: -TLS_RSA_WITH_NULL_MD5,+TLS_RSA_WITH_RC4_128_MD5,
+TLS_RSA_EXPORT_WITH_RC4_40_MD5,+TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,
@@ -449,43 +409,44 @@ def test_run_10(topology):
allowWeakCipher: on
nsslapd-errorlog-level: 0
"""
- _header(topology, 'Test Case 11 - Check nsSSL3Ciphers: long list using the NSS Cipher Suite name with allowWeakCipher on')
+ _header(topology_st,
+ 'Test Case 11 - Check nsSSL3Ciphers: long list using the NSS Cipher Suite name with allowWeakCipher on')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers',
- '-TLS_RSA_WITH_NULL_MD5,+TLS_RSA_WITH_RC4_128_MD5,+TLS_RSA_EXPORT_WITH_RC4_40_MD5,+TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,+TLS_DHE_RSA_WITH_DES_CBC_SHA,+SSL_RSA_FIPS_WITH_DES_CBC_SHA,+TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,+SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA,+TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,+TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,-SSL_CK_RC4_128_WITH_MD5,-SSL_CK_RC4_128_EXPORT40_WITH_MD5,-SSL_CK_RC2_128_CBC_WITH_MD5,-SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5,-SSL_CK_DES_64_CBC_WITH_MD5,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5')])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers',
+ '-TLS_RSA_WITH_NULL_MD5,+TLS_RSA_WITH_RC4_128_MD5,+TLS_RSA_EXPORT_WITH_RC4_40_MD5,+TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,+TLS_DHE_RSA_WITH_DES_CBC_SHA,+SSL_RSA_FIPS_WITH_DES_CBC_SHA,+TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,+SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA,+TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,+TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,-SSL_CK_RC4_128_WITH_MD5,-SSL_CK_RC4_128_EXPORT40_WITH_MD5,-SSL_CK_RC2_128_CBC_WITH_MD5,-SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5,-SSL_CK_DES_64_CBC_WITH_MD5,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5')])
log.info("\n######################### Restarting the server ######################\n")
- topology.standalone.stop(timeout=10)
- os.system('mv %s %s.48194_9' % (topology.standalone.errlog, topology.standalone.errlog))
- os.system('touch %s' % (topology.standalone.errlog))
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.48194_9' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(1)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
- connectWithOpenssl(topology, 'RC4-SHA', False)
- connectWithOpenssl(topology, 'RC4-MD5', True)
- connectWithOpenssl(topology, 'AES256-SHA256', False)
+ connectWithOpenssl(topology_st, 'RC4-SHA', False)
+ connectWithOpenssl(topology_st, 'RC4-MD5', True)
+ connectWithOpenssl(topology_st, 'AES256-SHA256', False)
-def test_run_11(topology):
+def test_run_11(topology_st):
"""
Check nsSSL3Ciphers: +fortezza
SSL_GetImplementedCiphers does not return this as a secuire cipher suite
"""
- _header(topology, 'Test Case 12 - Check nsSSL3Ciphers: +fortezza, which is not supported')
+ _header(topology_st, 'Test Case 12 - Check nsSSL3Ciphers: +fortezza, which is not supported')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+fortezza')])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+fortezza')])
log.info("\n######################### Restarting the server ######################\n")
- topology.standalone.stop(timeout=10)
- os.system('mv %s %s.48194_10' % (topology.standalone.errlog, topology.standalone.errlog))
- os.system('touch %s' % (topology.standalone.errlog))
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.48194_10' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(1)
- topology.standalone.start(timeout=120)
+ topology_st.standalone.start(timeout=120)
- connectWithOpenssl(topology, 'RC4-SHA', False)
- connectWithOpenssl(topology, 'AES256-SHA256', False)
+ connectWithOpenssl(topology_st, 'RC4-SHA', False)
+ connectWithOpenssl(topology_st, 'AES256-SHA256', False)
if __name__ == '__main__':
diff --git a/dirsrvtests/tests/tickets/ticket48212_test.py b/dirsrvtests/tests/tickets/ticket48212_test.py
index 66ce669..6ae0ae9 100644
--- a/dirsrvtests/tests/tickets/ticket48212_test.py
+++ b/dirsrvtests/tests/tickets/ticket48212_test.py
@@ -1,68 +1,22 @@
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
-from ldap.controls import SimplePagedResultsControl
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
-installation1_prefix = None
-
MYSUFFIX = 'dc=example,dc=com'
MYSUFFIXBE = 'userRoot'
_MYLDIF = 'example1k_posix.ldif'
UIDNUMBERDN = "cn=uidnumber,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config"
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- return TopologyStandalone(standalone)
-
-def runDbVerify(topology):
- topology.standalone.log.info("\n\n +++++ dbverify +++++\n")
- sbin_dir = get_sbin_dir(prefix=topology.standalone.prefix)
- dbverifyCMD = sbin_dir + "/dbverify -Z " + topology.standalone.inst + " -V"
+def runDbVerify(topology_st):
+ topology_st.standalone.log.info("\n\n +++++ dbverify +++++\n")
+ sbin_dir = get_sbin_dir(prefix=topology_st.standalone.prefix)
+ dbverifyCMD = sbin_dir + "/dbverify -Z " + topology_st.standalone.inst + " -V"
dbverifyOUT = os.popen(dbverifyCMD, "r")
- topology.standalone.log.info("Running %s" % dbverifyCMD)
+ topology_st.standalone.log.info("Running %s" % dbverifyCMD)
running = True
error = False
while running:
@@ -72,34 +26,35 @@ def runDbVerify(topology):
elif "libdb:" in l:
running = False
error = True
- topology.standalone.log.info("%s" % l)
+ topology_st.standalone.log.info("%s" % l)
elif "verify failed" in l:
error = True
running = False
- topology.standalone.log.info("%s" % l)
+ topology_st.standalone.log.info("%s" % l)
if error:
- topology.standalone.log.fatal("dbverify failed")
+ topology_st.standalone.log.fatal("dbverify failed")
assert False
else:
- topology.standalone.log.info("dbverify passed")
+ topology_st.standalone.log.info("dbverify passed")
+
-def reindexUidNumber(topology):
- topology.standalone.log.info("\n\n +++++ reindex uidnumber +++++\n")
- sbin_dir = get_sbin_dir(prefix=topology.standalone.prefix)
- indexCMD = sbin_dir + "/db2index.pl -Z " + topology.standalone.inst + " -D \"" + DN_DM + "\" -w \"" + PASSWORD + "\" -n " + MYSUFFIXBE + " -t uidnumber"
+def reindexUidNumber(topology_st):
+ topology_st.standalone.log.info("\n\n +++++ reindex uidnumber +++++\n")
+ sbin_dir = get_sbin_dir(prefix=topology_st.standalone.prefix)
+ indexCMD = sbin_dir + "/db2index.pl -Z " + topology_st.standalone.inst + " -D \"" + DN_DM + "\" -w \"" + PASSWORD + "\" -n " + MYSUFFIXBE + " -t uidnumber"
indexOUT = os.popen(indexCMD, "r")
- topology.standalone.log.info("Running %s" % indexCMD)
+ topology_st.standalone.log.info("Running %s" % indexCMD)
time.sleep(30)
- tailCMD = "tail -n 3 " + topology.standalone.errlog
+ tailCMD = "tail -n 3 " + topology_st.standalone.errlog
tailOUT = os.popen(tailCMD, "r")
assert 'Finished indexing' in tailOUT.read()
-def test_ticket48212(topology):
+def test_ticket48212(topology_st):
"""
Import posixAccount entries.
Index uidNumber
@@ -109,71 +64,72 @@ def test_ticket48212(topology):
run dbverify to see if it reports the db corruption or not
if no corruption is reported, the bug fix was verified.
"""
- log.info('Testing Ticket 48212 - Dynamic nsMatchingRule changes had no effect on the attrinfo thus following reindexing, as well.')
+ log.info(
+ 'Testing Ticket 48212 - Dynamic nsMatchingRule changes had no effect on the attrinfo thus following reindexing, as well.')
# bind as directory manager
- topology.standalone.log.info("Bind as %s" % DN_DM)
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
-
+ topology_st.standalone.log.info("Bind as %s" % DN_DM)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
- data_dir_path = topology.standalone.getDir(__file__, DATA_DIR)
+ data_dir_path = topology_st.standalone.getDir(__file__, DATA_DIR)
ldif_file = data_dir_path + "ticket48212/" + _MYLDIF
try:
- ldif_dir = topology.standalone.get_ldif_dir()
+ ldif_dir = topology_st.standalone.get_ldif_dir()
shutil.copy(ldif_file, ldif_dir)
- ldif_file = ldif_dir + '/' + _MYLDIF
+ ldif_file = ldif_dir + '/' + _MYLDIF
except:
log.fatal('Failed to copy ldif to instance ldif dir')
assert False
- topology.standalone.log.info("\n\n######################### Import Test data (%s) ######################\n" % ldif_file)
+ topology_st.standalone.log.info(
+ "\n\n######################### Import Test data (%s) ######################\n" % ldif_file)
args = {TASK_WAIT: True}
- importTask = Tasks(topology.standalone)
+ importTask = Tasks(topology_st.standalone)
importTask.importLDIF(MYSUFFIX, MYSUFFIXBE, ldif_file, args)
args = {TASK_WAIT: True}
- runDbVerify(topology)
+ runDbVerify(topology_st)
- topology.standalone.log.info("\n\n######################### Add index by uidnumber ######################\n")
+ topology_st.standalone.log.info("\n\n######################### Add index by uidnumber ######################\n")
try:
- topology.standalone.add_s(Entry((UIDNUMBERDN, {'objectclass': "top nsIndex".split(),
- 'cn': 'uidnumber',
- 'nsSystemIndex': 'false',
- 'nsIndexType': "pres eq".split()})))
+ topology_st.standalone.add_s(Entry((UIDNUMBERDN, {'objectclass': "top nsIndex".split(),
+ 'cn': 'uidnumber',
+ 'nsSystemIndex': 'false',
+ 'nsIndexType': "pres eq".split()})))
except ValueError:
- topology.standalone.log.fatal("add_s failed: %s", ValueError)
+ topology_st.standalone.log.fatal("add_s failed: %s", ValueError)
- topology.standalone.log.info("\n\n######################### reindexing... ######################\n")
- reindexUidNumber(topology)
+ topology_st.standalone.log.info("\n\n######################### reindexing... ######################\n")
+ reindexUidNumber(topology_st)
- runDbVerify(topology)
+ runDbVerify(topology_st)
- topology.standalone.log.info("\n\n######################### Add nsMatchingRule ######################\n")
+ topology_st.standalone.log.info("\n\n######################### Add nsMatchingRule ######################\n")
try:
- topology.standalone.modify_s(UIDNUMBERDN, [(ldap.MOD_ADD, 'nsMatchingRule', 'integerOrderingMatch')])
+ topology_st.standalone.modify_s(UIDNUMBERDN, [(ldap.MOD_ADD, 'nsMatchingRule', 'integerOrderingMatch')])
except ValueError:
- topology.standalone.log.fatal("modify_s failed: %s", ValueError)
+ topology_st.standalone.log.fatal("modify_s failed: %s", ValueError)
- topology.standalone.log.info("\n\n######################### reindexing... ######################\n")
- reindexUidNumber(topology)
+ topology_st.standalone.log.info("\n\n######################### reindexing... ######################\n")
+ reindexUidNumber(topology_st)
- runDbVerify(topology)
+ runDbVerify(topology_st)
- topology.standalone.log.info("\n\n######################### Delete nsMatchingRule ######################\n")
+ topology_st.standalone.log.info("\n\n######################### Delete nsMatchingRule ######################\n")
try:
- topology.standalone.modify_s(UIDNUMBERDN, [(ldap.MOD_DELETE, 'nsMatchingRule', 'integerOrderingMatch')])
+ topology_st.standalone.modify_s(UIDNUMBERDN, [(ldap.MOD_DELETE, 'nsMatchingRule', 'integerOrderingMatch')])
except ValueError:
- topology.standalone.log.fatal("modify_s failed: %s", ValueError)
+ topology_st.standalone.log.fatal("modify_s failed: %s", ValueError)
- reindexUidNumber(topology)
+ reindexUidNumber(topology_st)
- runDbVerify(topology)
+ runDbVerify(topology_st)
log.info('Testcase PASSED')
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s %s" % CURRENT_FILE)
-
diff --git a/dirsrvtests/tests/tickets/ticket48214_test.py b/dirsrvtests/tests/tickets/ticket48214_test.py
index 492a39c..32f9231 100644
--- a/dirsrvtests/tests/tickets/ticket48214_test.py
+++ b/dirsrvtests/tests/tickets/ticket48214_test.py
@@ -1,15 +1,8 @@
-import os
-import sys
-import time
-import ldap
import logging
+
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
-from ldap.controls import SimplePagedResultsControl
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
@@ -17,127 +10,88 @@ MYSUFFIX = 'dc=example,dc=com'
MYSUFFIXBE = 'userRoot'
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-
-def getMaxBerSizeFromDseLdif(topology):
- topology.standalone.log.info(" +++++ Get maxbersize from dse.ldif +++++\n")
- dse_ldif = topology.standalone.confdir + '/dse.ldif'
+def getMaxBerSizeFromDseLdif(topology_st):
+ topology_st.standalone.log.info(" +++++ Get maxbersize from dse.ldif +++++\n")
+ dse_ldif = topology_st.standalone.confdir + '/dse.ldif'
grepMaxBerCMD = "egrep nsslapd-maxbersize " + dse_ldif
- topology.standalone.log.info(" Run CMD: %s\n" % grepMaxBerCMD)
+ topology_st.standalone.log.info(" Run CMD: %s\n" % grepMaxBerCMD)
grepMaxBerOUT = os.popen(grepMaxBerCMD, "r")
running = True
maxbersize = -1
while running:
l = grepMaxBerOUT.readline()
if l == "":
- topology.standalone.log.info(" Empty: %s\n" % l)
+ topology_st.standalone.log.info(" Empty: %s\n" % l)
running = False
elif "nsslapd-maxbersize:" in l.lower():
running = False
fields = l.split()
if len(fields) >= 2:
maxbersize = fields[1]
- topology.standalone.log.info(" Right format - %s %s\n" % (fields[0], fields[1]))
+ topology_st.standalone.log.info(" Right format - %s %s\n" % (fields[0], fields[1]))
else:
- topology.standalone.log.info(" Wrong format - %s\n" % l)
+ topology_st.standalone.log.info(" Wrong format - %s\n" % l)
else:
- topology.standalone.log.info(" Else?: %s\n" % l)
+ topology_st.standalone.log.info(" Else?: %s\n" % l)
return maxbersize
-def checkMaxBerSize(topology):
- topology.standalone.log.info(" +++++ Check Max Ber Size +++++\n")
- maxbersizestr = getMaxBerSizeFromDseLdif(topology)
+
+def checkMaxBerSize(topology_st):
+ topology_st.standalone.log.info(" +++++ Check Max Ber Size +++++\n")
+ maxbersizestr = getMaxBerSizeFromDseLdif(topology_st)
maxbersize = int(maxbersizestr)
isdefault = True
defaultvalue = 2097152
if maxbersize < 0:
- topology.standalone.log.info(" No nsslapd-maxbersize found in dse.ldif\n")
+ topology_st.standalone.log.info(" No nsslapd-maxbersize found in dse.ldif\n")
elif maxbersize == 0:
- topology.standalone.log.info(" nsslapd-maxbersize: %d\n" % maxbersize)
+ topology_st.standalone.log.info(" nsslapd-maxbersize: %d\n" % maxbersize)
else:
isdefault = False
- topology.standalone.log.info(" nsslapd-maxbersize: %d\n" % maxbersize)
+ topology_st.standalone.log.info(" nsslapd-maxbersize: %d\n" % maxbersize)
try:
- entry = topology.standalone.search_s('cn=config', ldap.SCOPE_BASE,
- "(cn=*)",
- ['nsslapd-maxbersize'])
+ entry = topology_st.standalone.search_s('cn=config', ldap.SCOPE_BASE,
+ "(cn=*)",
+ ['nsslapd-maxbersize'])
if entry:
searchedsize = entry[0].getValue('nsslapd-maxbersize')
- topology.standalone.log.info(" ldapsearch returned nsslapd-maxbersize: %s\n" % searchedsize)
+ topology_st.standalone.log.info(" ldapsearch returned nsslapd-maxbersize: %s\n" % searchedsize)
else:
- topology.standalone.log.fatal('ERROR: cn=config is not found?')
+ topology_st.standalone.log.fatal('ERROR: cn=config is not found?')
assert False
except ldap.LDAPError as e:
- topology.standalone.log.error('ERROR: Failed to search for user entry: ' + e.message['desc'])
+ topology_st.standalone.log.error('ERROR: Failed to search for user entry: ' + e.message['desc'])
assert False
if isdefault:
- topology.standalone.log.info(" Checking %d vs %d\n" % (int(searchedsize), defaultvalue))
+ topology_st.standalone.log.info(" Checking %d vs %d\n" % (int(searchedsize), defaultvalue))
assert int(searchedsize) == defaultvalue
-def test_ticket48214_run(topology):
+def test_ticket48214_run(topology_st):
"""
Check ldapsearch returns the correct maxbersize when it is not explicitly set.
"""
log.info('Testing Ticket 48214 - ldapsearch on nsslapd-maxbersize returns 0 instead of current value')
# bind as directory manager
- topology.standalone.log.info("Bind as %s" % DN_DM)
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.log.info("Bind as %s" % DN_DM)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.log.info("\n\n######################### Out of Box ######################\n")
- checkMaxBerSize(topology)
+ topology_st.standalone.log.info("\n\n######################### Out of Box ######################\n")
+ checkMaxBerSize(topology_st)
- topology.standalone.log.info("\n\n######################### Add nsslapd-maxbersize: 0 ######################\n")
- topology.standalone.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-maxbersize', '0')])
- checkMaxBerSize(topology)
+ topology_st.standalone.log.info("\n\n######################### Add nsslapd-maxbersize: 0 ######################\n")
+ topology_st.standalone.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-maxbersize', '0')])
+ checkMaxBerSize(topology_st)
- topology.standalone.log.info("\n\n######################### Add nsslapd-maxbersize: 10000 ######################\n")
- topology.standalone.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-maxbersize', '10000')])
- checkMaxBerSize(topology)
+ topology_st.standalone.log.info(
+ "\n\n######################### Add nsslapd-maxbersize: 10000 ######################\n")
+ topology_st.standalone.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-maxbersize', '10000')])
+ checkMaxBerSize(topology_st)
- topology.standalone.log.info("ticket48214 was successfully verified.")
+ topology_st.standalone.log.info("ticket48214 was successfully verified.")
if __name__ == '__main__':
diff --git a/dirsrvtests/tests/tickets/ticket48226_test.py b/dirsrvtests/tests/tickets/ticket48226_test.py
index 9812d74..006623f 100644
--- a/dirsrvtests/tests/tickets/ticket48226_test.py
+++ b/dirsrvtests/tests/tickets/ticket48226_test.py
@@ -6,163 +6,50 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_m2
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
-
-class TopologyReplication(object):
- def __init__(self, master1, master2):
- master1.open()
- self.master1 = master1
- master2.open()
- self.master2 = master2
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- os.environ['USE_VALGRIND'] = '1'
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating master 1...
- master1 = DirSrv(verbose=False)
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
- args_instance[SER_HOST] = HOST_MASTER_1
- args_instance[SER_PORT] = PORT_MASTER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master1.allocate(args_master)
- instance_master1 = master1.exists()
- if instance_master1:
- master1.delete()
- master1.create()
- master1.open()
- master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-
- # Creating master 2...
- master2 = DirSrv(verbose=False)
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
- args_instance[SER_HOST] = HOST_MASTER_2
- args_instance[SER_PORT] = PORT_MASTER_2
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master2.allocate(args_master)
- instance_master2 = master2.exists()
- if instance_master2:
- master2.delete()
- master2.create()
- master2.open()
- master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
- #
- # Create all the agreements
- #
- # Creating agreement from master 1 to master 2
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
- if not m1_m2_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m1_m2_agmt)
-
- # Creating agreement from master 2 to master 1
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
- if not m2_m1_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m2_m1_agmt)
-
- # Allow the replicas to get situated with the new agreements...
- time.sleep(5)
-
- #
- # Initialize all the agreements
- #
- master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
- master1.waitForReplInit(m1_m2_agmt)
-
- def fin():
- master1.delete()
- master2.delete()
- sbin_dir = master2.get_sbin_dir()
- if not master2.has_asan():
- valgrind_disable(sbin_dir)
- request.addfinalizer(fin)
-
- # Check replication is working...
- if master1.testReplication(DEFAULT_SUFFIX, master2):
- log.info('Replication is working.')
- else:
- log.fatal('Replication is not working.')
- assert False
-
- return TopologyReplication(master1, master2)
-
-def test_ticket48226_set_purgedelay(topology):
+def test_ticket48226_set_purgedelay(topology_m2):
args = {REPLICA_PURGE_DELAY: '5',
REPLICA_PURGE_INTERVAL: '5'}
try:
- topology.master1.replica.setProperties(DEFAULT_SUFFIX, None, None, args)
+ topology_m2.ms["master1"].replica.setProperties(DEFAULT_SUFFIX, None, None, args)
except:
log.fatal('Failed to configure replica')
assert False
try:
- topology.master2.replica.setProperties(DEFAULT_SUFFIX, None, None, args)
+ topology_m2.ms["master2"].replica.setProperties(DEFAULT_SUFFIX, None, None, args)
except:
log.fatal('Failed to configure replica')
assert False
- topology.master1.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-auditlog-logging-enabled', 'on')])
- topology.master2.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-auditlog-logging-enabled', 'on')])
- topology.master1.restart(30)
- topology.master2.restart(30)
+ topology_m2.ms["master1"].modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-auditlog-logging-enabled', 'on')])
+ topology_m2.ms["master2"].modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-auditlog-logging-enabled', 'on')])
+ topology_m2.ms["master1"].restart(30)
+ topology_m2.ms["master2"].restart(30)
-def test_ticket48226_1(topology):
+def test_ticket48226_1(topology_m2):
name = 'test_entry'
dn = "cn=%s,%s" % (name, SUFFIX)
- topology.master1.add_s(Entry((dn, {'objectclass': "top person".split(),
- 'sn': name,
- 'cn': name})))
+ topology_m2.ms["master1"].add_s(Entry((dn, {'objectclass': "top person".split(),
+ 'sn': name,
+ 'cn': name})))
# First do an update that is replicated
mods = [(ldap.MOD_ADD, 'description', '5')]
- topology.master1.modify_s(dn, mods)
+ topology_m2.ms["master1"].modify_s(dn, mods)
nbtry = 0
while (nbtry <= 10):
try:
- ent = topology.master2.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
+ ent = topology_m2.ms["master2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
if ent.hasAttr('description') and ent.getValue('description') == '5':
break
except ldap.NO_SUCH_OBJECT:
@@ -172,51 +59,51 @@ def test_ticket48226_1(topology):
assert nbtry <= 10
# Stop M2 so that it will not receive the next update
- topology.master2.stop(10)
+ topology_m2.ms["master2"].stop(10)
# ADD a new value that is not replicated
mods = [(ldap.MOD_DELETE, 'description', '5')]
- topology.master1.modify_s(dn, mods)
+ topology_m2.ms["master1"].modify_s(dn, mods)
# Stop M1 so that it will keep del '5' that is unknown from master2
- topology.master1.stop(10)
+ topology_m2.ms["master1"].stop(10)
# Get the sbin directory so we know where to replace 'ns-slapd'
- sbin_dir = topology.master2.get_sbin_dir()
+ sbin_dir = topology_m2.ms["master2"].get_sbin_dir()
# Enable valgrind
- if not topology.master2.has_asan():
+ if not topology_m2.ms["master2"].has_asan():
valgrind_enable(sbin_dir)
# start M2 to do the next updates
- topology.master2.start()
+ topology_m2.ms["master2"].start()
# ADD 'description' by '5'
mods = [(ldap.MOD_DELETE, 'description', '5')]
- topology.master2.modify_s(dn, mods)
+ topology_m2.ms["master2"].modify_s(dn, mods)
# DEL 'description' by '5'
mods = [(ldap.MOD_ADD, 'description', '5')]
- topology.master2.modify_s(dn, mods)
+ topology_m2.ms["master2"].modify_s(dn, mods)
# sleep of purge delay so that the next update will purge the CSN_7
time.sleep(6)
# ADD 'description' by '6' that purge the state info
mods = [(ldap.MOD_ADD, 'description', '6')]
- topology.master2.modify_s(dn, mods)
+ topology_m2.ms["master2"].modify_s(dn, mods)
# Restart master1
- #topology.master1.start(30)
+ # topology_m2.ms["master1"].start(30)
- if not topology.master2.has_asan():
- results_file = valgrind_get_results_file(topology.master2)
+ if not topology_m2.ms["master2"].has_asan():
+ results_file = valgrind_get_results_file(topology_m2.ms["master2"])
# Stop master2
- topology.master2.stop(30)
+ topology_m2.ms["master2"].stop(30)
# Check for leak
- if not topology.master2.has_asan():
+ if not topology_m2.ms["master2"].has_asan():
if valgrind_check_file(results_file, VALGRIND_LEAK_STR, 'csnset_dup'):
log.info('Valgrind reported leak in csnset_dup!')
assert False
@@ -245,4 +132,3 @@ if __name__ == '__main__':
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s %s" % CURRENT_FILE)
-
diff --git a/dirsrvtests/tests/tickets/ticket48228_test.py b/dirsrvtests/tests/tickets/ticket48228_test.py
index 8559bcd..c8df053 100644
--- a/dirsrvtests/tests/tickets/ticket48228_test.py
+++ b/dirsrvtests/tests/tickets/ticket48228_test.py
@@ -6,17 +6,11 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
import logging
+
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
@@ -32,60 +26,19 @@ USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX
USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-
-def set_global_pwpolicy(topology, inhistory):
+def set_global_pwpolicy(topology_st, inhistory):
log.info(" +++++ Enable global password policy +++++\n")
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
# Enable password policy
try:
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')])
+ topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')])
except ldap.LDAPError as e:
log.error('Failed to set pwpolicy-local: error ' + e.message['desc'])
assert False
log.info(" Set global password history on\n")
try:
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordHistory', 'on')])
+ topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordHistory', 'on')])
except ldap.LDAPError as e:
log.error('Failed to set passwordHistory: error ' + e.message['desc'])
assert False
@@ -93,66 +46,68 @@ def set_global_pwpolicy(topology, inhistory):
log.info(" Set global passwords in history\n")
try:
count = "%d" % inhistory
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordInHistory', count)])
+ topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordInHistory', count)])
except ldap.LDAPError as e:
log.error('Failed to set passwordInHistory: error ' + e.message['desc'])
assert False
-def set_subtree_pwpolicy(topology):
+def set_subtree_pwpolicy(topology_st):
log.info(" +++++ Enable subtree level password policy +++++\n")
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
log.info(" Add the container")
try:
- topology.standalone.add_s(Entry((SUBTREE_CONTAINER, {'objectclass': 'top nsContainer'.split(),
- 'cn': 'nsPwPolicyContainer'})))
+ topology_st.standalone.add_s(Entry((SUBTREE_CONTAINER, {'objectclass': 'top nsContainer'.split(),
+ 'cn': 'nsPwPolicyContainer'})))
except ldap.LDAPError as e:
log.error('Failed to add subtree container: error ' + e.message['desc'])
assert False
log.info(" Add the password policy subentry {passwordHistory: on, passwordInHistory: 6}")
try:
- topology.standalone.add_s(Entry((SUBTREE_PWP, {'objectclass': 'top ldapsubentry passwordpolicy'.split(),
- 'cn': SUBTREE_PWPDN,
- 'passwordMustChange': 'off',
- 'passwordExp': 'off',
- 'passwordHistory': 'on',
- 'passwordInHistory': '6',
- 'passwordMinAge': '0',
- 'passwordChange': 'on',
- 'passwordStorageScheme': 'clear'})))
+ topology_st.standalone.add_s(Entry((SUBTREE_PWP, {'objectclass': 'top ldapsubentry passwordpolicy'.split(),
+ 'cn': SUBTREE_PWPDN,
+ 'passwordMustChange': 'off',
+ 'passwordExp': 'off',
+ 'passwordHistory': 'on',
+ 'passwordInHistory': '6',
+ 'passwordMinAge': '0',
+ 'passwordChange': 'on',
+ 'passwordStorageScheme': 'clear'})))
except ldap.LDAPError as e:
log.error('Failed to add passwordpolicy: error ' + e.message['desc'])
assert False
log.info(" Add the COS template")
try:
- topology.standalone.add_s(Entry((SUBTREE_COS_TMPL, {'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
- 'cn': SUBTREE_PWPDN,
- 'cosPriority': '1',
- 'cn': SUBTREE_COS_TMPLDN,
- 'pwdpolicysubentry': SUBTREE_PWP})))
+ topology_st.standalone.add_s(
+ Entry((SUBTREE_COS_TMPL, {'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
+ 'cn': SUBTREE_PWPDN,
+ 'cosPriority': '1',
+ 'cn': SUBTREE_COS_TMPLDN,
+ 'pwdpolicysubentry': SUBTREE_PWP})))
except ldap.LDAPError as e:
log.error('Failed to add COS template: error ' + e.message['desc'])
assert False
log.info(" Add the COS definition")
try:
- topology.standalone.add_s(Entry((SUBTREE_COS_DEF, {'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
- 'cn': SUBTREE_PWPDN,
- 'costemplatedn': SUBTREE_COS_TMPL,
- 'cosAttribute': 'pwdpolicysubentry default operational-default'})))
+ topology_st.standalone.add_s(
+ Entry((SUBTREE_COS_DEF, {'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
+ 'cn': SUBTREE_PWPDN,
+ 'costemplatedn': SUBTREE_COS_TMPL,
+ 'cosAttribute': 'pwdpolicysubentry default operational-default'})))
except ldap.LDAPError as e:
log.error('Failed to add COS def: error ' + e.message['desc'])
assert False
-def check_passwd_inhistory(topology, user, cpw, passwd):
+def check_passwd_inhistory(topology_st, user, cpw, passwd):
inhistory = 0
log.info(" Bind as {%s,%s}" % (user, cpw))
- topology.standalone.simple_bind_s(user, cpw)
+ topology_st.standalone.simple_bind_s(user, cpw)
try:
- topology.standalone.modify_s(user, [(ldap.MOD_REPLACE, 'userpassword', passwd)])
+ topology_st.standalone.modify_s(user, [(ldap.MOD_REPLACE, 'userpassword', passwd)])
except ldap.LDAPError as e:
log.info(' The password ' + passwd + ' of user' + USER1_DN + ' in history: error ' + e.message['desc'])
inhistory = 1
@@ -160,114 +115,118 @@ def check_passwd_inhistory(topology, user, cpw, passwd):
return inhistory
-def update_passwd(topology, user, passwd, times):
+def update_passwd(topology_st, user, passwd, times):
cpw = passwd
for i in range(times):
log.info(" Bind as {%s,%s}" % (user, cpw))
- topology.standalone.simple_bind_s(user, cpw)
+ topology_st.standalone.simple_bind_s(user, cpw)
cpw = 'password%d' % i
try:
- topology.standalone.modify_s(user, [(ldap.MOD_REPLACE, 'userpassword', cpw)])
+ topology_st.standalone.modify_s(user, [(ldap.MOD_REPLACE, 'userpassword', cpw)])
except ldap.LDAPError as e:
- log.fatal('test_ticket48228: Failed to update the password ' + cpw + ' of user ' + user + ': error ' + e.message['desc'])
+ log.fatal(
+ 'test_ticket48228: Failed to update the password ' + cpw + ' of user ' + user + ': error ' + e.message[
+ 'desc'])
assert False
time.sleep(1)
# checking the first password, which is supposed to be in history
- inhistory = check_passwd_inhistory(topology, user, cpw, passwd)
+ inhistory = check_passwd_inhistory(topology_st, user, cpw, passwd)
assert inhistory == 1
-def test_ticket48228_test_global_policy(topology):
+def test_ticket48228_test_global_policy(topology_st):
"""
Check global password policy
"""
log.info(' Set inhistory = 6')
- set_global_pwpolicy(topology, 6)
+ set_global_pwpolicy(topology_st, 6)
log.info(' Bind as directory manager')
log.info("Bind as %s" % DN_DM)
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
log.info(' Add an entry' + USER1_DN)
try:
- topology.standalone.add_s(Entry((USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'sn': '1',
- 'cn': 'user 1',
- 'uid': 'user1',
- 'givenname': 'user',
- 'mail': 'user1(a)example.com',
- 'userpassword': 'password'})))
+ topology_st.standalone.add_s(
+ Entry((USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'sn': '1',
+ 'cn': 'user 1',
+ 'uid': 'user1',
+ 'givenname': 'user',
+ 'mail': 'user1(a)example.com',
+ 'userpassword': 'password'})))
except ldap.LDAPError as e:
log.fatal('test_ticket48228: Failed to add user' + USER1_DN + ': error ' + e.message['desc'])
assert False
log.info(' Update the password of ' + USER1_DN + ' 6 times')
- update_passwd(topology, USER1_DN, 'password', 6)
+ update_passwd(topology_st, USER1_DN, 'password', 6)
log.info(' Set inhistory = 4')
- set_global_pwpolicy(topology, 4)
+ set_global_pwpolicy(topology_st, 4)
log.info(' checking the first password, which is supposed NOT to be in history any more')
cpw = 'password%d' % 5
tpw = 'password'
- inhistory = check_passwd_inhistory(topology, USER1_DN, cpw, tpw)
+ inhistory = check_passwd_inhistory(topology_st, USER1_DN, cpw, tpw)
assert inhistory == 0
log.info(' checking the second password, which is supposed NOT to be in history any more')
cpw = tpw
tpw = 'password%d' % 0
- inhistory = check_passwd_inhistory(topology, USER1_DN, cpw, tpw)
+ inhistory = check_passwd_inhistory(topology_st, USER1_DN, cpw, tpw)
assert inhistory == 0
log.info(' checking the third password, which is supposed NOT to be in history any more')
cpw = tpw
tpw = 'password%d' % 1
- inhistory = check_passwd_inhistory(topology, USER1_DN, cpw, tpw)
+ inhistory = check_passwd_inhistory(topology_st, USER1_DN, cpw, tpw)
assert inhistory == 0
log.info(' checking the sixth password, which is supposed to be in history')
cpw = tpw
tpw = 'password%d' % 5
- inhistory = check_passwd_inhistory(topology, USER1_DN, cpw, tpw)
+ inhistory = check_passwd_inhistory(topology_st, USER1_DN, cpw, tpw)
assert inhistory == 1
log.info("Global policy was successfully verified.")
-def test_ticket48228_test_subtree_policy(topology):
+def test_ticket48228_test_subtree_policy(topology_st):
"""
Check subtree level password policy
"""
log.info(' Set inhistory = 6')
- set_subtree_pwpolicy(topology)
+ set_subtree_pwpolicy(topology_st)
log.info(' Bind as directory manager')
log.info("Bind as %s" % DN_DM)
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
log.info(' Add an entry' + USER2_DN)
try:
- topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'sn': '2',
- 'cn': 'user 2',
- 'uid': 'user2',
- 'givenname': 'user',
- 'mail': 'user2(a)example.com',
- 'userpassword': 'password'})))
+ topology_st.standalone.add_s(
+ Entry((USER2_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'sn': '2',
+ 'cn': 'user 2',
+ 'uid': 'user2',
+ 'givenname': 'user',
+ 'mail': 'user2(a)example.com',
+ 'userpassword': 'password'})))
except ldap.LDAPError as e:
log.fatal('test_ticket48228: Failed to add user' + USER2_DN + ': error ' + e.message['desc'])
assert False
log.info(' Update the password of ' + USER2_DN + ' 6 times')
- update_passwd(topology, USER2_DN, 'password', 6)
+ update_passwd(topology_st, USER2_DN, 'password', 6)
log.info(' Set inhistory = 4')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
try:
- topology.standalone.modify_s(SUBTREE_PWP, [(ldap.MOD_REPLACE, 'passwordInHistory', '4')])
+ topology_st.standalone.modify_s(SUBTREE_PWP, [(ldap.MOD_REPLACE, 'passwordInHistory', '4')])
except ldap.LDAPError as e:
log.error('Failed to set pwpolicy-local: error ' + e.message['desc'])
assert False
@@ -275,25 +234,25 @@ def test_ticket48228_test_subtree_policy(topology):
log.info(' checking the first password, which is supposed NOT to be in history any more')
cpw = 'password%d' % 5
tpw = 'password'
- inhistory = check_passwd_inhistory(topology, USER2_DN, cpw, tpw)
+ inhistory = check_passwd_inhistory(topology_st, USER2_DN, cpw, tpw)
assert inhistory == 0
log.info(' checking the second password, which is supposed NOT to be in history any more')
cpw = tpw
tpw = 'password%d' % 1
- inhistory = check_passwd_inhistory(topology, USER2_DN, cpw, tpw)
+ inhistory = check_passwd_inhistory(topology_st, USER2_DN, cpw, tpw)
assert inhistory == 0
log.info(' checking the third password, which is supposed NOT to be in history any more')
cpw = tpw
tpw = 'password%d' % 2
- inhistory = check_passwd_inhistory(topology, USER2_DN, cpw, tpw)
+ inhistory = check_passwd_inhistory(topology_st, USER2_DN, cpw, tpw)
assert inhistory == 0
log.info(' checking the six password, which is supposed to be in history')
cpw = tpw
tpw = 'password%d' % 5
- inhistory = check_passwd_inhistory(topology, USER2_DN, cpw, tpw)
+ inhistory = check_passwd_inhistory(topology_st, USER2_DN, cpw, tpw)
assert inhistory == 1
log.info("Subtree level policy was successfully verified.")
diff --git a/dirsrvtests/tests/tickets/ticket48233_test.py b/dirsrvtests/tests/tickets/ticket48233_test.py
index d9b0aae..4550e92 100644
--- a/dirsrvtests/tests/tickets/ticket48233_test.py
+++ b/dirsrvtests/tests/tickets/ticket48233_test.py
@@ -1,60 +1,12 @@
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- return TopologyStandalone(standalone)
-
-
-def test_ticket48233(topology):
+def test_ticket48233(topology_st):
"""Test that ACI's that use IP restrictions do not crash the server at
shutdown
"""
@@ -65,7 +17,7 @@ def test_ticket48233(topology):
'(userdn = "ldap:///anyone") and (ip="127.0.0.1");)')
try:
- topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', aci_text)])
+ topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', aci_text)])
except ldap.LDAPError as e:
log.error('Failed to add aci: (%s) error %s' % (aci_text, e.message['desc']))
assert False
@@ -73,13 +25,13 @@ def test_ticket48233(topology):
# Anonymous search to engage the aci
try:
- topology.standalone.simple_bind_s("", "")
+ topology_st.standalone.simple_bind_s("", "")
except ldap.LDAPError as e:
log.error('Failed to anonymously bind -error %s' % (e.message['desc']))
assert False
try:
- entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*')
+ entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*')
if not entries:
log.fatal('Failed return an entries from search')
assert False
@@ -88,10 +40,10 @@ def test_ticket48233(topology):
assert False
# Restart the server
- topology.standalone.restart(timeout=10)
+ topology_st.standalone.restart(timeout=10)
# Check for crash
- if topology.standalone.detectDisorderlyShutdown():
+ if topology_st.standalone.detectDisorderlyShutdown():
log.fatal('Server crashed!')
assert False
@@ -102,4 +54,4 @@ if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
\ No newline at end of file
+ pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/tickets/ticket48234_test.py b/dirsrvtests/tests/tickets/ticket48234_test.py
index 27eff1a..e63594a 100644
--- a/dirsrvtests/tests/tickets/ticket48234_test.py
+++ b/dirsrvtests/tests/tickets/ticket48234_test.py
@@ -1,63 +1,18 @@
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- return TopologyStandalone(standalone)
def add_ou_entry(server, name, myparent):
dn = 'ou=%s,%s' % (name, myparent)
server.add_s(Entry((dn, {'objectclass': ['top', 'organizationalunit'],
'ou': name})))
+
def add_user_entry(server, name, pw, myparent):
dn = 'cn=%s,%s' % (name, myparent)
server.add_s(Entry((dn, {'objectclass': ['top', 'person'],
@@ -66,7 +21,8 @@ def add_user_entry(server, name, pw, myparent):
'telephonenumber': '+1 222 333-4444',
'userpassword': pw})))
-def test_ticket48234(topology):
+
+def test_ticket48234(topology_st):
"""
Test aci which contains an extensible filter.
shutdown
@@ -74,9 +30,9 @@ def test_ticket48234(topology):
log.info('Bind as root DN')
try:
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
except ldap.LDAPError as e:
- topology.standalone.log.error('Root DN failed to authenticate: ' + e.message['desc'])
+ topology_st.standalone.log.error('Root DN failed to authenticate: ' + e.message['desc'])
assert False
ouname = 'outest'
@@ -86,11 +42,11 @@ def test_ticket48234(topology):
log.info('Add aci which contains extensible filter.')
aci_text = ('(targetattr = "%s")' % (deniedattr) +
'(target = "ldap:///%s")' % (DEFAULT_SUFFIX) +
- '(version 3.0;acl "admin-tel-matching-rule-outest";deny (all)' +
+ '(version 3.0;acl "admin-tel-matching-rule-outest";deny (all)' +
'(userdn = "ldap:///%s??sub?(&(cn=%s)(ou:dn:=%s))");)' % (DEFAULT_SUFFIX, username, ouname))
try:
- topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', aci_text)])
+ topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', aci_text)])
except ldap.LDAPError as e:
log.error('Failed to add aci: (%s) error %s' % (aci_text, e.message['desc']))
assert False
@@ -99,34 +55,34 @@ def test_ticket48234(topology):
for idx in range(0, 2):
ou0 = 'OU%d' % idx
log.info('adding %s under %s...' % (ou0, DEFAULT_SUFFIX))
- add_ou_entry(topology.standalone, ou0, DEFAULT_SUFFIX)
+ add_ou_entry(topology_st.standalone, ou0, DEFAULT_SUFFIX)
parent = 'ou=%s,%s' % (ou0, DEFAULT_SUFFIX)
log.info('adding %s under %s...' % (ouname, parent))
- add_ou_entry(topology.standalone, ouname, parent)
+ add_ou_entry(topology_st.standalone, ouname, parent)
for idx in range(0, 2):
parent = 'ou=%s,ou=OU%d,%s' % (ouname, idx, DEFAULT_SUFFIX)
log.info('adding %s under %s...' % (username, parent))
- add_user_entry(topology.standalone, username, passwd, parent)
+ add_user_entry(topology_st.standalone, username, passwd, parent)
binddn = 'cn=%s,%s' % (username, parent)
log.info('Bind as user %s' % binddn)
try:
- topology.standalone.simple_bind_s(binddn, passwd)
+ topology_st.standalone.simple_bind_s(binddn, passwd)
except ldap.LDAPError as e:
- topology.standalone.log.error(bindn + ' failed to authenticate: ' + e.message['desc'])
+ topology_st.standalone.log.error(bindn + ' failed to authenticate: ' + e.message['desc'])
assert False
filter = '(cn=%s)' % username
try:
- entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, filter, [deniedattr, 'dn'])
+ entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, filter, [deniedattr, 'dn'])
assert 2 == len(entries)
for idx in range(0, 1):
if entries[idx].hasAttr(deniedattr):
log.fatal('aci with extensible filter failed -- %s')
assert False
except ldap.LDAPError as e:
- topology.standalone.log.error('Search (%s, %s) failed: ' % (DEFAULT_SUFFIX, filter) + e.message['desc'])
+ topology_st.standalone.log.error('Search (%s, %s) failed: ' % (DEFAULT_SUFFIX, filter) + e.message['desc'])
assert False
log.info('Test complete')
diff --git a/dirsrvtests/tests/tickets/ticket48252_test.py b/dirsrvtests/tests/tickets/ticket48252_test.py
index 37f2635..1a29d4d 100644
--- a/dirsrvtests/tests/tickets/ticket48252_test.py
+++ b/dirsrvtests/tests/tickets/ticket48252_test.py
@@ -6,17 +6,11 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
import logging
+
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
@@ -25,48 +19,7 @@ USER_NUM = 10
TEST_USER = "test_user"
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-
-def test_ticket48252_setup(topology):
+def test_ticket48252_setup(topology_st):
"""
Enable USN plug-in for enabling tombstones
Add test entries
@@ -74,7 +27,7 @@ def test_ticket48252_setup(topology):
log.info("Enable the USN plugin...")
try:
- topology.standalone.plugins.enable(name=PLUGIN_USN)
+ topology_st.standalone.plugins.enable(name=PLUGIN_USN)
except e:
log.error("Failed to enable USN Plugin: error " + e.message['desc'])
assert False
@@ -82,28 +35,28 @@ def test_ticket48252_setup(topology):
log.info("Adding test entries...")
for id in range(USER_NUM):
name = "%s%d" % (TEST_USER, id)
- topology.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
- 'objectclass': "top person".split(),
- 'sn': name,
- 'cn': name})))
+ topology_st.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
+ 'objectclass': "top person".split(),
+ 'sn': name,
+ 'cn': name})))
-def in_index_file(topology, id, index):
+def in_index_file(topology_st, id, index):
key = "%s%s" % (TEST_USER, id)
log.info(" dbscan - checking %s is in index file %s..." % (key, index))
- dbscanOut = topology.standalone.dbscan(DEFAULT_BENAME, index)
+ dbscanOut = topology_st.standalone.dbscan(DEFAULT_BENAME, index)
if key in dbscanOut:
found = True
- topology.standalone.log.info("Found key %s in dbscan output" % key)
+ topology_st.standalone.log.info("Found key %s in dbscan output" % key)
else:
found = False
- topology.standalone.log.info("Did not found key %s in dbscan output" % key)
+ topology_st.standalone.log.info("Did not found key %s in dbscan output" % key)
return found
-def test_ticket48252_run_0(topology):
+def test_ticket48252_run_0(topology_st):
"""
Delete an entry cn=test_entry0
Check it is not in the 'cn' index file
@@ -112,19 +65,19 @@ def test_ticket48252_run_0(topology):
del_rdn = "cn=%s0" % TEST_USER
del_entry = "%s,%s" % (del_rdn, SUFFIX)
log.info(" Deleting a test entry %s..." % del_entry)
- topology.standalone.delete_s(del_entry)
+ topology_st.standalone.delete_s(del_entry)
- assert in_index_file(topology, 0, 'cn') == False
+ assert in_index_file(topology_st, 0, 'cn') == False
log.info(" db2index - reindexing %s ..." % 'cn')
- assert topology.standalone.db2index(DEFAULT_BENAME, 'cn')
+ assert topology_st.standalone.db2index(DEFAULT_BENAME, 'cn')
- assert in_index_file(topology, 0, 'cn') == False
+ assert in_index_file(topology_st, 0, 'cn') == False
log.info(" entry %s is not in the cn index file after reindexed." % del_entry)
log.info('Case 1 - PASSED')
-def test_ticket48252_run_1(topology):
+def test_ticket48252_run_1(topology_st):
"""
Delete an entry cn=test_entry1
Check it is in the 'objectclass' index file as a tombstone entry
@@ -133,16 +86,16 @@ def test_ticket48252_run_1(topology):
del_rdn = "cn=%s1" % TEST_USER
del_entry = "%s,%s" % (del_rdn, SUFFIX)
log.info(" Deleting a test entry %s..." % del_entry)
- topology.standalone.delete_s(del_entry)
+ topology_st.standalone.delete_s(del_entry)
- entry = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(&(objectclass=nstombstone)(%s))' % del_rdn)
+ entry = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(&(objectclass=nstombstone)(%s))' % del_rdn)
assert len(entry) == 1
log.info(" entry %s is in the objectclass index file." % del_entry)
log.info(" db2index - reindexing %s ..." % 'objectclass')
- assert topology.standalone.db2index(DEFAULT_BENAME, 'objectclass')
+ assert topology_st.standalone.db2index(DEFAULT_BENAME, 'objectclass')
- entry = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(&(objectclass=nstombstone)(%s))' % del_rdn)
+ entry = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(&(objectclass=nstombstone)(%s))' % del_rdn)
assert len(entry) == 1
log.info(" entry %s is in the objectclass index file after reindexed." % del_entry)
log.info('Case 2 - PASSED')
diff --git a/dirsrvtests/tests/tickets/ticket48265_test.py b/dirsrvtests/tests/tickets/ticket48265_test.py
index 8195ea9..02193b5 100644
--- a/dirsrvtests/tests/tickets/ticket48265_test.py
+++ b/dirsrvtests/tests/tickets/ticket48265_test.py
@@ -6,59 +6,19 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
USER_NUM = 20
TEST_USER = 'test_user'
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_ticket48265_test(topology):
+def test_ticket48265_test(topology_st):
"""
Complex filter issues
Ticket 47521 type complex filter:
@@ -72,32 +32,34 @@ def test_ticket48265_test(topology):
name = "%s%d" % (TEST_USER, id)
mail = "%s(a)example.com" % name
secretary = "cn=%s,ou=secretary,%s" % (name, SUFFIX)
- topology.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
- 'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'sn': name,
- 'cn': name,
- 'uid': name,
- 'givenname': 'test',
- 'mail': mail,
- 'description': 'description',
- 'secretary': secretary,
- 'l': 'MV',
- 'title': 'Engineer'})))
+ topology_st.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
+ 'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'sn': name,
+ 'cn': name,
+ 'uid': name,
+ 'givenname': 'test',
+ 'mail': mail,
+ 'description': 'description',
+ 'secretary': secretary,
+ 'l': 'MV',
+ 'title': 'Engineer'})))
log.info("Search with Ticket 47521 type complex filter")
for id in range(USER_NUM):
name = "%s%d" % (TEST_USER, id)
mail = "%s(a)example.com" % name
- filter47521 = '(&(|(uid=%s*)(cn=%s*))(&(givenname=test))(mail=%s)(&(description=*)))' % (TEST_USER, TEST_USER, mail)
- entry = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, filter47521)
+ filter47521 = '(&(|(uid=%s*)(cn=%s*))(&(givenname=test))(mail=%s)(&(description=*)))' % (
+ TEST_USER, TEST_USER, mail)
+ entry = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, filter47521)
assert len(entry) == 1
log.info("Search with Ticket 48265 type complex filter")
for id in range(USER_NUM):
name = "%s%d" % (TEST_USER, id)
mail = "%s(a)example.com" % name
- filter48265 = '(&(&(|(l=AA)(l=BB)(l=MV))(|(title=admin)(title=engineer)))(|(uid=%s)(mail=%s))(description=description))' % (name, mail)
- entry = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, filter48265)
+ filter48265 = '(&(&(|(l=AA)(l=BB)(l=MV))(|(title=admin)(title=engineer)))(|(uid=%s)(mail=%s))(description=description))' % (
+ name, mail)
+ entry = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, filter48265)
assert len(entry) == 1
log.info('Test 48265 complete\n')
diff --git a/dirsrvtests/tests/tickets/ticket48266_test.py b/dirsrvtests/tests/tickets/ticket48266_test.py
index 18d1ba3..1964c85 100644
--- a/dirsrvtests/tests/tickets/ticket48266_test.py
+++ b/dirsrvtests/tests/tickets/ticket48266_test.py
@@ -1,134 +1,21 @@
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_m2
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
NEW_ACCOUNT = "new_account"
MAX_ACCOUNTS = 20
-class TopologyReplication(object):
- def __init__(self, master1, master2):
- master1.open()
- self.master1 = master1
- master2.open()
- self.master2 = master2
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating master 1...
- master1 = DirSrv(verbose=False)
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
- args_instance[SER_HOST] = HOST_MASTER_1
- args_instance[SER_PORT] = PORT_MASTER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master1.allocate(args_master)
- instance_master1 = master1.exists()
- if instance_master1:
- master1.delete()
- master1.create()
- master1.open()
- master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-
- # Creating master 2...
- master2 = DirSrv(verbose=False)
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
- args_instance[SER_HOST] = HOST_MASTER_2
- args_instance[SER_PORT] = PORT_MASTER_2
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master2.allocate(args_master)
- instance_master2 = master2.exists()
- if instance_master2:
- master2.delete()
- master2.create()
- master2.open()
- master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
- #
- # Create all the agreements
- #
- # Creating agreement from master 1 to master 2
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
- if not m1_m2_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m1_m2_agmt)
-
- # Creating agreement from master 2 to master 1
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
- if not m2_m1_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m2_m1_agmt)
-
- # Allow the replicas to get situated with the new agreements...
- time.sleep(5)
-
- #
- # Initialize all the agreements
- #
- master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
- master1.waitForReplInit(m1_m2_agmt)
-
- # Check replication is working...
- if master1.testReplication(DEFAULT_SUFFIX, master2):
- log.info('Replication is working.')
- else:
- log.fatal('Replication is not working.')
- assert False
-
- # Delete each instance in the end
- def fin():
- master1.delete()
- master2.delete()
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- master1.clearTmpDir(__file__)
-
- return TopologyReplication(master1, master2)
-
-
def pattern_accesslog(file, log_pattern):
try:
pattern_accesslog.last_pos += 1
except AttributeError:
pattern_accesslog.last_pos = 0
-
found = None
file.seek(pattern_accesslog.last_pos)
@@ -148,51 +35,51 @@ def pattern_accesslog(file, log_pattern):
@pytest.fixture(scope="module")
-def entries(topology):
+def entries(topology_m2):
# add dummy entries in the staging DIT
for cpt in range(MAX_ACCOUNTS):
name = "%s%d" % (NEW_ACCOUNT, cpt)
- topology.master1.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
- 'objectclass': "top person".split(),
- 'sn': name,
- 'cn': name})))
+ topology_m2.ms["master1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
+ 'objectclass': "top person".split(),
+ 'sn': name,
+ 'cn': name})))
mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '8192'),
- (ldap.MOD_REPLACE, 'nsslapd-accesslog-level', str(256+4))]
- topology.master1.modify_s(DN_CONFIG, mod)
- topology.master2.modify_s(DN_CONFIG, mod)
+ (ldap.MOD_REPLACE, 'nsslapd-accesslog-level', str(256 + 4))]
+ topology_m2.ms["master1"].modify_s(DN_CONFIG, mod)
+ topology_m2.ms["master2"].modify_s(DN_CONFIG, mod)
-def test_ticket48266_fractional(topology, entries):
- ents = topology.master1.agreement.list(suffix=SUFFIX)
+def test_ticket48266_fractional(topology_m2, entries):
+ ents = topology_m2.ms["master1"].agreement.list(suffix=SUFFIX)
assert len(ents) == 1
mod = [(ldap.MOD_REPLACE, 'nsDS5ReplicatedAttributeList', ['(objectclass=*) $ EXCLUDE telephonenumber']),
(ldap.MOD_REPLACE, 'nsds5ReplicaStripAttrs', ['modifiersname modifytimestamp'])]
- ents = topology.master1.agreement.list(suffix=SUFFIX)
+ ents = topology_m2.ms["master1"].agreement.list(suffix=SUFFIX)
assert len(ents) == 1
m1_m2_agmt = ents[0].dn
- topology.master1.modify_s(ents[0].dn, mod)
+ topology_m2.ms["master1"].modify_s(ents[0].dn, mod)
- ents = topology.master2.agreement.list(suffix=SUFFIX)
+ ents = topology_m2.ms["master2"].agreement.list(suffix=SUFFIX)
assert len(ents) == 1
- topology.master2.modify_s(ents[0].dn, mod)
+ topology_m2.ms["master2"].modify_s(ents[0].dn, mod)
- topology.master1.restart(timeout=10)
- topology.master2.restart(timeout=10)
+ topology_m2.ms["master1"].restart(timeout=10)
+ topology_m2.ms["master2"].restart(timeout=10)
- topology.master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
- topology.master1.waitForReplInit(m1_m2_agmt)
+ topology_m2.ms["master1"].agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
+ topology_m2.ms["master1"].waitForReplInit(m1_m2_agmt)
-def test_ticket48266_check_repl_desc(topology, entries):
+def test_ticket48266_check_repl_desc(topology_m2, entries):
name = "cn=%s1,%s" % (NEW_ACCOUNT, SUFFIX)
value = 'check repl. description'
mod = [(ldap.MOD_REPLACE, 'description', value)]
- topology.master1.modify_s(name, mod)
+ topology_m2.ms["master1"].modify_s(name, mod)
loop = 0
while loop <= 10:
- ent = topology.master2.getEntry(name, ldap.SCOPE_BASE, "(objectclass=*)")
+ ent = topology_m2.ms["master2"].getEntry(name, ldap.SCOPE_BASE, "(objectclass=*)")
if ent.hasAttr('description') and ent.getValue('description') == value:
break
time.sleep(1)
@@ -203,17 +90,17 @@ def test_ticket48266_check_repl_desc(topology, entries):
# will use this CSN as a starting point on error log
# after this is one 'Skipped' then the first csn _get_first_not_replicated_csn
# should no longer be Skipped in the error log
-def _get_last_not_replicated_csn(topology):
+def _get_last_not_replicated_csn(topology_m2):
name = "cn=%s5,%s" % (NEW_ACCOUNT, SUFFIX)
# read the first CSN that will not be replicated
mod = [(ldap.MOD_REPLACE, 'telephonenumber', str(123456))]
- topology.master1.modify_s(name, mod)
- msgid = topology.master1.search_ext(name, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
- rtype, rdata, rmsgid = topology.master1.result2(msgid)
+ topology_m2.ms["master1"].modify_s(name, mod)
+ msgid = topology_m2.ms["master1"].search_ext(name, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
+ rtype, rdata, rmsgid = topology_m2.ms["master1"].result2(msgid)
attrs = None
for dn, raw_attrs in rdata:
- topology.master1.log.info("dn: %s" % dn)
+ topology_m2.ms["master1"].log.info("dn: %s" % dn)
if 'nscpentrywsi' in raw_attrs:
attrs = raw_attrs['nscpentrywsi']
assert attrs
@@ -224,8 +111,8 @@ def _get_last_not_replicated_csn(topology):
# now retrieve the CSN of the operation we are looking for
csn = None
- topology.master1.stop(timeout=10)
- file_obj = open(topology.master1.accesslog, "r")
+ topology_m2.ms["master1"].stop(timeout=10)
+ file_obj = open(topology_m2.ms["master1"].accesslog, "r")
# First the conn/op of the operation
regex = re.compile("MOD dn=\"%s\"" % name)
@@ -233,8 +120,8 @@ def _get_last_not_replicated_csn(topology):
assert found_op
if found_op:
conn_op_pattern = '.* (conn=[0-9]* op=[0-9]*) .*'
- conn_op_re= re.compile(conn_op_pattern)
- conn_op_match = conn_op_re.match(found_op)
+ conn_op_re = re.compile(conn_op_pattern)
+ conn_op_match = conn_op_re.match(found_op)
conn_op = conn_op_match.group(1)
# now the related CSN
@@ -245,21 +132,21 @@ def _get_last_not_replicated_csn(topology):
csn_match = csn_re.match(found_result)
csn = csn_match.group(1)
- topology.master1.start(timeout=10)
+ topology_m2.ms["master1"].start(timeout=10)
return csn
-def _get_first_not_replicated_csn(topology):
+def _get_first_not_replicated_csn(topology_m2):
name = "cn=%s2,%s" % (NEW_ACCOUNT, SUFFIX)
# read the first CSN that will not be replicated
mod = [(ldap.MOD_REPLACE, 'telephonenumber', str(123456))]
- topology.master1.modify_s(name, mod)
- msgid = topology.master1.search_ext(name, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
- rtype, rdata, rmsgid = topology.master1.result2(msgid)
+ topology_m2.ms["master1"].modify_s(name, mod)
+ msgid = topology_m2.ms["master1"].search_ext(name, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
+ rtype, rdata, rmsgid = topology_m2.ms["master1"].result2(msgid)
attrs = None
for dn, raw_attrs in rdata:
- topology.master1.log.info("dn: %s" % dn)
+ topology_m2.ms["master1"].log.info("dn: %s" % dn)
if 'nscpentrywsi' in raw_attrs:
attrs = raw_attrs['nscpentrywsi']
assert attrs
@@ -270,8 +157,8 @@ def _get_first_not_replicated_csn(topology):
# now retrieve the CSN of the operation we are looking for
csn = None
- topology.master1.stop(timeout=10)
- file_obj = open(topology.master1.accesslog, "r")
+ topology_m2.ms["master1"].stop(timeout=10)
+ file_obj = open(topology_m2.ms["master1"].accesslog, "r")
# First the conn/op of the operation
regex = re.compile("MOD dn=\"%s\"" % name)
@@ -279,8 +166,8 @@ def _get_first_not_replicated_csn(topology):
assert found_op
if found_op:
conn_op_pattern = '.* (conn=[0-9]* op=[0-9]*) .*'
- conn_op_re= re.compile(conn_op_pattern)
- conn_op_match = conn_op_re.match(found_op)
+ conn_op_re = re.compile(conn_op_pattern)
+ conn_op_match = conn_op_re.match(found_op)
conn_op = conn_op_match.group(1)
# now the related CSN
@@ -291,15 +178,15 @@ def _get_first_not_replicated_csn(topology):
csn_match = csn_re.match(found_result)
csn = csn_match.group(1)
- topology.master1.start(timeout=10)
+ topology_m2.ms["master1"].start(timeout=10)
return csn
-def _count_full_session(topology):
+def _count_full_session(topology_m2):
#
# compute the number of 'No more updates'
#
- file_obj = open(topology.master1.errlog, "r")
+ file_obj = open(topology_m2.ms["master1"].errlog, "r")
# pattern to find
pattern = ".*No more updates to send.*"
regex = re.compile(pattern)
@@ -318,60 +205,61 @@ def _count_full_session(topology):
return no_more_updates
-def test_ticket48266_count_csn_evaluation(topology, entries):
- ents = topology.master1.agreement.list(suffix=SUFFIX)
+def test_ticket48266_count_csn_evaluation(topology_m2, entries):
+ ents = topology_m2.ms["master1"].agreement.list(suffix=SUFFIX)
assert len(ents) == 1
- first_csn = _get_first_not_replicated_csn(topology)
+ first_csn = _get_first_not_replicated_csn(topology_m2)
name = "cn=%s3,%s" % (NEW_ACCOUNT, SUFFIX)
NB_SESSION = 102
- no_more_update_cnt = _count_full_session(topology)
- topology.master1.agreement.pause(ents[0].dn)
+ no_more_update_cnt = _count_full_session(topology_m2)
+ topology_m2.ms["master1"].agreement.pause(ents[0].dn)
# now do a set of updates that will NOT be replicated
for telNumber in range(NB_SESSION):
mod = [(ldap.MOD_REPLACE, 'telephonenumber', str(telNumber))]
- topology.master1.modify_s(name, mod)
+ topology_m2.ms["master1"].modify_s(name, mod)
- topology.master1.agreement.resume(ents[0].dn)
+ topology_m2.ms["master1"].agreement.resume(ents[0].dn)
# let's wait all replication session complete
MAX_LOOP = 10
cnt = 0
- current_no_more_update = _count_full_session(topology)
+ current_no_more_update = _count_full_session(topology_m2)
while (current_no_more_update == no_more_update_cnt):
cnt = cnt + 1
if (cnt > MAX_LOOP):
break
time.sleep(5)
- current_no_more_update = _count_full_session(topology)
+ current_no_more_update = _count_full_session(topology_m2)
- log.info('after %d MODs we have completed %d replication sessions' % (NB_SESSION, (current_no_more_update - no_more_update_cnt)))
+ log.info('after %d MODs we have completed %d replication sessions' % (
+ NB_SESSION, (current_no_more_update - no_more_update_cnt)))
no_more_update_cnt = current_no_more_update
-
# At this point, with the fix a dummy update was made BUT may be not sent it
# make sure it was sent so that the consumer CSN will be updated
- last_csn = _get_last_not_replicated_csn(topology)
+ last_csn = _get_last_not_replicated_csn(topology_m2)
# let's wait all replication session complete
MAX_LOOP = 10
cnt = 0
- current_no_more_update = _count_full_session(topology)
+ current_no_more_update = _count_full_session(topology_m2)
while (current_no_more_update == no_more_update_cnt):
cnt = cnt + 1
if (cnt > MAX_LOOP):
break
time.sleep(5)
- current_no_more_update = _count_full_session(topology)
+ current_no_more_update = _count_full_session(topology_m2)
- log.info('This MODs %s triggered the send of the dummy update completed %d replication sessions' % (last_csn, (current_no_more_update - no_more_update_cnt)))
+ log.info('This MODs %s triggered the send of the dummy update completed %d replication sessions' % (
+ last_csn, (current_no_more_update - no_more_update_cnt)))
no_more_update_cnt = current_no_more_update
# so we should no longer see the first_csn in the log
# Let's create a new csn (last_csn) and check there is no longer first_csn
- topology.master1.agreement.pause(ents[0].dn)
- last_csn = _get_last_not_replicated_csn(topology)
- topology.master1.agreement.resume(ents[0].dn)
+ topology_m2.ms["master1"].agreement.pause(ents[0].dn)
+ last_csn = _get_last_not_replicated_csn(topology_m2)
+ topology_m2.ms["master1"].agreement.resume(ents[0].dn)
# let's wait for the session to complete
MAX_LOOP = 10
@@ -381,14 +269,15 @@ def test_ticket48266_count_csn_evaluation(topology, entries):
if (cnt > MAX_LOOP):
break
time.sleep(5)
- current_no_more_update = _count_full_session(topology)
+ current_no_more_update = _count_full_session(topology_m2)
- log.info('This MODs %s completed in %d replication sessions, should be sent without evaluating %s' % (last_csn, (current_no_more_update - no_more_update_cnt), first_csn))
+ log.info('This MODs %s completed in %d replication sessions, should be sent without evaluating %s' % (
+ last_csn, (current_no_more_update - no_more_update_cnt), first_csn))
no_more_update_cnt = current_no_more_update
# Now determine how many times we have skipped 'csn'
# no need to stop the server to check the error log
- file_obj = open(topology.master1.errlog, "r")
+ file_obj = open(topology_m2.ms["master1"].errlog, "r")
# find where the last_csn operation was processed
pattern = ".*ruv_add_csn_inprogress: successfully inserted csn %s.*" % last_csn
@@ -403,7 +292,7 @@ def test_ticket48266_count_csn_evaluation(topology, entries):
if (found):
log.info('last operation was found at %d' % file_obj.tell())
log.info(line)
- log.info('Now check the we can not find the first csn %s in the log'% first_csn)
+ log.info('Now check the we can not find the first csn %s in the log' % first_csn)
pattern = ".*Skipping update operation.*CSN %s.*" % first_csn
regex = re.compile(pattern)
diff --git a/dirsrvtests/tests/tickets/ticket48270_test.py b/dirsrvtests/tests/tickets/ticket48270_test.py
index 6324ef3..3ec9f28 100644
--- a/dirsrvtests/tests/tickets/ticket48270_test.py
+++ b/dirsrvtests/tests/tickets/ticket48270_test.py
@@ -1,102 +1,63 @@
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
+NEW_ACCOUNT = "new_account"
+MAX_ACCOUNTS = 20
-NEW_ACCOUNT = "new_account"
-MAX_ACCOUNTS = 20
-
-MIXED_VALUE="/home/mYhOmEdIrEcToRy"
-LOWER_VALUE="/home/myhomedirectory"
+MIXED_VALUE = "/home/mYhOmEdIrEcToRy"
+LOWER_VALUE = "/home/myhomedirectory"
HOMEDIRECTORY_INDEX = 'cn=homeDirectory,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config'
-HOMEDIRECTORY_CN="homedirectory"
+HOMEDIRECTORY_CN = "homedirectory"
MATCHINGRULE = 'nsMatchingRule'
UIDNUMBER_INDEX = 'cn=uidnumber,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config'
-UIDNUMBER_CN="uidnumber"
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
+UIDNUMBER_CN = "uidnumber"
-def test_ticket48270_init(topology):
+def test_ticket48270_init(topology_st):
log.info("Initialization: add dummy entries for the tests")
for cpt in range(MAX_ACCOUNTS):
name = "%s%d" % (NEW_ACCOUNT, cpt)
- topology.standalone.add_s(Entry(("uid=%s,%s" % (name, SUFFIX), {
- 'objectclass': "top posixAccount".split(),
- 'uid': name,
- 'cn': name,
- 'uidnumber': str(111),
- 'gidnumber': str(222),
- 'homedirectory': "/home/tbordaz_%d" % cpt})))
+ topology_st.standalone.add_s(Entry(("uid=%s,%s" % (name, SUFFIX), {
+ 'objectclass': "top posixAccount".split(),
+ 'uid': name,
+ 'cn': name,
+ 'uidnumber': str(111),
+ 'gidnumber': str(222),
+ 'homedirectory': "/home/tbordaz_%d" % cpt})))
-def test_ticket48270_homeDirectory_indexed_cis(topology):
+def test_ticket48270_homeDirectory_indexed_cis(topology_st):
log.info("\n\nindex homeDirectory in caseIgnoreIA5Match and caseExactIA5Match")
try:
- ent = topology.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE)
+ ent = topology_st.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE)
except ldap.NO_SUCH_OBJECT:
- topology.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, {
- 'objectclass': "top nsIndex".split(),
- 'cn': HOMEDIRECTORY_CN,
- 'nsSystemIndex': 'false',
- 'nsIndexType': 'eq'})))
- #log.info("attach debugger")
- #time.sleep(60)
-
- IGNORE_MR_NAME='caseIgnoreIA5Match'
- EXACT_MR_NAME='caseExactIA5Match'
+ topology_st.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, {
+ 'objectclass': "top nsIndex".split(),
+ 'cn': HOMEDIRECTORY_CN,
+ 'nsSystemIndex': 'false',
+ 'nsIndexType': 'eq'})))
+ # log.info("attach debugger")
+ # time.sleep(60)
+
+ IGNORE_MR_NAME = 'caseIgnoreIA5Match'
+ EXACT_MR_NAME = 'caseExactIA5Match'
mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (IGNORE_MR_NAME, EXACT_MR_NAME))]
- topology.standalone.modify_s(HOMEDIRECTORY_INDEX, mod)
+ topology_st.standalone.modify_s(HOMEDIRECTORY_INDEX, mod)
- #topology.standalone.stop(timeout=10)
+ # topology_st.standalone.stop(timeout=10)
log.info("successfully checked that filter with exact mr , a filter with lowercase eq is failing")
- #assert topology.standalone.db2index(bename=DEFAULT_BENAME, suffixes=None, attrs=['homeDirectory'])
- #topology.standalone.start(timeout=10)
+ # assert topology_st.standalone.db2index(bename=DEFAULT_BENAME, suffixes=None, attrs=['homeDirectory'])
+ # topology_st.standalone.start(timeout=10)
args = {TASK_WAIT: True}
- topology.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args)
+ topology_st.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args)
log.info("Check indexing succeeded with a specified matching rule")
- file_obj = open(topology.standalone.errlog, "r")
+ file_obj = open(topology_st.standalone.errlog, "r")
# Check if the MR configuration failure occurs
regex = re.compile("unknown or invalid matching rule")
@@ -109,40 +70,41 @@ def test_ticket48270_homeDirectory_indexed_cis(topology):
if (found):
log.info("The configuration of a specific MR fails")
log.info(line)
- #assert not found
+ # assert not found
-def test_ticket48270_homeDirectory_mixed_value(topology):
+def test_ticket48270_homeDirectory_mixed_value(topology_st):
# Set a homedirectory value with mixed case
name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX)
mod = [(ldap.MOD_REPLACE, 'homeDirectory', MIXED_VALUE)]
- topology.standalone.modify_s(name, mod)
+ topology_st.standalone.modify_s(name, mod)
-def test_ticket48270_extensible_search(topology):
+def test_ticket48270_extensible_search(topology_st):
name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX)
# check with the exact stored value
log.info("Default: can retrieve an entry filter syntax with exact stored value")
- ent = topology.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % MIXED_VALUE)
+ ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % MIXED_VALUE)
log.info("Default: can retrieve an entry filter caseExactIA5Match with exact stored value")
- ent = topology.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseExactIA5Match:=%s)" % MIXED_VALUE)
+ ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseExactIA5Match:=%s)" % MIXED_VALUE)
# check with a lower case value that is different from the stored value
log.info("Default: can not retrieve an entry filter syntax match with lowered stored value")
try:
- ent = topology.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % LOWER_VALUE)
+ ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % LOWER_VALUE)
assert ent is None
except ldap.NO_SUCH_OBJECT:
pass
log.info("Default: can not retrieve an entry filter caseExactIA5Match with lowered stored value")
try:
- ent = topology.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseExactIA5Match:=%s)" % LOWER_VALUE)
+ ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE,
+ "(homeDirectory:caseExactIA5Match:=%s)" % LOWER_VALUE)
assert ent is None
except ldap.NO_SUCH_OBJECT:
pass
log.info("Default: can retrieve an entry filter caseIgnoreIA5Match with lowered stored value")
- ent = topology.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseIgnoreIA5Match:=%s)" % LOWER_VALUE)
+ ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseIgnoreIA5Match:=%s)" % LOWER_VALUE)
if __name__ == '__main__':
diff --git a/dirsrvtests/tests/tickets/ticket48272_test.py b/dirsrvtests/tests/tickets/ticket48272_test.py
index 6f81152..2b71609 100644
--- a/dirsrvtests/tests/tickets/ticket48272_test.py
+++ b/dirsrvtests/tests/tickets/ticket48272_test.py
@@ -1,24 +1,15 @@
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
-DEBUGGING = False
+DEBUGGING = os.getenv('DEBUGGING', False)
if DEBUGGING:
logging.getLogger(__name__).setLevel(logging.DEBUG)
else:
logging.getLogger(__name__).setLevel(logging.INFO)
-
log = logging.getLogger(__name__)
USER1 = 'user1'
@@ -28,58 +19,14 @@ USER1_DN = 'uid=user1,ou=People,%s' % DEFAULT_SUFFIX
USER1_CONFLICT_DN = 'uid=user1,%s' % DEFAULT_SUFFIX
-class TopologyStandalone(object):
- """The DS Topology Class"""
- def __init__(self, standalone):
- """Init"""
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- """Create DS Deployment"""
-
- # Creating standalone instance ...
- if DEBUGGING:
- standalone = DirSrv(verbose=True)
- else:
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- """If we are debugging just stop the instances, otherwise remove
- them
- """
- if DEBUGGING:
- standalone.stop(60)
- else:
- standalone.delete()
-
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- return TopologyStandalone(standalone)
-
def _create_user(inst, name, dn):
inst.add_s(Entry((
- dn, {
- 'objectClass': 'top account simplesecurityobject'.split(),
- 'uid': name,
- 'userpassword': PW
- })))
+ dn, {
+ 'objectClass': 'top account simplesecurityobject'.split(),
+ 'uid': name,
+ 'userpassword': PW
+ })))
+
def _bind(name, cred):
# Returns true or false if it worked.
@@ -90,22 +37,22 @@ def _bind(name, cred):
try:
conn.simple_bind_s(name, cred)
conn.unbind_s()
- except ldap.NO_SUCH_OBJECT:
+ except ldap.INVALID_CREDENTIALS:
status = False
return status
-def test_ticket48272(topology):
+def test_ticket48272(topology_st):
"""
Test the functionality of the addn bind plugin. This should allow users
    of the type "name" or "name@domain.com" to bind.
"""
# There will be a better way to do this in the future.
- topology.standalone.add_s(Entry((
+ topology_st.standalone.add_s(Entry((
"cn=addn,cn=plugins,cn=config", {
- "objectClass" : "top nsSlapdPlugin extensibleObject".split(),
- "cn" : "addn",
+ "objectClass": "top nsSlapdPlugin extensibleObject".split(),
+ "cn": "addn",
"nsslapd-pluginPath": "libaddn-plugin",
"nsslapd-pluginInitfunc": "addn_init",
"nsslapd-pluginType": "preoperation",
@@ -118,54 +65,54 @@ def test_ticket48272(topology):
}
)))
- topology.standalone.add_s(Entry((
+ topology_st.standalone.add_s(Entry((
"cn=example.com,cn=addn,cn=plugins,cn=config", {
- "objectClass" : "top extensibleObject".split(),
- "cn" : "example.com",
+ "objectClass": "top extensibleObject".split(),
+ "cn": "example.com",
"addn_base": "ou=People,%s" % DEFAULT_SUFFIX,
"addn_filter": "(&(objectClass=account)(uid=%s))",
}
)))
- topology.standalone.restart(60)
+ topology_st.standalone.restart(60)
# Add a user
- _create_user(topology.standalone, USER1, USER1_DN)
+ _create_user(topology_st.standalone, USER1, USER1_DN)
# Make sure our binds still work.
- assert(_bind(USER1_DN, PW))
+ assert (_bind(USER1_DN, PW))
# Test an anonymous bind
- for i in range(0,10):
-
+ for i in range(0, 10):
# Test bind as name
- assert(_bind(USER1, PW))
+ assert (_bind(USER1, PW))
# Make sure that name@fakedom fails
- assert(_bind(USER1_DOMAIN, PW))
+ assert (_bind(USER1_DOMAIN, PW))
# Add a conflicting user to an alternate subtree
- _create_user(topology.standalone, USER1, USER1_CONFLICT_DN)
+ _create_user(topology_st.standalone, USER1, USER1_CONFLICT_DN)
# Change the plugin to search from the rootdn instead
# This means we have a conflicting user in scope now!
- topology.standalone.modify_s("cn=example.com,cn=addn,cn=plugins,cn=config", [(ldap.MOD_REPLACE, 'addn_base', DEFAULT_SUFFIX)])
- topology.standalone.restart(60)
+ topology_st.standalone.modify_s("cn=example.com,cn=addn,cn=plugins,cn=config",
+ [(ldap.MOD_REPLACE, 'addn_base', DEFAULT_SUFFIX)])
+ topology_st.standalone.restart(60)
# Make sure our binds still work.
- assert(_bind(USER1_DN, PW))
- assert(_bind(USER1_CONFLICT_DN, PW))
- for i in range(0,10):
+ assert (_bind(USER1_DN, PW))
+ assert (_bind(USER1_CONFLICT_DN, PW))
+ for i in range(0, 10):
# Test bind as name fails
try:
_bind(USER1, PW)
- assert(False)
+ assert (False)
except:
pass
# Test bind as name@domain fails too
try:
_bind(USER1_DOMAIN, PW)
- assert(False)
+ assert (False)
except:
pass
@@ -177,4 +124,3 @@ if __name__ == '__main__':
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s %s" % CURRENT_FILE)
-
diff --git a/dirsrvtests/tests/tickets/ticket48294_test.py b/dirsrvtests/tests/tickets/ticket48294_test.py
index 265533f..260d9f6 100644
--- a/dirsrvtests/tests/tickets/ticket48294_test.py
+++ b/dirsrvtests/tests/tickets/ticket48294_test.py
@@ -6,18 +6,14 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
+import logging
import time
+
import ldap
-import logging
import pytest
-import shutil
-from lib389 import DirSrv, Entry, tools
-from lib389 import DirSrvTools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
@@ -28,56 +24,15 @@ LINKTYPE = 'directReport'
MANAGEDTYPE = 'manager'
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-
-def _header(topology, label):
- topology.standalone.log.info("###############################################")
- topology.standalone.log.info("####### %s" % label)
- topology.standalone.log.info("###############################################")
+def _header(topology_st, label):
+ topology_st.standalone.log.info("###############################################")
+ topology_st.standalone.log.info("####### %s" % label)
+ topology_st.standalone.log.info("###############################################")
-def check_attr_val(topology, dn, attr, expected):
+def check_attr_val(topology_st, dn, attr, expected):
try:
- centry = topology.standalone.search_s(dn, ldap.SCOPE_BASE, 'uid=*')
+ centry = topology_st.standalone.search_s(dn, ldap.SCOPE_BASE, 'uid=*')
if centry:
val = centry[0].getValue(attr)
if val.lower() == expected.lower():
@@ -93,19 +48,19 @@ def check_attr_val(topology, dn, attr, expected):
assert False
-def _modrdn_entry(topology=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None):
- assert topology is not None
+def _modrdn_entry(topology_st=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None):
+ assert topology_st is not None
assert entry_dn is not None
assert new_rdn is not None
- topology.standalone.log.info("\n\n######################### MODRDN %s ######################\n" % new_rdn)
+ topology_st.standalone.log.info("\n\n######################### MODRDN %s ######################\n" % new_rdn)
try:
if new_superior:
- topology.standalone.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old)
+ topology_st.standalone.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old)
else:
- topology.standalone.rename_s(entry_dn, new_rdn, delold=del_old)
+ topology_st.standalone.rename_s(entry_dn, new_rdn, delold=del_old)
except ldap.NO_SUCH_ATTRIBUTE:
- topology.standalone.log.info("accepted failure due to 47833: modrdn reports error.. but succeeds")
+ topology_st.standalone.log.info("accepted failure due to 47833: modrdn reports error.. but succeeds")
attempt = 0
if new_superior:
dn = "%s,%s" % (new_rdn, new_superior)
@@ -117,139 +72,140 @@ def _modrdn_entry(topology=None, entry_dn=None, new_rdn=None, del_old=0, new_sup
while attempt < 10:
try:
- ent = topology.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter)
+ ent = topology_st.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter)
break
except ldap.NO_SUCH_OBJECT:
- topology.standalone.log.info("Accept failure due to 47833: unable to find (base) a modrdn entry")
+ topology_st.standalone.log.info("Accept failure due to 47833: unable to find (base) a modrdn entry")
attempt += 1
time.sleep(1)
if attempt == 10:
- ent = topology.standalone.getEntry(base, ldap.SCOPE_SUBTREE, myfilter)
- ent = topology.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter)
+ ent = topology_st.standalone.getEntry(base, ldap.SCOPE_SUBTREE, myfilter)
+ ent = topology_st.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter)
-def test_48294_init(topology):
+def test_48294_init(topology_st):
"""
Set up Linked Attribute
"""
- _header(topology, 'Testing Ticket 48294 - Linked Attributes plug-in - won\'t update links after MODRDN operation')
+ _header(topology_st,
+ 'Testing Ticket 48294 - Linked Attributes plug-in - won\'t update links after MODRDN operation')
log.info('Enable Dynamic plugins, and the linked Attrs plugin')
try:
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
+ topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
except ldap.LDAPError as e:
ldap.fatal('Failed to enable dynamic plugin!' + e.message['desc'])
assert False
try:
- topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
+ topology_st.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
except ValueError as e:
ldap.fatal('Failed to enable linked attributes plugin!' + e.message['desc'])
assert False
log.info('Add the plugin config entry')
try:
- topology.standalone.add_s(Entry((MANAGER_LINK, {
- 'objectclass': 'top extensibleObject'.split(),
- 'cn': 'Manager Link',
- 'linkType': LINKTYPE,
- 'managedType': MANAGEDTYPE
- })))
+ topology_st.standalone.add_s(Entry((MANAGER_LINK, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'cn': 'Manager Link',
+ 'linkType': LINKTYPE,
+ 'managedType': MANAGEDTYPE
+ })))
except ldap.LDAPError as e:
log.fatal('Failed to add linked attr config entry: error ' + e.message['desc'])
assert False
log.info('Add 2 entries: manager1 and employee1')
try:
- topology.standalone.add_s(Entry(('uid=manager1,%s' % OU_PEOPLE, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'manager1'})))
+ topology_st.standalone.add_s(Entry(('uid=manager1,%s' % OU_PEOPLE, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'manager1'})))
except ldap.LDAPError as e:
log.fatal('Add manager1 failed: error ' + e.message['desc'])
assert False
try:
- topology.standalone.add_s(Entry(('uid=employee1,%s' % OU_PEOPLE, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'employee1'})))
+ topology_st.standalone.add_s(Entry(('uid=employee1,%s' % OU_PEOPLE, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'employee1'})))
except ldap.LDAPError as e:
log.fatal('Add employee1 failed: error ' + e.message['desc'])
assert False
log.info('Add linktype to manager1')
- topology.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE,
- [(ldap.MOD_ADD, LINKTYPE, 'uid=employee1,%s' % OU_PEOPLE)])
+ topology_st.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE,
+ [(ldap.MOD_ADD, LINKTYPE, 'uid=employee1,%s' % OU_PEOPLE)])
log.info('Check managed attribute')
- check_attr_val(topology, 'uid=employee1,%s' % OU_PEOPLE, MANAGEDTYPE, 'uid=manager1,%s' % OU_PEOPLE)
+ check_attr_val(topology_st, 'uid=employee1,%s' % OU_PEOPLE, MANAGEDTYPE, 'uid=manager1,%s' % OU_PEOPLE)
log.info('PASSED')
-def test_48294_run_0(topology):
+def test_48294_run_0(topology_st):
"""
Rename employee1 to employee2 and adjust the value of directReport by replace
"""
- _header(topology, 'Case 0 - Rename employee1 and adjust the link type value by replace')
+ _header(topology_st, 'Case 0 - Rename employee1 and adjust the link type value by replace')
log.info('Rename employee1 to employee2')
- _modrdn_entry(topology, entry_dn='uid=employee1,%s' % OU_PEOPLE, new_rdn='uid=employee2')
+ _modrdn_entry(topology_st, entry_dn='uid=employee1,%s' % OU_PEOPLE, new_rdn='uid=employee2')
log.info('Modify the value of directReport to uid=employee2')
try:
- topology.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE,
- [(ldap.MOD_REPLACE, LINKTYPE, 'uid=employee2,%s' % OU_PEOPLE)])
+ topology_st.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE,
+ [(ldap.MOD_REPLACE, LINKTYPE, 'uid=employee2,%s' % OU_PEOPLE)])
except ldap.LDAPError as e:
log.fatal('Failed to replace uid=employee1 with employee2: ' + e.message['desc'])
assert False
log.info('Check managed attribute')
- check_attr_val(topology, 'uid=employee2,%s' % OU_PEOPLE, MANAGEDTYPE, 'uid=manager1,%s' % OU_PEOPLE)
+ check_attr_val(topology_st, 'uid=employee2,%s' % OU_PEOPLE, MANAGEDTYPE, 'uid=manager1,%s' % OU_PEOPLE)
log.info('PASSED')
-def test_48294_run_1(topology):
+def test_48294_run_1(topology_st):
"""
Rename employee2 to employee3 and adjust the value of directReport by delete and add
"""
- _header(topology, 'Case 1 - Rename employee2 and adjust the link type value by delete and add')
+ _header(topology_st, 'Case 1 - Rename employee2 and adjust the link type value by delete and add')
log.info('Rename employee2 to employee3')
- _modrdn_entry(topology, entry_dn='uid=employee2,%s' % OU_PEOPLE, new_rdn='uid=employee3')
+ _modrdn_entry(topology_st, entry_dn='uid=employee2,%s' % OU_PEOPLE, new_rdn='uid=employee3')
log.info('Modify the value of directReport to uid=employee3')
try:
- topology.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE,
- [(ldap.MOD_DELETE, LINKTYPE, 'uid=employee2,%s' % OU_PEOPLE)])
+ topology_st.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE,
+ [(ldap.MOD_DELETE, LINKTYPE, 'uid=employee2,%s' % OU_PEOPLE)])
except ldap.LDAPError as e:
log.fatal('Failed to delete employee2: ' + e.message['desc'])
assert False
try:
- topology.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE,
- [(ldap.MOD_ADD, LINKTYPE, 'uid=employee3,%s' % OU_PEOPLE)])
+ topology_st.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE,
+ [(ldap.MOD_ADD, LINKTYPE, 'uid=employee3,%s' % OU_PEOPLE)])
except ldap.LDAPError as e:
log.fatal('Failed to add employee3: ' + e.message['desc'])
assert False
log.info('Check managed attribute')
- check_attr_val(topology, 'uid=employee3,%s' % OU_PEOPLE, MANAGEDTYPE, 'uid=manager1,%s' % OU_PEOPLE)
+ check_attr_val(topology_st, 'uid=employee3,%s' % OU_PEOPLE, MANAGEDTYPE, 'uid=manager1,%s' % OU_PEOPLE)
log.info('PASSED')
-def test_48294_run_2(topology):
+def test_48294_run_2(topology_st):
"""
Rename manager1 to manager2 and make sure the managed attribute value is updated
"""
- _header(topology, 'Case 2 - Rename manager1 to manager2 and make sure the managed attribute value is updated')
+ _header(topology_st, 'Case 2 - Rename manager1 to manager2 and make sure the managed attribute value is updated')
log.info('Rename manager1 to manager2')
- _modrdn_entry(topology, entry_dn='uid=manager1,%s' % OU_PEOPLE, new_rdn='uid=manager2')
+ _modrdn_entry(topology_st, entry_dn='uid=manager1,%s' % OU_PEOPLE, new_rdn='uid=manager2')
log.info('Check managed attribute')
- check_attr_val(topology, 'uid=employee3,%s' % OU_PEOPLE, MANAGEDTYPE, 'uid=manager2,%s' % OU_PEOPLE)
+ check_attr_val(topology_st, 'uid=employee3,%s' % OU_PEOPLE, MANAGEDTYPE, 'uid=manager2,%s' % OU_PEOPLE)
log.info('PASSED')
diff --git a/dirsrvtests/tests/tickets/ticket48295_test.py b/dirsrvtests/tests/tickets/ticket48295_test.py
index 42e2d38..db8fc64 100644
--- a/dirsrvtests/tests/tickets/ticket48295_test.py
+++ b/dirsrvtests/tests/tickets/ticket48295_test.py
@@ -6,18 +6,13 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
import logging
+
+import ldap
import pytest
-import shutil
-from lib389 import DirSrv, Entry, tools
-from lib389 import DirSrvTools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
@@ -28,55 +23,15 @@ LINKTYPE = 'directReport'
MANAGEDTYPE = 'manager'
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
+def _header(topology_st, label):
+ topology_st.standalone.log.info("###############################################")
+ topology_st.standalone.log.info("####### %s" % label)
+ topology_st.standalone.log.info("###############################################")
-def _header(topology, label):
- topology.standalone.log.info("###############################################")
- topology.standalone.log.info("####### %s" % label)
- topology.standalone.log.info("###############################################")
-def check_attr_val(topology, dn, attr, expected, revert):
+def check_attr_val(topology_st, dn, attr, expected, revert):
try:
- centry = topology.standalone.search_s(dn, ldap.SCOPE_BASE, 'uid=*')
+ centry = topology_st.standalone.search_s(dn, ldap.SCOPE_BASE, 'uid=*')
if centry:
val = centry[0].getValue(attr)
if val:
@@ -106,50 +61,51 @@ def check_attr_val(topology, dn, attr, expected, revert):
assert False
-def test_48295_init(topology):
+def test_48295_init(topology_st):
"""
Set up Linked Attribute
"""
- _header(topology, 'Testing Ticket 48295 - Entry cache is not rolled back -- Linked Attributes plug-in - wrong behaviour when adding valid and broken links')
+ _header(topology_st,
+ 'Testing Ticket 48295 - Entry cache is not rolled back -- Linked Attributes plug-in - wrong behaviour when adding valid and broken links')
log.info('Enable Dynamic plugins, and the linked Attrs plugin')
try:
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
+ topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
except ldap.LDAPError as e:
ldap.fatal('Failed to enable dynamic plugin!' + e.message['desc'])
assert False
try:
- topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
+ topology_st.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
except ValueError as e:
ldap.fatal('Failed to enable linked attributes plugin!' + e.message['desc'])
assert False
log.info('Add the plugin config entry')
try:
- topology.standalone.add_s(Entry((MANAGER_LINK, {
- 'objectclass': 'top extensibleObject'.split(),
- 'cn': 'Manager Link',
- 'linkType': LINKTYPE,
- 'managedType': MANAGEDTYPE
- })))
+ topology_st.standalone.add_s(Entry((MANAGER_LINK, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'cn': 'Manager Link',
+ 'linkType': LINKTYPE,
+ 'managedType': MANAGEDTYPE
+ })))
except ldap.LDAPError as e:
log.fatal('Failed to add linked attr config entry: error ' + e.message['desc'])
assert False
log.info('Add 2 entries: manager1 and employee1')
try:
- topology.standalone.add_s(Entry(('uid=manager1,%s' % OU_PEOPLE, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'manager1'})))
+ topology_st.standalone.add_s(Entry(('uid=manager1,%s' % OU_PEOPLE, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'manager1'})))
except ldap.LDAPError as e:
log.fatal('Add manager1 failed: error ' + e.message['desc'])
assert False
try:
- topology.standalone.add_s(Entry(('uid=employee1,%s' % OU_PEOPLE, {
- 'objectclass': 'top extensibleObject'.split(),
- 'uid': 'employee1'})))
+ topology_st.standalone.add_s(Entry(('uid=employee1,%s' % OU_PEOPLE, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'employee1'})))
except ldap.LDAPError as e:
log.fatal('Add employee1 failed: error ' + e.message['desc'])
assert False
@@ -157,22 +113,23 @@ def test_48295_init(topology):
log.info('PASSED')
-def test_48295_run(topology):
+def test_48295_run(topology_st):
"""
Add 2 linktypes - one exists, another does not
"""
- _header(topology, 'Add 2 linktypes to manager1 - one exists, another does not to make sure the managed entry does not have managed type.')
+ _header(topology_st,
+ 'Add 2 linktypes to manager1 - one exists, another does not to make sure the managed entry does not have managed type.')
try:
- topology.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE,
- [(ldap.MOD_ADD, LINKTYPE, 'uid=employee1,%s' % OU_PEOPLE),
- (ldap.MOD_ADD, LINKTYPE, 'uid=doNotExist,%s' % OU_PEOPLE)])
+ topology_st.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE,
+ [(ldap.MOD_ADD, LINKTYPE, 'uid=employee1,%s' % OU_PEOPLE),
+ (ldap.MOD_ADD, LINKTYPE, 'uid=doNotExist,%s' % OU_PEOPLE)])
except ldap.UNWILLING_TO_PERFORM:
log.info('Add uid=employee1 and uid=doNotExist expectedly failed.')
pass
log.info('Check managed attribute does not exist.')
- check_attr_val(topology, 'uid=employee1,%s' % OU_PEOPLE, MANAGEDTYPE, 'uid=manager1,%s' % OU_PEOPLE, True)
+ check_attr_val(topology_st, 'uid=employee1,%s' % OU_PEOPLE, MANAGEDTYPE, 'uid=manager1,%s' % OU_PEOPLE, True)
log.info('PASSED')
diff --git a/dirsrvtests/tests/tickets/ticket48312_test.py b/dirsrvtests/tests/tickets/ticket48312_test.py
index 0989279..36c30d8 100644
--- a/dirsrvtests/tests/tickets/ticket48312_test.py
+++ b/dirsrvtests/tests/tickets/ticket48312_test.py
@@ -1,61 +1,13 @@
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- standalone.delete()
-
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- return TopologyStandalone(standalone)
-
-
-def test_ticket48312(topology):
+def test_ticket48312(topology_st):
"""
Configure managed entries plugins(tempalte/definition), then perform a
modrdn(deleteoldrdn 1), and make sure the server does not crash.
@@ -72,19 +24,19 @@ def test_ticket48312(topology):
# First enable dynamic plugins
#
try:
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
+ topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
except ldap.LDAPError as e:
ldap.fatal('Failed to enable dynamic plugin!' + e.message['desc'])
assert False
- topology.standalone.plugins.enable(name=PLUGIN_MANAGED_ENTRY)
+ topology_st.standalone.plugins.enable(name=PLUGIN_MANAGED_ENTRY)
#
# Add our org units (they should already exist, but do it just in case)
#
try:
- topology.standalone.add_s(Entry((PEOPLE_OU, {
- 'objectclass': 'top extensibleObject'.split(),
- 'ou': 'people'})))
+ topology_st.standalone.add_s(Entry((PEOPLE_OU, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'ou': 'people'})))
except ldap.ALREADY_EXISTS:
pass
except ldap.LDAPError as e:
@@ -92,9 +44,9 @@ def test_ticket48312(topology):
assert False
try:
- topology.standalone.add_s(Entry((GROUP_OU, {
- 'objectclass': 'top extensibleObject'.split(),
- 'ou': 'people'})))
+ topology_st.standalone.add_s(Entry((GROUP_OU, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'ou': 'people'})))
except ldap.ALREADY_EXISTS:
pass
except ldap.LDAPError as e:
@@ -105,13 +57,13 @@ def test_ticket48312(topology):
# Add the template entry
#
try:
- topology.standalone.add_s(Entry((TEMPLATE_DN, {
- 'objectclass': 'top mepTemplateEntry extensibleObject'.split(),
- 'cn': 'MEP Template',
- 'mepRDNAttr': 'cn',
- 'mepStaticAttr': ['objectclass: posixGroup', 'objectclass: extensibleObject'],
- 'mepMappedAttr': ['cn: $uid', 'uid: $cn', 'gidNumber: $uidNumber']
- })))
+ topology_st.standalone.add_s(Entry((TEMPLATE_DN, {
+ 'objectclass': 'top mepTemplateEntry extensibleObject'.split(),
+ 'cn': 'MEP Template',
+ 'mepRDNAttr': 'cn',
+ 'mepStaticAttr': ['objectclass: posixGroup', 'objectclass: extensibleObject'],
+ 'mepMappedAttr': ['cn: $uid', 'uid: $cn', 'gidNumber: $uidNumber']
+ })))
except ldap.LDAPError as e:
log.fatal('test_mep: Failed to add template entry: error ' + e.message['desc'])
assert False
@@ -120,14 +72,14 @@ def test_ticket48312(topology):
# Add the definition entry
#
try:
- topology.standalone.add_s(Entry((CONFIG_DN, {
- 'objectclass': 'top extensibleObject'.split(),
- 'cn': 'config',
- 'originScope': PEOPLE_OU,
- 'originFilter': 'objectclass=posixAccount',
- 'managedBase': GROUP_OU,
- 'managedTemplate': TEMPLATE_DN
- })))
+ topology_st.standalone.add_s(Entry((CONFIG_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'cn': 'config',
+ 'originScope': PEOPLE_OU,
+ 'originFilter': 'objectclass=posixAccount',
+ 'managedBase': GROUP_OU,
+ 'managedTemplate': TEMPLATE_DN
+ })))
except ldap.LDAPError as e:
log.fatal('test_mep: Failed to add config entry: error ' + e.message['desc'])
assert False
@@ -136,15 +88,15 @@ def test_ticket48312(topology):
# Add an entry that meets the MEP scope
#
try:
- topology.standalone.add_s(Entry((USER_DN, {
- 'objectclass': 'top posixAccount extensibleObject'.split(),
- 'uid': 'user1',
- 'cn': 'user1',
- 'uidNumber': '1',
- 'gidNumber': '1',
- 'homeDirectory': '/home/user1',
- 'description': 'uiser description'
- })))
+ topology_st.standalone.add_s(Entry((USER_DN, {
+ 'objectclass': 'top posixAccount extensibleObject'.split(),
+ 'uid': 'user1',
+ 'cn': 'user1',
+ 'uidNumber': '1',
+ 'gidNumber': '1',
+ 'homeDirectory': '/home/user1',
+ 'description': 'user description'
+ })))
except ldap.LDAPError as e:
log.fatal('test_mep: Failed to user1: error ' + e.message['desc'])
assert False
@@ -153,7 +105,7 @@ def test_ticket48312(topology):
# Perform a modrdn on USER_DN
#
try:
- topology.standalone.rename_s(USER_DN, USER_NEWRDN, delold=1)
+ topology_st.standalone.rename_s(USER_DN, USER_NEWRDN, delold=1)
except ldap.LDAPError as e:
log.error('Failed to modrdn: error ' + e.message['desc'])
assert False
@@ -165,4 +117,4 @@ if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
\ No newline at end of file
+ pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/tickets/ticket48325_test.py b/dirsrvtests/tests/tickets/ticket48325_test.py
index 3505d1a..3f52188 100644
--- a/dirsrvtests/tests/tickets/ticket48325_test.py
+++ b/dirsrvtests/tests/tickets/ticket48325_test.py
@@ -1,151 +1,10 @@
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_m1h1c1
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
-
-class TopologyReplication(object):
- def __init__(self, master1, hub1, consumer1):
- master1.open()
- self.master1 = master1
- hub1.open()
- self.hub1 = hub1
- consumer1.open()
- self.consumer1 = consumer1
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating master 1...
- master1 = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_MASTER_1
- args_instance[SER_PORT] = PORT_MASTER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master1.allocate(args_master)
- instance_master1 = master1.exists()
- if instance_master1:
- master1.delete()
- master1.create()
- master1.open()
- master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
- replicaId=REPLICAID_MASTER_1)
-
- # Creating hub 1...
- hub1 = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_HUB_1
- args_instance[SER_PORT] = PORT_HUB_1
- args_instance[SER_SERVERID_PROP] = SERVERID_HUB_1
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_hub = args_instance.copy()
- hub1.allocate(args_hub)
- instance_hub1 = hub1.exists()
- if instance_hub1:
- hub1.delete()
- hub1.create()
- hub1.open()
- hub1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_HUB,
- replicaId=REPLICAID_HUB_1)
-
- # Creating consumer 1...
- consumer1 = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_CONSUMER_1
- args_instance[SER_PORT] = PORT_CONSUMER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_consumer = args_instance.copy()
- consumer1.allocate(args_consumer)
- instance_consumer1 = consumer1.exists()
- if instance_consumer1:
- consumer1.delete()
- consumer1.create()
- consumer1.open()
- consumer1.changelog.create()
- consumer1.replica.enableReplication(suffix=SUFFIX,
- role=REPLICAROLE_CONSUMER,
- replicaId=CONSUMER_REPLICAID)
-
- #
- # Create all the agreements
- #
- # Creating agreement from master 1 to hub 1
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m1_h1_agmt = master1.agreement.create(suffix=SUFFIX, host=hub1.host,
- port=hub1.port,
- properties=properties)
- if not m1_h1_agmt:
- log.fatal("Fail to create a master -> hub replica agreement")
- sys.exit(1)
- log.debug("%s created" % m1_h1_agmt)
-
- # Creating agreement from hub 1 to consumer 1
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- h1_c1_agmt = hub1.agreement.create(suffix=SUFFIX, host=consumer1.host,
- port=consumer1.port,
- properties=properties)
- if not h1_c1_agmt:
- log.fatal("Fail to create a hub -> consumer replica agreement")
- sys.exit(1)
- log.debug("%s created" % h1_c1_agmt)
-
- # Allow the replicas to get situated with the new agreements...
- time.sleep(5)
-
- #
- # Initialize all the agreements
- #
- master1.agreement.init(SUFFIX, HOST_HUB_1, PORT_HUB_1)
- master1.waitForReplInit(m1_h1_agmt)
- hub1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
- hub1.waitForReplInit(h1_c1_agmt)
-
- # Check replication is working...
- if master1.testReplication(DEFAULT_SUFFIX, consumer1):
- log.info('Replication is working.')
- else:
- log.fatal('Replication is not working.')
- assert False
-
- # Delete each instance in the end
- def fin():
- master1.delete()
- hub1.delete()
- consumer1.delete()
- pass
-
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- master1.clearTmpDir(__file__)
-
- return TopologyReplication(master1, hub1, consumer1)
-
def checkFirstElement(ds, rid):
"""
@@ -169,7 +28,7 @@ def checkFirstElement(ds, rid):
return False
-def test_ticket48325(topology):
+def test_ticket48325(topology_m1h1c1):
"""
Test that the RUV element order is correctly maintained when promoting
a hub or consumer.
@@ -179,16 +38,17 @@ def test_ticket48325(topology):
# Promote consumer to master
#
try:
- DN = topology.consumer1.replica._get_mt_entry(DEFAULT_SUFFIX)
- topology.consumer1.modify_s(DN, [(ldap.MOD_REPLACE,
- 'nsDS5ReplicaType',
- '3'),
- (ldap.MOD_REPLACE,
- 'nsDS5ReplicaID',
- '1234'),
- (ldap.MOD_REPLACE,
- 'nsDS5Flags',
- '1')])
+ topology_m1h1c1.cs["consumer1"].changelog.create()
+ DN = topology_m1h1c1.cs["consumer1"].replica._get_mt_entry(DEFAULT_SUFFIX)
+ topology_m1h1c1.cs["consumer1"].modify_s(DN, [(ldap.MOD_REPLACE,
+ 'nsDS5ReplicaType',
+ '3'),
+ (ldap.MOD_REPLACE,
+ 'nsDS5ReplicaID',
+ '1234'),
+ (ldap.MOD_REPLACE,
+ 'nsDS5Flags',
+ '1')])
except ldap.LDAPError as e:
log.fatal('Failed to promote consuemr to master: error %s' % str(e))
assert False
@@ -197,22 +57,23 @@ def test_ticket48325(topology):
#
# Check ruv has been reordered
#
- if not checkFirstElement(topology.consumer1, '1234'):
+ if not checkFirstElement(topology_m1h1c1.cs["consumer1"], '1234'):
log.fatal('RUV was not reordered')
assert False
#
# Create repl agreement from the newly promoted master to master1
#
- properties = {RA_NAME: r'meTo_$host:$port',
+ properties = {RA_NAME: 'meTo_{}:{}'.format(topology_m1h1c1.ms["master1"].host,
+ str(topology_m1h1c1.ms["master1"].port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- new_agmt = topology.consumer1.agreement.create(suffix=SUFFIX,
- host=topology.master1.host,
- port=topology.master1.port,
- properties=properties)
+ new_agmt = topology_m1h1c1.cs["consumer1"].agreement.create(suffix=SUFFIX,
+ host=topology_m1h1c1.ms["master1"].host,
+ port=topology_m1h1c1.ms["master1"].port,
+ properties=properties)
if not new_agmt:
log.fatal("Fail to create new agmt from old consumer to the master")
@@ -221,7 +82,7 @@ def test_ticket48325(topology):
#
# Test replication is working
#
- if topology.consumer1.testReplication(DEFAULT_SUFFIX, topology.master1):
+ if topology_m1h1c1.cs["consumer1"].testReplication(DEFAULT_SUFFIX, topology_m1h1c1.ms["master1"]):
log.info('Replication is working.')
else:
log.fatal('Replication is not working.')
@@ -231,13 +92,13 @@ def test_ticket48325(topology):
# Promote hub to master
#
try:
- DN = topology.hub1.replica._get_mt_entry(DEFAULT_SUFFIX)
- topology.hub1.modify_s(DN, [(ldap.MOD_REPLACE,
- 'nsDS5ReplicaType',
- '3'),
- (ldap.MOD_REPLACE,
- 'nsDS5ReplicaID',
- '5678')])
+ DN = topology_m1h1c1.hs["hub1"].replica._get_mt_entry(DEFAULT_SUFFIX)
+ topology_m1h1c1.hs["hub1"].modify_s(DN, [(ldap.MOD_REPLACE,
+ 'nsDS5ReplicaType',
+ '3'),
+ (ldap.MOD_REPLACE,
+ 'nsDS5ReplicaID',
+ '5678')])
except ldap.LDAPError as e:
log.fatal('Failed to promote consuemr to master: error %s' % str(e))
assert False
@@ -246,14 +107,14 @@ def test_ticket48325(topology):
#
# Check ruv has been reordered
#
- if not checkFirstElement(topology.hub1, '5678'):
+ if not checkFirstElement(topology_m1h1c1.hs["hub1"], '5678'):
log.fatal('RUV was not reordered')
assert False
#
# Test replication is working
#
- if topology.hub1.testReplication(DEFAULT_SUFFIX, topology.master1):
+ if topology_m1h1c1.hs["hub1"].testReplication(DEFAULT_SUFFIX, topology_m1h1c1.ms["master1"]):
log.info('Replication is working.')
else:
log.fatal('Replication is not working.')
@@ -267,4 +128,4 @@ if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
\ No newline at end of file
+ pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/tickets/ticket48342_test.py b/dirsrvtests/tests/tickets/ticket48342_test.py
index de0dea9..ef68c5f 100644
--- a/dirsrvtests/tests/tickets/ticket48342_test.py
+++ b/dirsrvtests/tests/tickets/ticket48342_test.py
@@ -1,208 +1,30 @@
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_m3
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
PEOPLE_OU = 'people'
PEOPLE_DN = "ou=%s,%s" % (PEOPLE_OU, SUFFIX)
MAX_ACCOUNTS = 5
-class TopologyReplication(object):
- def __init__(self, master1, master2, master3):
- master1.open()
- self.master1 = master1
- master2.open()
- self.master2 = master2
- master3.open()
- self.master3 = master3
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating master 1...
- master1 = DirSrv(verbose=False)
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
- args_instance[SER_HOST] = HOST_MASTER_1
- args_instance[SER_PORT] = PORT_MASTER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master1.allocate(args_master)
- instance_master1 = master1.exists()
- if instance_master1:
- master1.delete()
- master1.create()
- master1.open()
- master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-
- # Creating master 2...
- master2 = DirSrv(verbose=False)
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
- args_instance[SER_HOST] = HOST_MASTER_2
- args_instance[SER_PORT] = PORT_MASTER_2
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master2.allocate(args_master)
- instance_master2 = master2.exists()
- if instance_master2:
- master2.delete()
- master2.create()
- master2.open()
- master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
- # Creating master 3...
- master3 = DirSrv(verbose=False)
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
- args_instance[SER_HOST] = HOST_MASTER_3
- args_instance[SER_PORT] = PORT_MASTER_3
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_3
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master3.allocate(args_master)
- instance_master3 = master3.exists()
- if instance_master3:
- master3.delete()
- master3.create()
- master3.open()
- master3.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_3)
-
- #
- # Create all the agreements
- #
- # Creating agreement from master 1 to master 2
- properties = {RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
- if not m1_m2_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m1_m2_agmt)
-
- # Creating agreement from master 1 to master 3
-# properties = {RA_NAME: r'meTo_$host:$port',
-# RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
-# RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
-# RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
-# RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-# m1_m3_agmt = master1.agreement.create(suffix=SUFFIX, host=master3.host, port=master3.port, properties=properties)
-# if not m1_m3_agmt:
-# log.fatal("Fail to create a master -> master replica agreement")
-# sys.exit(1)
-# log.debug("%s created" % m1_m3_agmt)
-
- # Creating agreement from master 2 to master 1
- properties = {RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
- if not m2_m1_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m2_m1_agmt)
-
- # Creating agreement from master 2 to master 3
- properties = {RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m2_m3_agmt = master2.agreement.create(suffix=SUFFIX, host=master3.host, port=master3.port, properties=properties)
- if not m2_m3_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m2_m3_agmt)
-
- # Creating agreement from master 3 to master 1
-# properties = {RA_NAME: r'meTo_$host:$port',
-# RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
-# RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
-# RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
-# RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-# m3_m1_agmt = master3.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
-# if not m3_m1_agmt:
-# log.fatal("Fail to create a master -> master replica agreement")
-# sys.exit(1)
-# log.debug("%s created" % m3_m1_agmt)
-
- # Creating agreement from master 3 to master 2
- properties = {RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m3_m2_agmt = master3.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
- if not m3_m2_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m3_m2_agmt)
-
- # Allow the replicas to get situated with the new agreements...
- time.sleep(5)
-
- #
- # Initialize all the agreements
- #
- master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
- master1.waitForReplInit(m1_m2_agmt)
- time.sleep(5) # just to be safe
- master2.agreement.init(SUFFIX, HOST_MASTER_3, PORT_MASTER_3)
- master2.waitForReplInit(m2_m3_agmt)
-
- # Check replication is working...
- if master1.testReplication(DEFAULT_SUFFIX, master2):
- log.info('Replication is working.')
- else:
- log.fatal('Replication is not working.')
- assert False
-
- # Delete each instance in the end
- def fin():
- for master in (master1, master2, master3):
- master.delete()
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- master1.clearTmpDir(__file__)
-
- return TopologyReplication(master1, master2, master3)
-
-
def _dna_config(server, nextValue=500, maxValue=510):
log.info("Add dna plugin config entry...%s" % server)
try:
server.add_s(Entry(('cn=dna config,cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config', {
- 'objectclass': 'top dnaPluginConfig'.split(),
- 'dnaType': 'description',
- 'dnaMagicRegen': '-1',
- 'dnaFilter': '(objectclass=posixAccount)',
- 'dnaScope': 'ou=people,%s' % SUFFIX,
- 'dnaNextValue': str(nextValue),
- 'dnaMaxValue' : str(nextValue+maxValue),
- 'dnaSharedCfgDN': 'ou=ranges,%s' % SUFFIX
- })))
+ 'objectclass': 'top dnaPluginConfig'.split(),
+ 'dnaType': 'description',
+ 'dnaMagicRegen': '-1',
+ 'dnaFilter': '(objectclass=posixAccount)',
+ 'dnaScope': 'ou=people,%s' % SUFFIX,
+ 'dnaNextValue': str(nextValue),
+ 'dnaMaxValue': str(nextValue + maxValue),
+ 'dnaSharedCfgDN': 'ou=ranges,%s' % SUFFIX
+ })))
except ldap.LDAPError as e:
log.error('Failed to add DNA config entry: error ' + e.message['desc'])
@@ -222,51 +44,51 @@ def _dna_config(server, nextValue=500, maxValue=510):
time.sleep(3)
-def test_ticket4026(topology):
+def test_ticket4026(topology_m3):
"""Write your replication testcase here.
- To access each DirSrv instance use: topology.master1, topology.master2,
- ..., topology.hub1, ..., topology.consumer1, ...
+ To access each DirSrv instance use: topology_m3.ms["master1"], topology_m3.ms["master2"],
+ ..., topology_m3.hub1, ..., topology_m3.consumer1, ...
Also, if you need any testcase initialization,
please, write additional fixture for that(include finalizer).
"""
try:
- topology.master1.add_s(Entry((PEOPLE_DN, {
- 'objectclass': "top extensibleObject".split(),
- 'ou': 'people'})))
+ topology_m3.ms["master1"].add_s(Entry((PEOPLE_DN, {
+ 'objectclass': "top extensibleObject".split(),
+ 'ou': 'people'})))
except ldap.ALREADY_EXISTS:
pass
- topology.master1.add_s(Entry(('ou=ranges,' + SUFFIX, {
- 'objectclass': 'top organizationalunit'.split(),
- 'ou': 'ranges'
- })))
+ topology_m3.ms["master1"].add_s(Entry(('ou=ranges,' + SUFFIX, {
+ 'objectclass': 'top organizationalunit'.split(),
+ 'ou': 'ranges'
+ })))
for cpt in range(MAX_ACCOUNTS):
name = "user%d" % (cpt)
- topology.master1.add_s(Entry(("uid=%s,%s" %(name, PEOPLE_DN), {
- 'objectclass': 'top posixAccount extensibleObject'.split(),
- 'uid': name,
- 'cn': name,
- 'uidNumber': '1',
- 'gidNumber': '1',
- 'homeDirectory': '/home/%s' % name
- })))
+ topology_m3.ms["master1"].add_s(Entry(("uid=%s,%s" % (name, PEOPLE_DN), {
+ 'objectclass': 'top posixAccount extensibleObject'.split(),
+ 'uid': name,
+ 'cn': name,
+ 'uidNumber': '1',
+ 'gidNumber': '1',
+ 'homeDirectory': '/home/%s' % name
+ })))
# make master3 having more free slots that master2
# so master1 will contact master3
- _dna_config(topology.master1, nextValue=100, maxValue=10)
- _dna_config(topology.master2, nextValue=200, maxValue=10)
- _dna_config(topology.master3, nextValue=300, maxValue=3000)
+ _dna_config(topology_m3.ms["master1"], nextValue=100, maxValue=10)
+ _dna_config(topology_m3.ms["master2"], nextValue=200, maxValue=10)
+ _dna_config(topology_m3.ms["master3"], nextValue=300, maxValue=3000)
# Turn on lots of error logging now.
mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '16384')]
- #mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '1')]
- topology.master1.modify_s('cn=config', mod)
- topology.master2.modify_s('cn=config', mod)
- topology.master3.modify_s('cn=config', mod)
+ # mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '1')]
+ topology_m3.ms["master1"].modify_s('cn=config', mod)
+ topology_m3.ms["master2"].modify_s('cn=config', mod)
+ topology_m3.ms["master3"].modify_s('cn=config', mod)
# We need to wait for the event in dna.c to fire to start the servers
# see dna.c line 899
@@ -275,36 +97,36 @@ def test_ticket4026(topology):
# add on master1 users with description DNA
for cpt in range(10):
name = "user_with_desc1_%d" % (cpt)
- topology.master1.add_s(Entry(("uid=%s,%s" %(name, PEOPLE_DN), {
- 'objectclass': 'top posixAccount extensibleObject'.split(),
- 'uid': name,
- 'cn': name,
- 'description' : '-1',
- 'uidNumber': '1',
- 'gidNumber': '1',
- 'homeDirectory': '/home/%s' % name
- })))
+ topology_m3.ms["master1"].add_s(Entry(("uid=%s,%s" % (name, PEOPLE_DN), {
+ 'objectclass': 'top posixAccount extensibleObject'.split(),
+ 'uid': name,
+ 'cn': name,
+ 'description': '-1',
+ 'uidNumber': '1',
+ 'gidNumber': '1',
+ 'homeDirectory': '/home/%s' % name
+ })))
# give time to negociate master1 <--> master3
time.sleep(10)
- # add on master1 users with description DNA
- for cpt in range(11,20):
+ # add on master1 users with description DNA
+ for cpt in range(11, 20):
name = "user_with_desc1_%d" % (cpt)
- topology.master1.add_s(Entry(("uid=%s,%s" %(name, PEOPLE_DN), {
- 'objectclass': 'top posixAccount extensibleObject'.split(),
- 'uid': name,
- 'cn': name,
- 'description' : '-1',
- 'uidNumber': '1',
- 'gidNumber': '1',
- 'homeDirectory': '/home/%s' % name
- })))
+ topology_m3.ms["master1"].add_s(Entry(("uid=%s,%s" % (name, PEOPLE_DN), {
+ 'objectclass': 'top posixAccount extensibleObject'.split(),
+ 'uid': name,
+ 'cn': name,
+ 'description': '-1',
+ 'uidNumber': '1',
+ 'gidNumber': '1',
+ 'homeDirectory': '/home/%s' % name
+ })))
log.info('Test complete')
# add on master1 users with description DNA
mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '16384')]
- #mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '1')]
- topology.master1.modify_s('cn=config', mod)
- topology.master2.modify_s('cn=config', mod)
- topology.master3.modify_s('cn=config', mod)
+ # mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '1')]
+ topology_m3.ms["master1"].modify_s('cn=config', mod)
+ topology_m3.ms["master2"].modify_s('cn=config', mod)
+ topology_m3.ms["master3"].modify_s('cn=config', mod)
log.info('Test complete')
diff --git a/dirsrvtests/tests/tickets/ticket48354_test.py b/dirsrvtests/tests/tickets/ticket48354_test.py
index 53e1316..d748cd5 100644
--- a/dirsrvtests/tests/tickets/ticket48354_test.py
+++ b/dirsrvtests/tests/tickets/ticket48354_test.py
@@ -1,82 +1,27 @@
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
-DEBUGGING = False
+DEBUGGING = os.getenv('DEBUGGING', False)
if DEBUGGING:
logging.getLogger(__name__).setLevel(logging.DEBUG)
else:
logging.getLogger(__name__).setLevel(logging.INFO)
-
log = logging.getLogger(__name__)
-class TopologyStandalone(object):
- """The DS Topology Class"""
- def __init__(self, standalone):
- """Init"""
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- """Create DS Deployment"""
-
- # Creating standalone instance ...
- if DEBUGGING:
- standalone = DirSrv(verbose=True)
- else:
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- """If we are debugging just stop the instances, otherwise remove
- them
- """
- if DEBUGGING:
- standalone.stop(60)
- else:
- standalone.delete()
-
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- return TopologyStandalone(standalone)
-
-
def _attr_present(conn, name):
- results = conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(%s=*)' % name, [name,])
+ results = conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(%s=*)' % name, [name, ])
if DEBUGGING:
print(results)
if len(results) > 0:
return True
return False
-def test_ticket48354(topology):
+
+def test_ticket48354(topology_st):
"""
Test that we cannot view ACIs, userPassword, or certain other attributes as anonymous.
"""
@@ -91,9 +36,9 @@ def test_ticket48354(topology):
# Make sure that we cannot see:
# * userPassword
- assert(not _attr_present(conn, 'userPassword'))
+ assert (not _attr_present(conn, 'userPassword'))
# * aci
- assert(not _attr_present(conn, 'aci'))
+ assert (not _attr_present(conn, 'aci'))
# * anything else?
conn.unbind_s()
@@ -106,4 +51,3 @@ if __name__ == '__main__':
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s %s" % CURRENT_FILE)
-
diff --git a/dirsrvtests/tests/tickets/ticket48362_test.py b/dirsrvtests/tests/tickets/ticket48362_test.py
index 0fa8037..10687a3 100644
--- a/dirsrvtests/tests/tickets/ticket48362_test.py
+++ b/dirsrvtests/tests/tickets/ticket48362_test.py
@@ -1,120 +1,21 @@
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_m2
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-PEOPLE_OU='people'
+PEOPLE_OU = 'people'
PEOPLE_DN = "ou=%s,%s" % (PEOPLE_OU, SUFFIX)
-MAX_ACCOUNTS=5
+MAX_ACCOUNTS = 5
-BINDMETHOD_ATTR = 'dnaRemoteBindMethod'
+BINDMETHOD_ATTR = 'dnaRemoteBindMethod'
BINDMETHOD_VALUE = "SASL/GSSAPI"
-PROTOCOLE_ATTR = 'dnaRemoteConnProtocol'
-PROTOCOLE_VALUE = 'LDAP'
+PROTOCOLE_ATTR = 'dnaRemoteConnProtocol'
+PROTOCOLE_VALUE = 'LDAP'
-
-class TopologyReplication(object):
- def __init__(self, master1, master2):
- master1.open()
- self.master1 = master1
- master2.open()
- self.master2 = master2
-
-
-@pytest.fixture(scope="module")
-def topology(request):
-
- # Creating master 1...
- master1 = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_MASTER_1
- args_instance[SER_PORT] = PORT_MASTER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master1.allocate(args_master)
- instance_master1 = master1.exists()
- if instance_master1:
- master1.delete()
- master1.create()
- master1.open()
- master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-
- # Creating master 2...
- master2 = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_MASTER_2
- args_instance[SER_PORT] = PORT_MASTER_2
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master2.allocate(args_master)
- instance_master2 = master2.exists()
- if instance_master2:
- master2.delete()
- master2.create()
- master2.open()
- master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
- #
- # Create all the agreements
- #
- # Creating agreement from master 1 to master 2
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
- if not m1_m2_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m1_m2_agmt)
-
- # Creating agreement from master 2 to master 1
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
- if not m2_m1_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m2_m1_agmt)
-
- # Allow the replicas to get situated with the new agreements...
- time.sleep(5)
-
- #
- # Initialize all the agreements
- #
- master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
- master1.waitForReplInit(m1_m2_agmt)
-
- # Check replication is working...
- if master1.testReplication(DEFAULT_SUFFIX, master2):
- log.info('Replication is working.')
- else:
- log.fatal('Replication is not working.')
- assert False
-
- # Delete each instance in the end
- def fin():
- master1.delete()
- master2.delete()
- request.addfinalizer(fin)
-
- return TopologyReplication(master1, master2)
+SHARE_CFG_BASE = 'ou=ranges,' + SUFFIX
def _dna_config(server, nextValue=500, maxValue=510):
@@ -124,15 +25,15 @@ def _dna_config(server, nextValue=500, maxValue=510):
try:
server.add_s(Entry((cfg_base_dn, {
- 'objectclass': 'top dnaPluginConfig'.split(),
- 'dnaType': 'description',
- 'dnaMagicRegen': '-1',
- 'dnaFilter': '(objectclass=posixAccount)',
- 'dnaScope': 'ou=people,%s' % SUFFIX,
- 'dnaNextValue': str(nextValue),
- 'dnaMaxValue' : str(nextValue+maxValue),
- 'dnaSharedCfgDN': 'ou=ranges,%s' % SUFFIX
- })))
+ 'objectclass': 'top dnaPluginConfig'.split(),
+ 'dnaType': 'description',
+ 'dnaMagicRegen': '-1',
+ 'dnaFilter': '(objectclass=posixAccount)',
+ 'dnaScope': 'ou=people,%s' % SUFFIX,
+ 'dnaNextValue': str(nextValue),
+ 'dnaMaxValue': str(nextValue + maxValue),
+ 'dnaSharedCfgDN': 'ou=ranges,%s' % SUFFIX
+ })))
except ldap.LDAPError as e:
log.error('Failed to add DNA config entry: error ' + e.message['desc'])
@@ -152,8 +53,6 @@ def _dna_config(server, nextValue=500, maxValue=510):
time.sleep(3)
-SHARE_CFG_BASE = 'ou=ranges,' + SUFFIX
-
def _wait_shared_cfg_servers(server, expected):
attempts = 0
ents = []
@@ -173,8 +72,9 @@ def _wait_shared_cfg_servers(server, expected):
except lib389.NoSuchEntryError:
pass
+
def _shared_cfg_server_update(server, method=BINDMETHOD_VALUE, transport=PROTOCOLE_VALUE):
- log.info('\n======================== Update dnaPortNum=%d ============================\n'% server.port)
+ log.info('\n======================== Update dnaPortNum=%d ============================\n' % server.port)
try:
ent = server.getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(dnaPortNum=%d)" % server.port)
mod = [(ldap.MOD_REPLACE, BINDMETHOD_ATTR, method),
@@ -188,70 +88,71 @@ def _shared_cfg_server_update(server, method=BINDMETHOD_VALUE, transport=PROTOCO
assert False
-def test_ticket48362(topology):
+def test_ticket48362(topology_m2):
"""Write your replication testcase here.
- To access each DirSrv instance use: topology.master1, topology.master2,
- ..., topology.hub1, ..., topology.consumer1, ...
+ To access each DirSrv instance use: topology_m2.ms["master1"], topology_m2.ms["master2"],
+ ..., topology_m2.hub1, ..., topology_m2.consumer1, ...
Also, if you need any testcase initialization,
please, write additional fixture for that(include finalizer).
"""
try:
- topology.master1.add_s(Entry((PEOPLE_DN, {
- 'objectclass': "top extensibleObject".split(),
- 'ou': 'people'})))
+ topology_m2.ms["master1"].add_s(Entry((PEOPLE_DN, {
+ 'objectclass': "top extensibleObject".split(),
+ 'ou': 'people'})))
except ldap.ALREADY_EXISTS:
pass
- topology.master1.add_s(Entry((SHARE_CFG_BASE, {
- 'objectclass': 'top organizationalunit'.split(),
- 'ou': 'ranges'
- })))
+ topology_m2.ms["master1"].add_s(Entry((SHARE_CFG_BASE, {
+ 'objectclass': 'top organizationalunit'.split(),
+ 'ou': 'ranges'
+ })))
# master 1 will have a valid remaining range (i.e. 101)
# master 2 will not have a valid remaining range (i.e. 0) so dna servers list on master2
# will not contain master 2. So at restart, master 2 is recreated without the method/protocol attribute
- _dna_config(topology.master1, nextValue=1000, maxValue=100)
- _dna_config(topology.master2, nextValue=2000, maxValue=-1)
+ _dna_config(topology_m2.ms["master1"], nextValue=1000, maxValue=100)
+ _dna_config(topology_m2.ms["master2"], nextValue=2000, maxValue=-1)
# check we have all the servers available
- _wait_shared_cfg_servers(topology.master1, 2)
- _wait_shared_cfg_servers(topology.master2, 2)
+ _wait_shared_cfg_servers(topology_m2.ms["master1"], 2)
+ _wait_shared_cfg_servers(topology_m2.ms["master2"], 2)
# now force the method/transport on the servers entry
- _shared_cfg_server_update(topology.master1)
- _shared_cfg_server_update(topology.master2)
-
-
+ _shared_cfg_server_update(topology_m2.ms["master1"])
+ _shared_cfg_server_update(topology_m2.ms["master2"])
log.info('\n======================== BEFORE RESTART ============================\n')
- ent = topology.master1.getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(dnaPortNum=%d)" % topology.master1.port)
+ ent = topology_m2.ms["master1"].getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL,
+ "(dnaPortNum=%d)" % topology_m2.ms["master1"].port)
log.info('\n======================== BEFORE RESTART ============================\n')
- assert(ent.hasAttr(BINDMETHOD_ATTR) and ent.getValue(BINDMETHOD_ATTR) == BINDMETHOD_VALUE)
- assert(ent.hasAttr(PROTOCOLE_ATTR) and ent.getValue(PROTOCOLE_ATTR) == PROTOCOLE_VALUE)
-
+ assert (ent.hasAttr(BINDMETHOD_ATTR) and ent.getValue(BINDMETHOD_ATTR) == BINDMETHOD_VALUE)
+ assert (ent.hasAttr(PROTOCOLE_ATTR) and ent.getValue(PROTOCOLE_ATTR) == PROTOCOLE_VALUE)
- ent = topology.master2.getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(dnaPortNum=%d)" % topology.master2.port)
+ ent = topology_m2.ms["master2"].getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL,
+ "(dnaPortNum=%d)" % topology_m2.ms["master2"].port)
log.info('\n======================== BEFORE RESTART ============================\n')
- assert(ent.hasAttr(BINDMETHOD_ATTR) and ent.getValue(BINDMETHOD_ATTR) == BINDMETHOD_VALUE)
- assert(ent.hasAttr(PROTOCOLE_ATTR) and ent.getValue(PROTOCOLE_ATTR) == PROTOCOLE_VALUE)
- topology.master1.restart(10)
- topology.master2.restart(10)
+ assert (ent.hasAttr(BINDMETHOD_ATTR) and ent.getValue(BINDMETHOD_ATTR) == BINDMETHOD_VALUE)
+ assert (ent.hasAttr(PROTOCOLE_ATTR) and ent.getValue(PROTOCOLE_ATTR) == PROTOCOLE_VALUE)
+ topology_m2.ms["master1"].restart(10)
+ topology_m2.ms["master2"].restart(10)
# to allow DNA plugin to recreate the local host entry
time.sleep(40)
log.info('\n=================== AFTER RESTART =================================\n')
- ent = topology.master1.getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(dnaPortNum=%d)" % topology.master1.port)
+ ent = topology_m2.ms["master1"].getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL,
+ "(dnaPortNum=%d)" % topology_m2.ms["master1"].port)
log.info('\n=================== AFTER RESTART =================================\n')
- assert(ent.hasAttr(BINDMETHOD_ATTR) and ent.getValue(BINDMETHOD_ATTR) == BINDMETHOD_VALUE)
- assert(ent.hasAttr(PROTOCOLE_ATTR) and ent.getValue(PROTOCOLE_ATTR) == PROTOCOLE_VALUE)
+ assert (ent.hasAttr(BINDMETHOD_ATTR) and ent.getValue(BINDMETHOD_ATTR) == BINDMETHOD_VALUE)
+ assert (ent.hasAttr(PROTOCOLE_ATTR) and ent.getValue(PROTOCOLE_ATTR) == PROTOCOLE_VALUE)
- ent = topology.master2.getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(dnaPortNum=%d)" % topology.master2.port)
+ ent = topology_m2.ms["master2"].getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL,
+ "(dnaPortNum=%d)" % topology_m2.ms["master2"].port)
log.info('\n=================== AFTER RESTART =================================\n')
- assert(ent.hasAttr(BINDMETHOD_ATTR) and ent.getValue(BINDMETHOD_ATTR) == BINDMETHOD_VALUE)
- assert(ent.hasAttr(PROTOCOLE_ATTR) and ent.getValue(PROTOCOLE_ATTR) == PROTOCOLE_VALUE)
+ assert (ent.hasAttr(BINDMETHOD_ATTR) and ent.getValue(BINDMETHOD_ATTR) == BINDMETHOD_VALUE)
+ assert (ent.hasAttr(PROTOCOLE_ATTR) and ent.getValue(PROTOCOLE_ATTR) == PROTOCOLE_VALUE)
log.info('Test complete')
diff --git a/dirsrvtests/tests/tickets/ticket48366_test.py b/dirsrvtests/tests/tickets/ticket48366_test.py
index 46ee8ba..98b191f 100644
--- a/dirsrvtests/tests/tickets/ticket48366_test.py
+++ b/dirsrvtests/tests/tickets/ticket48366_test.py
@@ -6,70 +6,28 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
import logging
+
+import ldap
import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from ldap.controls.simple import ProxyAuthzControl
+from lib389 import Entry
+from lib389._constants import *
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
-PROXY_USER_DN = 'cn=proxy,ou=people,%s' % SUFFIX
-TEST_USER_DN = 'cn=test,ou=people,%s' % SUFFIX
-USER_PW = 'password'
-
+PROXY_USER_DN = 'cn=proxy,ou=people,%s' % SUFFIX
+TEST_USER_DN = 'cn=test,ou=people,%s' % SUFFIX
+USER_PW = 'password'
# subtrees used in test
SUBTREE_GREEN = "ou=green,%s" % SUFFIX
-SUBTREE_RED = "ou=red,%s" % SUFFIX
+SUBTREE_RED = "ou=red,%s" % SUFFIX
SUBTREES = (SUBTREE_GREEN, SUBTREE_RED)
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-
-def test_ticket48366_init(topology):
+def test_ticket48366_init(topology_st):
"""
It creates identical entries in 3 subtrees
It creates aci which allow access to a set of attrs
@@ -78,106 +36,103 @@ def test_ticket48366_init(topology):
"""
-
- topology.standalone.log.info("Add subtree: %s" % SUBTREE_GREEN)
- topology.standalone.add_s(Entry((SUBTREE_GREEN, {
- 'objectclass': "top organizationalunit".split(),
- 'ou': "green_one"})))
- topology.standalone.log.info("Add subtree: %s" % SUBTREE_RED)
- topology.standalone.add_s(Entry((SUBTREE_RED, {
- 'objectclass': "top organizationalunit".split(),
- 'ou': "red"})))
+ topology_st.standalone.log.info("Add subtree: %s" % SUBTREE_GREEN)
+ topology_st.standalone.add_s(Entry((SUBTREE_GREEN, {
+ 'objectclass': "top organizationalunit".split(),
+ 'ou': "green_one"})))
+ topology_st.standalone.log.info("Add subtree: %s" % SUBTREE_RED)
+ topology_st.standalone.add_s(Entry((SUBTREE_RED, {
+ 'objectclass': "top organizationalunit".split(),
+ 'ou': "red"})))
# add proxy user and test user
- topology.standalone.log.info("Add %s" % TEST_USER_DN)
- topology.standalone.add_s(Entry((TEST_USER_DN, {
- 'objectclass': "top person".split(),
- 'sn': 'test',
- 'cn': 'test',
- 'userpassword': USER_PW})))
- topology.standalone.log.info("Add %s" % PROXY_USER_DN)
- topology.standalone.add_s(Entry((PROXY_USER_DN, {
- 'objectclass': "top person".split(),
- 'sn': 'proxy',
- 'cn': 'proxy',
- 'userpassword': USER_PW})))
+ topology_st.standalone.log.info("Add %s" % TEST_USER_DN)
+ topology_st.standalone.add_s(Entry((TEST_USER_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': 'test',
+ 'cn': 'test',
+ 'userpassword': USER_PW})))
+ topology_st.standalone.log.info("Add %s" % PROXY_USER_DN)
+ topology_st.standalone.add_s(Entry((PROXY_USER_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': 'proxy',
+ 'cn': 'proxy',
+ 'userpassword': USER_PW})))
# enable acl error logging
# mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '128')]
- # topology.standalone.modify_s(DN_CONFIG, mod)
+ # topology_st.standalone.modify_s(DN_CONFIG, mod)
# get rid of default ACIs
mod = [(ldap.MOD_DELETE, 'aci', None)]
- topology.standalone.modify_s(SUFFIX, mod)
+ topology_st.standalone.modify_s(SUFFIX, mod)
# Ok Now add the proper ACIs
- ACI_TARGET = "(target = \"ldap:///%s\")" % SUBTREE_GREEN
- ACI_TARGETATTR = "(targetattr = \"objectclass || cn || sn || uid || givenname \")"
- ACI_ALLOW = "(version 3.0; acl \"Allow search-read to green subtree\"; allow (read, search, compare)"
- ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % TEST_USER_DN
- ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT
+ ACI_TARGET = "(target = \"ldap:///%s\")" % SUBTREE_GREEN
+ ACI_TARGETATTR = "(targetattr = \"objectclass || cn || sn || uid || givenname \")"
+ ACI_ALLOW = "(version 3.0; acl \"Allow search-read to green subtree\"; allow (read, search, compare)"
+ ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % TEST_USER_DN
+ ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT
mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)]
- topology.standalone.modify_s(SUFFIX, mod)
+ topology_st.standalone.modify_s(SUFFIX, mod)
- ACI_ALLOW = "(version 3.0; acl \"Allow use pf proxy auth to green subtree\"; allow (proxy)"
- ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % PROXY_USER_DN
- ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT
+ ACI_ALLOW = "(version 3.0; acl \"Allow use pf proxy auth to green subtree\"; allow (proxy)"
+ ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % PROXY_USER_DN
+ ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT
mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)]
- topology.standalone.modify_s(SUFFIX, mod)
+ topology_st.standalone.modify_s(SUFFIX, mod)
log.info("Adding %d test entries...")
for id in range(2):
name = "%s%d" % ('test', id)
mail = "%s(a)example.com" % name
for subtree in SUBTREES:
- topology.standalone.add_s(Entry(("cn=%s,%s" % (name, subtree), {
- 'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'sn': name,
- 'cn': name,
- 'uid': name,
- 'givenname': 'test',
- 'mail': mail,
- 'description': 'description',
- 'employeenumber': "%d" % id,
- 'telephonenumber': "%d%d%d" % (id,id,id),
- 'mobile': "%d%d%d" % (id,id,id),
- 'l': 'MV',
- 'title': 'Engineer'})))
-
-
-
-def test_ticket48366_search_user(topology):
-
- proxy_ctrl = ProxyAuthzControl(criticality=True, authzId="dn: "+TEST_USER_DN)
+ topology_st.standalone.add_s(Entry(("cn=%s,%s" % (name, subtree), {
+ 'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'sn': name,
+ 'cn': name,
+ 'uid': name,
+ 'givenname': 'test',
+ 'mail': mail,
+ 'description': 'description',
+ 'employeenumber': "%d" % id,
+ 'telephonenumber': "%d%d%d" % (id, id, id),
+ 'mobile': "%d%d%d" % (id, id, id),
+ 'l': 'MV',
+ 'title': 'Engineer'})))
+
+
+def test_ticket48366_search_user(topology_st):
+ proxy_ctrl = ProxyAuthzControl(criticality=True, authzId="dn: " + TEST_USER_DN)
# searching as test user should return one entry from the green subtree
- topology.standalone.simple_bind_s(TEST_USER_DN, PASSWORD)
- ents = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1')
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, PASSWORD)
+ ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1')
assert (len(ents) == 1)
# searching as proxy user should return no entry
- topology.standalone.simple_bind_s(PROXY_USER_DN, PASSWORD)
- ents = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1')
+ topology_st.standalone.simple_bind_s(PROXY_USER_DN, PASSWORD)
+ ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1')
assert (len(ents) == 0)
# serching as proxy user, authorizing as test user should return 1 entry
- ents = topology.standalone.search_ext_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1', serverctrls=[proxy_ctrl])
+ ents = topology_st.standalone.search_ext_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1', serverctrls=[proxy_ctrl])
assert (len(ents) == 1)
-def test_ticket48366_search_dm(topology):
+def test_ticket48366_search_dm(topology_st):
# searching as directory manager should return one entries from both subtrees
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- ents = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1')
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1')
assert (len(ents) == 2)
# searching as directory manager proxying test user should return one entry
- proxy_ctrl = ProxyAuthzControl(criticality=True, authzId="dn: "+TEST_USER_DN)
- ents = topology.standalone.search_ext_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1', serverctrls=[proxy_ctrl])
+ proxy_ctrl = ProxyAuthzControl(criticality=True, authzId="dn: " + TEST_USER_DN)
+ ents = topology_st.standalone.search_ext_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1', serverctrls=[proxy_ctrl])
assert (len(ents) == 1)
# searching as directory manager proxying proxy user should return no entry
- proxy_ctrl = ProxyAuthzControl(criticality=True, authzId="dn: "+PROXY_USER_DN)
- ents = topology.standalone.search_ext_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1', serverctrls=[proxy_ctrl])
+ proxy_ctrl = ProxyAuthzControl(criticality=True, authzId="dn: " + PROXY_USER_DN)
+ ents = topology_st.standalone.search_ext_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1', serverctrls=[proxy_ctrl])
assert (len(ents) == 0)
diff --git a/dirsrvtests/tests/tickets/ticket48370_test.py b/dirsrvtests/tests/tickets/ticket48370_test.py
index f5b1f47..b8ae106 100644
--- a/dirsrvtests/tests/tickets/ticket48370_test.py
+++ b/dirsrvtests/tests/tickets/ticket48370_test.py
@@ -1,57 +1,13 @@
-import os
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- return TopologyStandalone(standalone)
-
-
-def test_ticket48370(topology):
+def test_ticket48370(topology_st):
"""
Deleting attirbute values and readding a value does not properly update
the pres index. The values are not actually deleted from the index
@@ -62,37 +18,37 @@ def test_ticket48370(topology):
#
# Add an entry
#
- topology.standalone.add_s(Entry((DN, {
- 'objectclass': ['top', 'person',
- 'organizationalPerson',
- 'inetorgperson',
- 'posixAccount'],
- 'givenname': 'test',
- 'sn': 'user',
- 'loginshell': '/bin/bash',
- 'uidNumber': '10099',
- 'gidNumber': '10099',
- 'gecos': 'Test User',
- 'mail': ['user0099(a)dev.null',
- 'alias(a)dev.null',
- 'user0099(a)redhat.com'],
- 'cn': 'Test User',
- 'homeDirectory': '/home/user0099',
- 'uid': 'admin2',
- 'userpassword': 'password'})))
+ topology_st.standalone.add_s(Entry((DN, {
+ 'objectclass': ['top', 'person',
+ 'organizationalPerson',
+ 'inetorgperson',
+ 'posixAccount'],
+ 'givenname': 'test',
+ 'sn': 'user',
+ 'loginshell': '/bin/bash',
+ 'uidNumber': '10099',
+ 'gidNumber': '10099',
+ 'gecos': 'Test User',
+ 'mail': ['user0099(a)dev.null',
+ 'alias(a)dev.null',
+ 'user0099(a)redhat.com'],
+ 'cn': 'Test User',
+ 'homeDirectory': '/home/user0099',
+ 'uid': 'admin2',
+ 'userpassword': 'password'})))
#
# Perform modify (delete & add mail attributes)
#
try:
- topology.standalone.modify_s(DN, [(ldap.MOD_DELETE,
- 'mail',
- 'user0099(a)dev.null'),
- (ldap.MOD_DELETE,
- 'mail',
- 'alias(a)dev.null'),
- (ldap.MOD_ADD,
- 'mail', 'user0099(a)dev.null')])
+ topology_st.standalone.modify_s(DN, [(ldap.MOD_DELETE,
+ 'mail',
+ 'user0099(a)dev.null'),
+ (ldap.MOD_DELETE,
+ 'mail',
+ 'alias(a)dev.null'),
+ (ldap.MOD_ADD,
+ 'mail', 'user0099(a)dev.null')])
except ldap.LDAPError as e:
log.fatal('Failedto modify user: ' + str(e))
assert False
@@ -101,9 +57,9 @@ def test_ticket48370(topology):
# Search using deleted attribute value- no entries should be returned
#
try:
- entry = topology.standalone.search_s(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- 'mail=alias(a)dev.null')
+ entry = topology_st.standalone.search_s(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ 'mail=alias(a)dev.null')
if entry:
log.fatal('Entry incorrectly returned')
assert False
@@ -115,9 +71,9 @@ def test_ticket48370(topology):
# Search using existing attribute value - the entry should be returned
#
try:
- entry = topology.standalone.search_s(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- 'mail=user0099(a)dev.null')
+ entry = topology_st.standalone.search_s(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ 'mail=user0099(a)dev.null')
if entry is None:
log.fatal('Entry not found, but it should have been')
assert False
@@ -129,13 +85,13 @@ def test_ticket48370(topology):
# Delete the last values
#
try:
- topology.standalone.modify_s(DN, [(ldap.MOD_DELETE,
- 'mail',
- 'user0099(a)dev.null'),
- (ldap.MOD_DELETE,
- 'mail',
- 'user0099(a)redhat.com')
- ])
+ topology_st.standalone.modify_s(DN, [(ldap.MOD_DELETE,
+ 'mail',
+ 'user0099(a)dev.null'),
+ (ldap.MOD_DELETE,
+ 'mail',
+ 'user0099(a)redhat.com')
+ ])
except ldap.LDAPError as e:
log.fatal('Failed to modify user: ' + str(e))
assert False
@@ -144,9 +100,9 @@ def test_ticket48370(topology):
# Search using deleted attribute value - no entries should be returned
#
try:
- entry = topology.standalone.search_s(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- 'mail=user0099(a)redhat.com')
+ entry = topology_st.standalone.search_s(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ 'mail=user0099(a)redhat.com')
if entry:
log.fatal('Entry incorrectly returned')
assert False
@@ -159,9 +115,9 @@ def test_ticket48370(topology):
# returned
#
try:
- entry = topology.standalone.search_s(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- 'mail=*')
+ entry = topology_st.standalone.search_s(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ 'mail=*')
if entry:
log.fatal('Entry incorrectly returned')
assert False
@@ -174,10 +130,10 @@ def test_ticket48370(topology):
# a different number of attributes
#
try:
- topology.standalone.modify_s(DN, [(ldap.MOD_ADD,
- 'mail',
- ['user0099(a)dev.null',
- 'alias(a)dev.null'])])
+ topology_st.standalone.modify_s(DN, [(ldap.MOD_ADD,
+ 'mail',
+ ['user0099(a)dev.null',
+ 'alias(a)dev.null'])])
except ldap.LDAPError as e:
log.fatal('Failedto modify user: ' + str(e))
assert False
@@ -186,14 +142,14 @@ def test_ticket48370(topology):
# Remove and readd some attibutes
#
try:
- topology.standalone.modify_s(DN, [(ldap.MOD_DELETE,
- 'mail',
- 'alias(a)dev.null'),
- (ldap.MOD_DELETE,
- 'mail',
- 'user0099(a)dev.null'),
- (ldap.MOD_ADD,
- 'mail', 'user0099(a)dev.null')])
+ topology_st.standalone.modify_s(DN, [(ldap.MOD_DELETE,
+ 'mail',
+ 'alias(a)dev.null'),
+ (ldap.MOD_DELETE,
+ 'mail',
+ 'user0099(a)dev.null'),
+ (ldap.MOD_ADD,
+ 'mail', 'user0099(a)dev.null')])
except ldap.LDAPError as e:
log.fatal('Failedto modify user: ' + str(e))
assert False
@@ -202,9 +158,9 @@ def test_ticket48370(topology):
# Search using deleted attribute value - no entries should be returned
#
try:
- entry = topology.standalone.search_s(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- 'mail=alias@dev.null')
+ entry = topology_st.standalone.search_s(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ 'mail=alias@dev.null')
if entry:
log.fatal('Entry incorrectly returned')
assert False
@@ -216,9 +172,9 @@ def test_ticket48370(topology):
# Search using existing attribute value - the entry should be returned
#
try:
- entry = topology.standalone.search_s(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- 'mail=user0099@dev.null')
+ entry = topology_st.standalone.search_s(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ 'mail=user0099@dev.null')
if entry is None:
log.fatal('Entry not found, but it should have been')
assert False
diff --git a/dirsrvtests/tests/tickets/ticket48383_test.py b/dirsrvtests/tests/tickets/ticket48383_test.py
index e49c77f..50f17f0 100644
--- a/dirsrvtests/tests/tickets/ticket48383_test.py
+++ b/dirsrvtests/tests/tickets/ticket48383_test.py
@@ -1,62 +1,16 @@
-import os
-import sys
-import time
-import ldap
-import logging
+import random
+import string
+
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
-
-import string
-import random
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- # This is useful for analysing the test env.
- #standalone.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], excludeSuffixes=[], encrypt=False, \
- # repl_data=True, outputfile='%s/ldif/%s.ldif' % (standalone.dbdir,SERVERID_STANDALONE ))
- #standalone.clearBackupFS()
- #standalone.backupFS()
- standalone.delete()
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- return TopologyStandalone(standalone)
-
-
-def test_ticket48383(topology):
+def test_ticket48383(topology_st):
"""
This test case will check that we re-alloc buffer sizes on import.c
@@ -68,8 +22,8 @@ def test_ticket48383(topology):
* If we re-alloc properly, it all works regardless.
"""
- topology.standalone.config.set('nsslapd-maxbersize', '200000000')
- topology.standalone.restart()
+ topology_st.standalone.config.set('nsslapd-maxbersize', '200000000')
+ topology_st.standalone.restart()
# Create some stupid huge objects / attributes in DS.
# seeAlso is indexed by default. Lets do that!
@@ -83,27 +37,27 @@ def test_ticket48383(topology):
padding = ['%s' % n for n in range(400)]
user = Entry((USER_DN, {
- 'objectclass': 'top posixAccount person extensibleObject'.split(),
- 'uid': 'user%s' % (i),
- 'cn': 'user%s' % (i),
- 'uidNumber': '%s' % (i),
- 'gidNumber': '%s' % (i),
- 'homeDirectory': '/home/user%s' % (i),
- 'description': 'user description',
- 'sn' : s ,
- 'padding' : padding ,
- }))
+ 'objectclass': 'top posixAccount person extensibleObject'.split(),
+ 'uid': 'user%s' % (i),
+ 'cn': 'user%s' % (i),
+ 'uidNumber': '%s' % (i),
+ 'gidNumber': '%s' % (i),
+ 'homeDirectory': '/home/user%s' % (i),
+ 'description': 'user description',
+ 'sn': s,
+ 'padding': padding,
+ }))
try:
- topology.standalone.add_s(user)
+ topology_st.standalone.add_s(user)
except ldap.LDAPError as e:
log.fatal('test 48383: Failed to user%s: error %s ' % (i, e.message['desc']))
assert False
# Set the dbsize really low.
try:
- topology.standalone.modify_s(DEFAULT_BENAME, [(ldap.MOD_REPLACE,
- 'nsslapd-cachememsize', '1')])
+ topology_st.standalone.modify_s(DEFAULT_BENAME, [(ldap.MOD_REPLACE,
+ 'nsslapd-cachememsize', '1')])
except ldap.LDAPError as e:
log.fatal('Failed to change nsslapd-cachememsize ' + e.message['desc'])
@@ -113,22 +67,22 @@ def test_ticket48383(topology):
# So an object with a 1MB attribute should break indexing
# stop the server
- topology.standalone.stop(timeout=30)
+ topology_st.standalone.stop(timeout=30)
# Now export and import the DB. It's easier than db2index ...
- topology.standalone.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], excludeSuffixes=[],
- encrypt=False, repl_data=True,
- outputfile='{}/{}.ldif'.format(topology.standalone.ldifdir, SERVERID_STANDALONE))
+ topology_st.standalone.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], excludeSuffixes=[],
+ encrypt=False, repl_data=True,
+ outputfile='{}/{}.ldif'.format(topology_st.standalone.ldifdir, SERVERID_STANDALONE))
- result = topology.standalone.ldif2db(DEFAULT_BENAME, None, None, False,
- '{}/{}.ldif'.format(topology.standalone.ldifdir, SERVERID_STANDALONE))
+ result = topology_st.standalone.ldif2db(DEFAULT_BENAME, None, None, False,
+ '{}/{}.ldif'.format(topology_st.standalone.ldifdir, SERVERID_STANDALONE))
- assert(result)
+ assert (result)
# see if user1 exists at all ....
- result_user = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=user1)')
+ result_user = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=user1)')
- assert(len(result_user) > 0)
+ assert (len(result_user) > 0)
log.info('Test complete')
diff --git a/dirsrvtests/tests/tickets/ticket48497_test.py b/dirsrvtests/tests/tickets/ticket48497_test.py
index d8141d5..1ce49e5 100644
--- a/dirsrvtests/tests/tickets/ticket48497_test.py
+++ b/dirsrvtests/tests/tickets/ticket48497_test.py
@@ -1,21 +1,11 @@
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
NEW_ACCOUNT = "new_account"
MAX_ACCOUNTS = 20
@@ -28,114 +18,76 @@ UIDNUMBER_INDEX = 'cn=uidnumber,cn=index,cn=userRoot,cn=ldbm database,cn=plugins
UIDNUMBER_CN = "uidnumber"
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- return TopologyStandalone(standalone)
-
-
-def test_ticket48497_init(topology):
+def test_ticket48497_init(topology_st):
log.info("Initialization: add dummy entries for the tests")
for cpt in range(MAX_ACCOUNTS):
name = "%s%d" % (NEW_ACCOUNT, cpt)
- topology.standalone.add_s(Entry(("uid=%s,%s" % (name, SUFFIX), {
- 'objectclass': "top posixAccount".split(),
- 'uid': name,
- 'cn': name,
- 'uidnumber': str(111),
- 'gidnumber': str(222),
- 'homedirectory': "/home/tb_%d" % cpt})))
+ topology_st.standalone.add_s(Entry(("uid=%s,%s" % (name, SUFFIX), {
+ 'objectclass': "top posixAccount".split(),
+ 'uid': name,
+ 'cn': name,
+ 'uidnumber': str(111),
+ 'gidnumber': str(222),
+ 'homedirectory': "/home/tb_%d" % cpt})))
-def test_ticket48497_homeDirectory_mixed_value(topology):
+def test_ticket48497_homeDirectory_mixed_value(topology_st):
# Set a homedirectory value with mixed case
name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX)
mod = [(ldap.MOD_REPLACE, 'homeDirectory', MIXED_VALUE)]
- topology.standalone.modify_s(name, mod)
+ topology_st.standalone.modify_s(name, mod)
-def test_ticket48497_extensible_search(topology):
+def test_ticket48497_extensible_search(topology_st):
name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX)
# check with the exact stored value
log.info("Default: can retrieve an entry filter syntax with exact stored value")
- ent = topology.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % MIXED_VALUE)
+ ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % MIXED_VALUE)
log.info("Default: can retrieve an entry filter caseExactIA5Match with exact stored value")
- ent = topology.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseExactIA5Match:=%s)" % MIXED_VALUE)
+ ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseExactIA5Match:=%s)" % MIXED_VALUE)
# check with a lower case value that is different from the stored value
log.info("Default: can not retrieve an entry filter syntax match with lowered stored value")
try:
- ent = topology.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % LOWER_VALUE)
+ ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % LOWER_VALUE)
assert ent is None
except ldap.NO_SUCH_OBJECT:
pass
log.info("Default: can not retrieve an entry filter caseExactIA5Match with lowered stored value")
try:
- ent = topology.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseExactIA5Match:=%s)" % LOWER_VALUE)
+ ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE,
+ "(homeDirectory:caseExactIA5Match:=%s)" % LOWER_VALUE)
assert ent is None
except ldap.NO_SUCH_OBJECT:
pass
log.info("Default: can retrieve an entry filter caseIgnoreIA5Match with lowered stored value")
- ent = topology.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseIgnoreIA5Match:=%s)" % LOWER_VALUE)
+ ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseIgnoreIA5Match:=%s)" % LOWER_VALUE)
-def test_ticket48497_homeDirectory_index_cfg(topology):
+def test_ticket48497_homeDirectory_index_cfg(topology_st):
log.info("\n\nindex homeDirectory in caseIgnoreIA5Match and caseExactIA5Match")
try:
- ent = topology.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE)
+ ent = topology_st.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE)
except ldap.NO_SUCH_OBJECT:
- topology.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, {
- 'objectclass': "top nsIndex".split(),
- 'cn': HOMEDIRECTORY_CN,
- 'nsSystemIndex': 'false',
- 'nsIndexType': 'eq'})))
-
- IGNORE_MR_NAME='caseIgnoreIA5Match'
- EXACT_MR_NAME='caseExactIA5Match'
+ topology_st.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, {
+ 'objectclass': "top nsIndex".split(),
+ 'cn': HOMEDIRECTORY_CN,
+ 'nsSystemIndex': 'false',
+ 'nsIndexType': 'eq'})))
+
+ IGNORE_MR_NAME = 'caseIgnoreIA5Match'
+ EXACT_MR_NAME = 'caseExactIA5Match'
mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (IGNORE_MR_NAME, EXACT_MR_NAME))]
- topology.standalone.modify_s(HOMEDIRECTORY_INDEX, mod)
+ topology_st.standalone.modify_s(HOMEDIRECTORY_INDEX, mod)
-def test_ticket48497_homeDirectory_index_run(topology):
+def test_ticket48497_homeDirectory_index_run(topology_st):
args = {TASK_WAIT: True}
- topology.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args)
+ topology_st.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args)
log.info("Check indexing succeeded with a specified matching rule")
- file_obj = open(topology.standalone.errlog, "r")
+ file_obj = open(topology_st.standalone.errlog, "r")
# Check if the MR configuration failure occurs
regex = re.compile("unknown or invalid matching rule")
@@ -150,6 +102,7 @@ def test_ticket48497_homeDirectory_index_run(topology):
log.info(line)
assert 0
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/dirsrvtests/tests/tickets/ticket48637_test.py b/dirsrvtests/tests/tickets/ticket48637_test.py
index 2bf0321..1a46698 100644
--- a/dirsrvtests/tests/tickets/ticket48637_test.py
+++ b/dirsrvtests/tests/tickets/ticket48637_test.py
@@ -1,22 +1,16 @@
-import os
-import ldap
-import time
-import logging
import pytest
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
-DEBUGGING = False
+DEBUGGING = os.getenv('DEBUGGING', False)
if DEBUGGING:
logging.getLogger(__name__).setLevel(logging.DEBUG)
else:
logging.getLogger(__name__).setLevel(logging.INFO)
-log = logging.getLogger(__name__)
+log = logging.getLogger(__name__)
USER_DN = "uid=test,ou=people,dc=example,dc=com"
GROUP_DN = "cn=group,dc=example,dc=com"
@@ -28,50 +22,7 @@ AUTO_DN = "cn=All Users,cn=Auto Membership Plugin,cn=plugins,cn=config"
MEP_DN = "cn=MEP Definition,cn=Managed Entries,cn=plugins,cn=config"
-class TopologyStandalone(object):
- """The DS Topology Class"""
- def __init__(self, standalone):
- """Init"""
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- """Create DS Deployment"""
-
- # Creating standalone instance ...
- if DEBUGGING:
- standalone = DirSrv(verbose=True)
- else:
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- """If we are debugging just stop the instances, otherwise remove
- them
- """
- if DEBUGGING:
- standalone.stop()
- else:
- standalone.delete()
-
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-
-def test_ticket48637(topology):
+def test_ticket48637(topology_st):
"""Test for entry cache corruption
This requires automember and managed entry plugins to be configured.
@@ -90,7 +41,7 @@ def test_ticket48637(topology):
# Add our setup entries
#
try:
- topology.standalone.add_s(Entry((PEOPLE_OU, {
+ topology_st.standalone.add_s(Entry((PEOPLE_OU, {
'objectclass': 'top organizationalunit'.split(),
'ou': 'people'})))
except ldap.ALREADY_EXISTS:
@@ -100,7 +51,7 @@ def test_ticket48637(topology):
assert False
try:
- topology.standalone.add_s(Entry((GROUP_OU, {
+ topology_st.standalone.add_s(Entry((GROUP_OU, {
'objectclass': 'top organizationalunit'.split(),
'ou': 'groups'})))
except ldap.ALREADY_EXISTS:
@@ -110,7 +61,7 @@ def test_ticket48637(topology):
assert False
try:
- topology.standalone.add_s(Entry((MEP_OU, {
+ topology_st.standalone.add_s(Entry((MEP_OU, {
'objectclass': 'top extensibleObject'.split(),
'ou': 'mep'})))
except ldap.LDAPError as e:
@@ -118,7 +69,7 @@ def test_ticket48637(topology):
assert False
try:
- topology.standalone.add_s(Entry((MEP_TEMPLATE, {
+ topology_st.standalone.add_s(Entry((MEP_TEMPLATE, {
'objectclass': 'top mepTemplateEntry'.split(),
'cn': 'mep template',
'mepRDNAttr': 'cn',
@@ -132,7 +83,7 @@ def test_ticket48637(topology):
# Configure automember
#
try:
- topology.standalone.add_s(Entry((AUTO_DN, {
+ topology_st.standalone.add_s(Entry((AUTO_DN, {
'cn': 'All Users',
'objectclass': ['top', 'autoMemberDefinition'],
'autoMemberScope': 'dc=example,dc=com',
@@ -147,7 +98,7 @@ def test_ticket48637(topology):
# Configure managed entry plugin
#
try:
- topology.standalone.add_s(Entry((MEP_DN, {
+ topology_st.standalone.add_s(Entry((MEP_DN, {
'cn': 'MEP Definition',
'objectclass': ['top', 'extensibleObject'],
'originScope': 'ou=people,dc=example,dc=com',
@@ -161,13 +112,13 @@ def test_ticket48637(topology):
#
# Restart DS
#
- topology.standalone.restart(timeout=30)
+ topology_st.standalone.restart(timeout=30)
#
# Add entry that should fail since the automember group does not exist
#
try:
- topology.standalone.add_s(Entry((USER_DN, {
+ topology_st.standalone.add_s(Entry((USER_DN, {
'uid': 'test',
'objectclass': ['top', 'person', 'extensibleObject'],
'sn': 'test',
@@ -179,8 +130,8 @@ def test_ticket48637(topology):
# Search for the entry - it should not be returned
#
try:
- entry = topology.standalone.search_s(USER_DN, ldap.SCOPE_SUBTREE,
- 'objectclass=*')
+ entry = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_SUBTREE,
+ 'objectclass=*')
if entry:
log.fatal('Entry was incorrectly returned')
assert False
diff --git a/dirsrvtests/tests/tickets/ticket48665_test.py b/dirsrvtests/tests/tickets/ticket48665_test.py
index f50ab35..818fad2 100644
--- a/dirsrvtests/tests/tickets/ticket48665_test.py
+++ b/dirsrvtests/tests/tickets/ticket48665_test.py
@@ -1,59 +1,12 @@
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- # This is useful for analysing the test env.
- #standalone.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], excludeSuffixes=[], encrypt=False, \
- # repl_data=True, outputfile='%s/ldif/%s.ldif' % (standalone.dbdir,SERVERID_STANDALONE ))
- #standalone.clearBackupFS()
- #standalone.backupFS()
- standalone.delete()
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- return TopologyStandalone(standalone)
-
-
-def test_ticket48665(topology):
+def test_ticket48665(topology_st):
"""
This tests deletion of certain cn=config values.
@@ -63,38 +16,38 @@ def test_ticket48665(topology):
We should also still be able to mod replace the values and keep the server alive.
"""
- #topology.standalone.config.enable_log('audit')
- #topology.standalone.config.enable_log('auditfail')
+ # topology_st.standalone.config.enable_log('audit')
+ # topology_st.standalone.config.enable_log('auditfail')
# This will trigger a mod delete then add.
try:
modlist = [(ldap.MOD_DELETE, 'nsslapd-cachememsize', None), (ldap.MOD_ADD, 'nsslapd-cachememsize', '1')]
- topology.standalone.modify_s("cn=%s,cn=ldbm database,cn=plugins,cn=config" % DEFAULT_BENAME,
- modlist)
+ topology_st.standalone.modify_s("cn=%s,cn=ldbm database,cn=plugins,cn=config" % DEFAULT_BENAME,
+ modlist)
except:
pass
# Check the server has not commited seppuku.
- result = topology.standalone.whoami_s()
- assert(DN_DM.lower() in result.lower())
+ result = topology_st.standalone.whoami_s()
+ assert (DN_DM.lower() in result.lower())
# This has a magic hack to determine if we are in cn=config.
try:
- topology.standalone.modify_s(DEFAULT_BENAME, [(ldap.MOD_REPLACE,
- 'nsslapd-cachememsize', '1')])
+ topology_st.standalone.modify_s(DEFAULT_BENAME, [(ldap.MOD_REPLACE,
+ 'nsslapd-cachememsize', '1')])
except ldap.LDAPError as e:
log.fatal('Failed to change nsslapd-cachememsize ' + e.message['desc'])
# Check the server has not commited seppuku.
- result = topology.standalone.whoami_s()
- assert(DN_DM.lower() in result.lower())
+ result = topology_st.standalone.whoami_s()
+ assert (DN_DM.lower() in result.lower())
# Now try with mod_replace. This should be okay.
modlist = [(ldap.MOD_REPLACE, 'nsslapd-cachememsize', '1')]
- topology.standalone.modify_s("cn=%s,cn=ldbm database,cn=plugins,cn=config" % DEFAULT_BENAME,
- modlist)
+ topology_st.standalone.modify_s("cn=%s,cn=ldbm database,cn=plugins,cn=config" % DEFAULT_BENAME,
+ modlist)
# Check the server has not commited seppuku.
- result = topology.standalone.whoami_s()
- assert(DN_DM.lower() in result.lower())
+ result = topology_st.standalone.whoami_s()
+ assert (DN_DM.lower() in result.lower())
log.info('Test complete')
diff --git a/dirsrvtests/tests/tickets/ticket48745_test.py b/dirsrvtests/tests/tickets/ticket48745_test.py
index 6a0c7f0..45c3a4f 100644
--- a/dirsrvtests/tests/tickets/ticket48745_test.py
+++ b/dirsrvtests/tests/tickets/ticket48745_test.py
@@ -1,112 +1,63 @@
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
+NEW_ACCOUNT = "new_account"
+MAX_ACCOUNTS = 20
-NEW_ACCOUNT = "new_account"
-MAX_ACCOUNTS = 20
-
-MIXED_VALUE="/home/mYhOmEdIrEcToRy"
-LOWER_VALUE="/home/myhomedirectory"
+MIXED_VALUE = "/home/mYhOmEdIrEcToRy"
+LOWER_VALUE = "/home/myhomedirectory"
HOMEDIRECTORY_INDEX = 'cn=homeDirectory,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config'
-HOMEDIRECTORY_CN="homedirectory"
+HOMEDIRECTORY_CN = "homedirectory"
MATCHINGRULE = 'nsMatchingRule'
UIDNUMBER_INDEX = 'cn=uidnumber,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config'
-UIDNUMBER_CN="uidnumber"
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- return TopologyStandalone(standalone)
+UIDNUMBER_CN = "uidnumber"
-def test_ticket48745_init(topology):
+def test_ticket48745_init(topology_st):
log.info("Initialization: add dummy entries for the tests")
for cpt in range(MAX_ACCOUNTS):
name = "%s%d" % (NEW_ACCOUNT, cpt)
- topology.standalone.add_s(Entry(("uid=%s,%s" % (name, SUFFIX), {
- 'objectclass': "top posixAccount".split(),
- 'uid': name,
- 'cn': name,
- 'uidnumber': str(111),
- 'gidnumber': str(222),
- 'homedirectory': "/home/tbordaz_%d" % cpt})))
+ topology_st.standalone.add_s(Entry(("uid=%s,%s" % (name, SUFFIX), {
+ 'objectclass': "top posixAccount".split(),
+ 'uid': name,
+ 'cn': name,
+ 'uidnumber': str(111),
+ 'gidnumber': str(222),
+ 'homedirectory': "/home/tbordaz_%d" % cpt})))
-def test_ticket48745_homeDirectory_indexed_cis(topology):
+def test_ticket48745_homeDirectory_indexed_cis(topology_st):
log.info("\n\nindex homeDirectory in caseIgnoreIA5Match and caseExactIA5Match")
try:
- ent = topology.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE)
+ ent = topology_st.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE)
except ldap.NO_SUCH_OBJECT:
- topology.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, {
- 'objectclass': "top nsIndex".split(),
- 'cn': HOMEDIRECTORY_CN,
- 'nsSystemIndex': 'false',
- 'nsIndexType': 'eq'})))
- #log.info("attach debugger")
- #time.sleep(60)
-
- IGNORE_MR_NAME='caseIgnoreIA5Match'
- EXACT_MR_NAME='caseExactIA5Match'
+ topology_st.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, {
+ 'objectclass': "top nsIndex".split(),
+ 'cn': HOMEDIRECTORY_CN,
+ 'nsSystemIndex': 'false',
+ 'nsIndexType': 'eq'})))
+ # log.info("attach debugger")
+ # time.sleep(60)
+
+ IGNORE_MR_NAME = 'caseIgnoreIA5Match'
+ EXACT_MR_NAME = 'caseExactIA5Match'
mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (IGNORE_MR_NAME, EXACT_MR_NAME))]
- topology.standalone.modify_s(HOMEDIRECTORY_INDEX, mod)
+ topology_st.standalone.modify_s(HOMEDIRECTORY_INDEX, mod)
- #topology.standalone.stop(timeout=10)
+ # topology_st.standalone.stop(timeout=10)
log.info("successfully checked that filter with exact mr , a filter with lowercase eq is failing")
- #assert topology.standalone.db2index(bename=DEFAULT_BENAME, suffixes=None, attrs=['homeDirectory'])
- #topology.standalone.start(timeout=10)
+ # assert topology_st.standalone.db2index(bename=DEFAULT_BENAME, suffixes=None, attrs=['homeDirectory'])
+ # topology_st.standalone.start(timeout=10)
args = {TASK_WAIT: True}
- topology.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args)
+ topology_st.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args)
log.info("Check indexing succeeded with a specified matching rule")
- file_obj = open(topology.standalone.errlog, "r")
+ file_obj = open(topology_st.standalone.errlog, "r")
# Check if the MR configuration failure occurs
regex = re.compile("unknown or invalid matching rule")
@@ -122,45 +73,48 @@ def test_ticket48745_homeDirectory_indexed_cis(topology):
assert 0
-def test_ticket48745_homeDirectory_mixed_value(topology):
+def test_ticket48745_homeDirectory_mixed_value(topology_st):
# Set a homedirectory value with mixed case
name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX)
mod = [(ldap.MOD_REPLACE, 'homeDirectory', MIXED_VALUE)]
- topology.standalone.modify_s(name, mod)
+ topology_st.standalone.modify_s(name, mod)
-def test_ticket48745_extensible_search_after_index(topology):
+def test_ticket48745_extensible_search_after_index(topology_st):
name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX)
# check with the exact stored value
log.info("Default: can retrieve an entry filter syntax with exact stored value")
- ent = topology.standalone.getEntry(SUFFIX, ldap.SCOPE_SUBTREE, "(homeDirectory=%s)" % MIXED_VALUE)
-# log.info("attach debugger")
-# time.sleep(60)
+ ent = topology_st.standalone.getEntry(SUFFIX, ldap.SCOPE_SUBTREE, "(homeDirectory=%s)" % MIXED_VALUE)
+ # log.info("attach debugger")
+ # time.sleep(60)
# This search will fail because a
# subtree search with caseExactIA5Match will find a key
# where the value has been lowercase
log.info("Default: can retrieve an entry filter caseExactIA5Match with exact stored value")
- ent = topology.standalone.getEntry(SUFFIX, ldap.SCOPE_SUBTREE, "(homeDirectory:caseExactIA5Match:=%s)" % MIXED_VALUE)
+ ent = topology_st.standalone.getEntry(SUFFIX, ldap.SCOPE_SUBTREE,
+ "(homeDirectory:caseExactIA5Match:=%s)" % MIXED_VALUE)
assert ent
# But do additional searches.. just for more tests
# check with a lower case value that is different from the stored value
log.info("Default: can not retrieve an entry filter syntax match with lowered stored value")
try:
- ent = topology.standalone.getEntry(SUFFIX, ldap.SCOPE_SUBTREE, "(homeDirectory=%s)" % LOWER_VALUE)
+ ent = topology_st.standalone.getEntry(SUFFIX, ldap.SCOPE_SUBTREE, "(homeDirectory=%s)" % LOWER_VALUE)
assert ent is None
except ldap.NO_SUCH_OBJECT:
pass
log.info("Default: can not retrieve an entry filter caseExactIA5Match with lowered stored value")
try:
- ent = topology.standalone.getEntry(SUFFIX, ldap.SCOPE_SUBTREE, "(homeDirectory:caseExactIA5Match:=%s)" % LOWER_VALUE)
+ ent = topology_st.standalone.getEntry(SUFFIX, ldap.SCOPE_SUBTREE,
+ "(homeDirectory:caseExactIA5Match:=%s)" % LOWER_VALUE)
assert ent is None
except ldap.NO_SUCH_OBJECT:
pass
log.info("Default: can retrieve an entry filter caseIgnoreIA5Match with lowered stored value")
- ent = topology.standalone.getEntry(SUFFIX, ldap.SCOPE_SUBTREE, "(homeDirectory:caseIgnoreIA5Match:=%s)" % LOWER_VALUE)
+ ent = topology_st.standalone.getEntry(SUFFIX, ldap.SCOPE_SUBTREE,
+ "(homeDirectory:caseIgnoreIA5Match:=%s)" % LOWER_VALUE)
if __name__ == '__main__':
diff --git a/dirsrvtests/tests/tickets/ticket48746_test.py b/dirsrvtests/tests/tickets/ticket48746_test.py
index 0a13998..7b05fc7 100644
--- a/dirsrvtests/tests/tickets/ticket48746_test.py
+++ b/dirsrvtests/tests/tickets/ticket48746_test.py
@@ -1,110 +1,63 @@
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
+NEW_ACCOUNT = "new_account"
+MAX_ACCOUNTS = 20
-NEW_ACCOUNT = "new_account"
-MAX_ACCOUNTS = 20
-
-MIXED_VALUE="/home/mYhOmEdIrEcToRy"
-LOWER_VALUE="/home/myhomedirectory"
+MIXED_VALUE = "/home/mYhOmEdIrEcToRy"
+LOWER_VALUE = "/home/myhomedirectory"
HOMEDIRECTORY_INDEX = 'cn=homeDirectory,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config'
-HOMEDIRECTORY_CN="homedirectory"
+HOMEDIRECTORY_CN = "homedirectory"
MATCHINGRULE = 'nsMatchingRule'
UIDNUMBER_INDEX = 'cn=uidnumber,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config'
-UIDNUMBER_CN="uidnumber"
-
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- return TopologyStandalone(standalone)
-
-def test_ticket48746_init(topology):
+UIDNUMBER_CN = "uidnumber"
+
+
+def test_ticket48746_init(topology_st):
log.info("Initialization: add dummy entries for the tests")
for cpt in range(MAX_ACCOUNTS):
name = "%s%d" % (NEW_ACCOUNT, cpt)
- topology.standalone.add_s(Entry(("uid=%s,%s" % (name, SUFFIX), {
- 'objectclass': "top posixAccount".split(),
- 'uid': name,
- 'cn': name,
- 'uidnumber': str(111),
- 'gidnumber': str(222),
- 'homedirectory': "/home/tbordaz_%d" % cpt})))
-
-def test_ticket48746_homeDirectory_indexed_cis(topology):
+ topology_st.standalone.add_s(Entry(("uid=%s,%s" % (name, SUFFIX), {
+ 'objectclass': "top posixAccount".split(),
+ 'uid': name,
+ 'cn': name,
+ 'uidnumber': str(111),
+ 'gidnumber': str(222),
+ 'homedirectory': "/home/tbordaz_%d" % cpt})))
+
+
+def test_ticket48746_homeDirectory_indexed_cis(topology_st):
log.info("\n\nindex homeDirectory in caseIgnoreIA5Match and caseExactIA5Match")
try:
- ent = topology.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE)
+ ent = topology_st.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE)
except ldap.NO_SUCH_OBJECT:
- topology.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, {
- 'objectclass': "top nsIndex".split(),
- 'cn': HOMEDIRECTORY_CN,
- 'nsSystemIndex': 'false',
- 'nsIndexType': 'eq'})))
- #log.info("attach debugger")
- #time.sleep(60)
-
- IGNORE_MR_NAME='caseIgnoreIA5Match'
- EXACT_MR_NAME='caseExactIA5Match'
+ topology_st.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, {
+ 'objectclass': "top nsIndex".split(),
+ 'cn': HOMEDIRECTORY_CN,
+ 'nsSystemIndex': 'false',
+ 'nsIndexType': 'eq'})))
+ # log.info("attach debugger")
+ # time.sleep(60)
+
+ IGNORE_MR_NAME = 'caseIgnoreIA5Match'
+ EXACT_MR_NAME = 'caseExactIA5Match'
mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (IGNORE_MR_NAME, EXACT_MR_NAME))]
- topology.standalone.modify_s(HOMEDIRECTORY_INDEX, mod)
+ topology_st.standalone.modify_s(HOMEDIRECTORY_INDEX, mod)
- #topology.standalone.stop(timeout=10)
+ # topology_st.standalone.stop(timeout=10)
log.info("successfully checked that filter with exact mr , a filter with lowercase eq is failing")
- #assert topology.standalone.db2index(bename=DEFAULT_BENAME, suffixes=None, attrs=['homeDirectory'])
- #topology.standalone.start(timeout=10)
+ # assert topology_st.standalone.db2index(bename=DEFAULT_BENAME, suffixes=None, attrs=['homeDirectory'])
+ # topology_st.standalone.start(timeout=10)
args = {TASK_WAIT: True}
- topology.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args)
+ topology_st.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args)
log.info("Check indexing succeeded with a specified matching rule")
- file_obj = open(topology.standalone.errlog, "r")
+ file_obj = open(topology_st.standalone.errlog, "r")
# Check if the MR configuration failure occurs
regex = re.compile("unknown or invalid matching rule")
@@ -119,55 +72,56 @@ def test_ticket48746_homeDirectory_indexed_cis(topology):
log.info(line)
assert not found
-def test_ticket48746_homeDirectory_mixed_value(topology):
+
+def test_ticket48746_homeDirectory_mixed_value(topology_st):
# Set a homedirectory value with mixed case
name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX)
mod = [(ldap.MOD_REPLACE, 'homeDirectory', MIXED_VALUE)]
- topology.standalone.modify_s(name, mod)
+ topology_st.standalone.modify_s(name, mod)
-def test_ticket48746_extensible_search_after_index(topology):
+
+def test_ticket48746_extensible_search_after_index(topology_st):
name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX)
# check with the exact stored value
-# log.info("Default: can retrieve an entry filter syntax with exact stored value")
-# ent = topology.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % MIXED_VALUE)
-# log.info("attach debugger")
-# time.sleep(60)
+ # log.info("Default: can retrieve an entry filter syntax with exact stored value")
+ # ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % MIXED_VALUE)
+ # log.info("attach debugger")
+ # time.sleep(60)
# This search is enought to trigger the crash
# because it loads a registered filter MR plugin that has no indexer create function
# following index will trigger the crash
log.info("Default: can retrieve an entry filter caseExactIA5Match with exact stored value")
- ent = topology.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseExactIA5Match:=%s)" % MIXED_VALUE)
-
+ ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseExactIA5Match:=%s)" % MIXED_VALUE)
-def test_ticket48746_homeDirectory_indexed_ces(topology):
+def test_ticket48746_homeDirectory_indexed_ces(topology_st):
log.info("\n\nindex homeDirectory in caseExactIA5Match, this would trigger the crash")
try:
- ent = topology.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE)
+ ent = topology_st.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE)
except ldap.NO_SUCH_OBJECT:
- topology.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, {
- 'objectclass': "top nsIndex".split(),
- 'cn': HOMEDIRECTORY_CN,
- 'nsSystemIndex': 'false',
- 'nsIndexType': 'eq'})))
-# log.info("attach debugger")
-# time.sleep(60)
-
- EXACT_MR_NAME='caseExactIA5Match'
+ topology_st.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, {
+ 'objectclass': "top nsIndex".split(),
+ 'cn': HOMEDIRECTORY_CN,
+ 'nsSystemIndex': 'false',
+ 'nsIndexType': 'eq'})))
+ # log.info("attach debugger")
+ # time.sleep(60)
+
+ EXACT_MR_NAME = 'caseExactIA5Match'
mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (EXACT_MR_NAME))]
- topology.standalone.modify_s(HOMEDIRECTORY_INDEX, mod)
+ topology_st.standalone.modify_s(HOMEDIRECTORY_INDEX, mod)
- #topology.standalone.stop(timeout=10)
+ # topology_st.standalone.stop(timeout=10)
log.info("successfully checked that filter with exact mr , a filter with lowercase eq is failing")
- #assert topology.standalone.db2index(bename=DEFAULT_BENAME, suffixes=None, attrs=['homeDirectory'])
- #topology.standalone.start(timeout=10)
+ # assert topology_st.standalone.db2index(bename=DEFAULT_BENAME, suffixes=None, attrs=['homeDirectory'])
+ # topology_st.standalone.start(timeout=10)
args = {TASK_WAIT: True}
- topology.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args)
+ topology_st.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args)
log.info("Check indexing succeeded with a specified matching rule")
- file_obj = open(topology.standalone.errlog, "r")
+ file_obj = open(topology_st.standalone.errlog, "r")
# Check if the MR configuration failure occurs
regex = re.compile("unknown or invalid matching rule")
diff --git a/dirsrvtests/tests/tickets/ticket48755_test.py b/dirsrvtests/tests/tickets/ticket48755_test.py
index bcf8d1c..9f2e2ac 100644
--- a/dirsrvtests/tests/tickets/ticket48755_test.py
+++ b/dirsrvtests/tests/tickets/ticket48755_test.py
@@ -6,129 +6,14 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import shlex
-import subprocess
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_m2
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
-m1_m2_agmt = None
-
-
-class TopologyReplication(object):
- def __init__(self, master1, master2):
- master1.open()
- self.master1 = master1
- master2.open()
- self.master2 = master2
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating master 1...
- master1 = DirSrv(verbose=False)
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
- args_instance[SER_HOST] = HOST_MASTER_1
- args_instance[SER_PORT] = PORT_MASTER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master1.allocate(args_master)
- instance_master1 = master1.exists()
- if instance_master1:
- master1.delete()
- master1.create()
- master1.open()
- master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-
- # Creating master 2...
- master2 = DirSrv(verbose=False)
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
- args_instance[SER_HOST] = HOST_MASTER_2
- args_instance[SER_PORT] = PORT_MASTER_2
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master2.allocate(args_master)
- instance_master2 = master2.exists()
- if instance_master2:
- master2.delete()
- master2.create()
- master2.open()
- master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
- #
- # Create all the agreements
- #
- # Creating agreement from master 1 to master 2
- properties = {RA_NAME: r'meTo_%s:%s' % (master2.host, master2.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- global m1_m2_agmt
- m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
- if not m1_m2_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m1_m2_agmt)
-
- # Creating agreement from master 2 to master 1
- properties = {RA_NAME: r'meTo_%s:%s' % (master1.host, master1.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
- if not m2_m1_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m2_m1_agmt)
-
- # Allow the replicas to get situated with the new agreements...
- time.sleep(5)
-
- #
- # Initialize all the agreements
- #
- master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
- master1.waitForReplInit(m1_m2_agmt)
-
- # Check replication is working...
- if master1.testReplication(DEFAULT_SUFFIX, master2):
- log.info('Replication is working.')
- else:
- log.fatal('Replication is not working.')
- assert False
-
- # Delete each instance in the end
- def fin():
- master1.delete()
- master2.delete()
- request.addfinalizer(fin)
-
- return TopologyReplication(master1, master2)
-
@pytest.fixture(scope="module")
def add_ou_entry(server, idx, myparent):
@@ -180,8 +65,8 @@ def add_ldapsubentry(server, myparent):
tmplentry = 'cn=%s,%s' % (name, myparent)
tmpldn = 'cn="%s",%s' % (tmplentry, container)
server.add_s(Entry((tmpldn, {'objectclass': ['top', 'ldapsubentry', 'costemplate', 'extensibleObject'],
- 'cosPriority': '1',
- 'cn': '%s' % tmplentry})))
+ 'cosPriority': '1',
+ 'cn': '%s' % tmplentry})))
name = 'nsPwPolicy_CoS'
cos = 'cn=%s,%s' % (name, myparent)
@@ -192,11 +77,11 @@ def add_ldapsubentry(server, myparent):
time.sleep(1)
-def test_ticket48755(topology):
+def test_ticket48755(topology_m2):
log.info("Ticket 48755 - moving an entry could make the online init fail")
- M1 = topology.master1
- M2 = topology.master2
+ M1 = topology_m2.ms["master1"]
+ M2 = topology_m2.ms["master2"]
log.info("Generating DIT_0")
idx = 0
@@ -248,7 +133,7 @@ def test_ticket48755(topology):
log.info('%s => %s => %s => %s => 10 USERS' % (DEFAULT_SUFFIX, parent1, parent01, parent001))
log.info("Run Consumer Initialization.")
- global m1_m2_agmt
+ m1_m2_agmt = topology_m2.ms["master1_agmts"]["m1_m2"]
M1.startReplication_async(m1_m2_agmt)
M1.waitForReplInit(m1_m2_agmt)
time.sleep(2)
diff --git a/dirsrvtests/tests/tickets/ticket48759_test.py b/dirsrvtests/tests/tickets/ticket48759_test.py
index d007728..55f21d6 100644
--- a/dirsrvtests/tests/tickets/ticket48759_test.py
+++ b/dirsrvtests/tests/tickets/ticket48759_test.py
@@ -6,17 +6,11 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
import logging
+
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
@@ -25,52 +19,12 @@ GROUP_DN = ("cn=group," + DEFAULT_SUFFIX)
MEMBER_DN_COMP = "uid=member"
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-def _add_group_with_members(topology):
+def _add_group_with_members(topology_st):
# Create group
try:
- topology.standalone.add_s(Entry((GROUP_DN,
- {'objectclass': 'top groupofnames'.split(),
- 'cn': 'group'})))
+ topology_st.standalone.add_s(Entry((GROUP_DN,
+ {'objectclass': 'top groupofnames'.split(),
+ 'cn': 'group'})))
except ldap.LDAPError as e:
log.fatal('Failed to add group: error ' + e.message['desc'])
assert False
@@ -80,36 +34,39 @@ def _add_group_with_members(topology):
for idx in range(1, 5):
try:
MEMBER_VAL = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
- topology.standalone.modify_s(GROUP_DN,
- [(ldap.MOD_ADD,
- 'member',
- MEMBER_VAL)])
+ topology_st.standalone.modify_s(GROUP_DN,
+ [(ldap.MOD_ADD,
+ 'member',
+ MEMBER_VAL)])
except ldap.LDAPError as e:
log.fatal('Failed to update group: member (%s) - error: %s' %
(MEMBER_VAL, e.message['desc']))
assert False
-def _find_retrocl_changes(topology, user_dn=None):
- ents = topology.standalone.search_s('cn=changelog', ldap.SCOPE_SUBTREE, '(targetDn=%s)' %user_dn)
+
+def _find_retrocl_changes(topology_st, user_dn=None):
+ ents = topology_st.standalone.search_s('cn=changelog', ldap.SCOPE_SUBTREE, '(targetDn=%s)' % user_dn)
return len(ents)
-def _find_memberof(topology, user_dn=None, group_dn=None, find_result=True):
- ent = topology.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof'])
+
+def _find_memberof(topology_st, user_dn=None, group_dn=None, find_result=True):
+ ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof'])
found = False
if ent.hasAttr('memberof'):
for val in ent.getValues('memberof'):
- topology.standalone.log.info("!!!!!!! %s: memberof->%s" % (user_dn, val))
+ topology_st.standalone.log.info("!!!!!!! %s: memberof->%s" % (user_dn, val))
if val == group_dn:
found = True
break
if find_result:
- assert(found)
+ assert (found)
else:
- assert(not found)
+ assert (not found)
+
-def test_ticket48759(topology):
+def test_ticket48759(topology_st):
"""
The fix for ticket 48759 has to prevent plugin calls for tombstone purging
@@ -144,35 +101,34 @@ def test_ticket48759(topology):
# Setup Replication
#
log.info('Setting up replication...')
- topology.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER,
- replicaId=REPLICAID_MASTER_1)
+ topology_st.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER,
+ replicaId=REPLICAID_MASTER_1)
#
# enable dynamic plugins, memberof and retro cl plugin
#
log.info('Enable plugins...')
try:
- topology.standalone.modify_s(DN_CONFIG,
- [(ldap.MOD_REPLACE,
- 'nsslapd-dynamic-plugins',
- 'on')])
+ topology_st.standalone.modify_s(DN_CONFIG,
+ [(ldap.MOD_REPLACE,
+ 'nsslapd-dynamic-plugins',
+ 'on')])
except ldap.LDAPError as e:
ldap.error('Failed to enable dynamic plugins! ' + e.message['desc'])
assert False
- topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
- topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
+ topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology_st.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
# Configure memberOf group attribute
try:
- topology.standalone.modify_s(MEMBEROF_PLUGIN_DN,
- [(ldap.MOD_REPLACE,
- 'memberofgroupattr',
- 'member')])
+ topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN,
+ [(ldap.MOD_REPLACE,
+ 'memberofgroupattr',
+ 'member')])
except ldap.LDAPError as e:
log.fatal('Failed to configure memberOf plugin: error ' + e.message['desc'])
assert False
-
#
# create some users and a group
#
@@ -180,47 +136,47 @@ def test_ticket48759(topology):
for idx in range(1, 5):
try:
USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
- topology.standalone.add_s(Entry((USER_DN,
- {'objectclass': 'top extensibleObject'.split(),
- 'uid': 'member%d' % (idx)})))
+ topology_st.standalone.add_s(Entry((USER_DN,
+ {'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'member%d' % (idx)})))
except ldap.LDAPError as e:
log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.message['desc']))
assert False
- _add_group_with_members(topology)
+ _add_group_with_members(topology_st)
MEMBER_VAL = ("uid=member2,%s" % DEFAULT_SUFFIX)
time.sleep(1)
- _find_memberof(topology, MEMBER_VAL, GROUP_DN, True)
+ _find_memberof(topology_st, MEMBER_VAL, GROUP_DN, True)
# delete group
log.info('delete group...')
try:
- topology.standalone.delete_s(GROUP_DN)
+ topology_st.standalone.delete_s(GROUP_DN)
except ldap.LDAPError as e:
log.error('Failed to delete entry: ' + e.message['desc'])
assert False
time.sleep(1)
- _find_memberof(topology, MEMBER_VAL, GROUP_DN, False)
+ _find_memberof(topology_st, MEMBER_VAL, GROUP_DN, False)
# add group again
log.info('add group again')
- _add_group_with_members(topology)
+ _add_group_with_members(topology_st)
time.sleep(1)
- _find_memberof(topology, MEMBER_VAL, GROUP_DN, True)
+ _find_memberof(topology_st, MEMBER_VAL, GROUP_DN, True)
#
# get number of changelog records for one user entry
log.info('get number of changes for %s before tombstone purging' % MEMBER_VAL)
- changes_pre = _find_retrocl_changes(topology, MEMBER_VAL)
+ changes_pre = _find_retrocl_changes(topology_st, MEMBER_VAL)
# configure tombstone purging
args = {REPLICA_PRECISE_PURGING: 'on',
REPLICA_PURGE_DELAY: '5',
REPLICA_PURGE_INTERVAL: '5'}
try:
- topology.standalone.replica.setProperties(DEFAULT_SUFFIX, None, None, args)
+ topology_st.standalone.replica.setProperties(DEFAULT_SUFFIX, None, None, args)
except:
log.fatal('Failed to configure replica')
assert False
@@ -232,17 +188,17 @@ def test_ticket48759(topology):
# Add an entry to trigger replication
log.info('add dummy entry')
try:
- topology.standalone.add_s(Entry(('cn=test_entry,dc=example,dc=com', {
- 'objectclass': 'top person'.split(),
- 'sn': 'user',
- 'cn': 'entry1'})))
+ topology_st.standalone.add_s(Entry(('cn=test_entry,dc=example,dc=com', {
+ 'objectclass': 'top person'.split(),
+ 'sn': 'user',
+ 'cn': 'entry1'})))
except ldap.LDAPError as e:
log.error('Failed to add entry: ' + e.message['desc'])
assert False
# check memberof is still correct
time.sleep(1)
- _find_memberof(topology, MEMBER_VAL, GROUP_DN, True)
+ _find_memberof(topology_st, MEMBER_VAL, GROUP_DN, True)
# Wait for the interval to pass again
log.info('Wait for tombstone purge interval to pass again...')
@@ -251,7 +207,7 @@ def test_ticket48759(topology):
#
# get number of changelog records for one user entry
log.info('get number of changes for %s before tombstone purging' % MEMBER_VAL)
- changes_post = _find_retrocl_changes(topology, MEMBER_VAL)
+ changes_post = _find_retrocl_changes(topology_st, MEMBER_VAL)
assert (changes_pre == changes_post)
diff --git a/dirsrvtests/tests/tickets/ticket48784_test.py b/dirsrvtests/tests/tickets/ticket48784_test.py
index bc69308..0a739f4 100644
--- a/dirsrvtests/tests/tickets/ticket48784_test.py
+++ b/dirsrvtests/tests/tickets/ticket48784_test.py
@@ -6,27 +6,14 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import shlex
-import subprocess
-import ldap
-import logging
import pytest
-import base64
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_m2
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
CONFIG_DN = 'cn=config'
ENCRYPTION_DN = 'cn=encryption,%s' % CONFIG_DN
RSA = 'RSA'
@@ -37,113 +24,8 @@ M1SERVERCERT = 'Server-Cert1'
M2SERVERCERT = 'Server-Cert2'
M1LDAPSPORT = '41636'
M2LDAPSPORT = '42636'
-
-
-class TopologyReplication(object):
- def __init__(self, master1, master2):
- master1.open()
- self.master1 = master1
- master2.open()
- self.master2 = master2
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating master 1...
- master1 = DirSrv(verbose=False)
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
- args_instance[SER_HOST] = HOST_MASTER_1
- args_instance[SER_PORT] = PORT_MASTER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master1.allocate(args_master)
- instance_master1 = master1.exists()
- if instance_master1:
- master1.delete()
- master1.create()
- master1.open()
- master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-
- # Creating master 2...
- master2 = DirSrv(verbose=False)
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
- args_instance[SER_HOST] = HOST_MASTER_2
- args_instance[SER_PORT] = PORT_MASTER_2
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master2.allocate(args_master)
- instance_master2 = master2.exists()
- if instance_master2:
- master2.delete()
- master2.create()
- master2.open()
- master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
- #
- # Create all the agreements
- #
- # Creating agreement from master 1 to master 2
- properties = {RA_NAME: r'meTo_%s:%s' % (master2.host, master2.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- global m1_m2_agmt
- m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
- if not m1_m2_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m1_m2_agmt)
-
- # Creating agreement from master 2 to master 1
- properties = {RA_NAME: r'meTo_%s:%s' % (master1.host, master1.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- global m2_m1_agmt
- m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
- if not m2_m1_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m2_m1_agmt)
-
- # Allow the replicas to get situated with the new agreements...
- time.sleep(2)
-
- global M1SUBJECT
- M1SUBJECT = 'CN=%s,OU=389 Directory Server' % (master1.host)
- global M2SUBJECT
- M2SUBJECT = 'CN=%s,OU=390 Directory Server' % (master2.host)
-
- #
- # Initialize all the agreements
- #
- master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
- master1.waitForReplInit(m1_m2_agmt)
-
- # Check replication is working...
- if master1.testReplication(DEFAULT_SUFFIX, master2):
- log.info('Replication is working.')
- else:
- log.fatal('Replication is not working.')
- assert False
-
- # Delete each instance in the end
- def fin():
- master1.delete()
- master2.delete()
- request.addfinalizer(fin)
-
- return TopologyReplication(master1, master2)
+M1SUBJECT = 'CN={},OU=389 Directory Server'.format(HOST_MASTER_1)
+M2SUBJECT = 'CN={},OU=390 Directory Server'.format(HOST_MASTER_2)
@pytest.fixture(scope="module")
@@ -211,16 +93,16 @@ def doAndPrintIt(cmdline, filename):
time.sleep(1)
-def create_keys_certs(topology):
+def create_keys_certs(topology_m2):
log.info("\n######################### Creating SSL Keys and Certs ######################\n")
global m1confdir
- m1confdir = topology.master1.confdir
+ m1confdir = topology_m2.ms["master1"].confdir
global m2confdir
- m2confdir = topology.master2.confdir
+ m2confdir = topology_m2.ms["master2"].confdir
log.info("##### shutdown master1")
- topology.master1.stop(timeout=10)
+ topology_m2.ms["master1"].stop(timeout=10)
log.info("##### Creating a password file")
pwdfile = '%s/pwdfile.txt' % (m1confdir)
@@ -255,43 +137,47 @@ def create_keys_certs(topology):
log.info("##### Create key3.db and cert8.db database (master1): %s" % cmdline)
doAndPrintIt(cmdline, None)
- cmdline = ['certutil', '-G', '-d', m1confdir, '-z', noisefile, '-f', pwdfile]
+ cmdline = ['certutil', '-G', '-d', m1confdir, '-z', noisefile, '-f', pwdfile]
log.info("##### Creating encryption key for CA (master1): %s" % cmdline)
- #os.system('certutil -G -d %s -z %s -f %s' % (m1confdir, noisefile, pwdfile))
+ # os.system('certutil -G -d %s -z %s -f %s' % (m1confdir, noisefile, pwdfile))
doAndPrintIt(cmdline, None)
time.sleep(2)
log.info("##### Creating self-signed CA certificate (master1) -- nickname %s" % CACERT)
- os.system('( echo y ; echo ; echo y ) | certutil -S -n "%s" -s "%s" -x -t "CT,," -m 1000 -v 120 -d %s -z %s -f %s -2' % (CACERT, ISSUER, m1confdir, noisefile, pwdfile))
+ os.system(
+ '( echo y ; echo ; echo y ) | certutil -S -n "%s" -s "%s" -x -t "CT,," -m 1000 -v 120 -d %s -z %s -f %s -2' % (
+ CACERT, ISSUER, m1confdir, noisefile, pwdfile))
global M1SUBJECT
- cmdline = ['certutil', '-S', '-n', M1SERVERCERT, '-s', M1SUBJECT, '-c', CACERT, '-t', ',,', '-m', '1001', '-v', '120', '-d', m1confdir, '-z', noisefile, '-f', pwdfile]
+ cmdline = ['certutil', '-S', '-n', M1SERVERCERT, '-s', M1SUBJECT, '-c', CACERT, '-t', ',,', '-m', '1001', '-v',
+ '120', '-d', m1confdir, '-z', noisefile, '-f', pwdfile]
log.info("##### Creating Server certificate -- nickname %s: %s" % (M1SERVERCERT, cmdline))
doAndPrintIt(cmdline, None)
time.sleep(2)
global M2SUBJECT
- cmdline = ['certutil', '-S', '-n', M2SERVERCERT, '-s', M2SUBJECT, '-c', CACERT, '-t', ',,', '-m', '1002', '-v', '120', '-d', m1confdir, '-z', noisefile, '-f', pwdfile]
+ cmdline = ['certutil', '-S', '-n', M2SERVERCERT, '-s', M2SUBJECT, '-c', CACERT, '-t', ',,', '-m', '1002', '-v',
+ '120', '-d', m1confdir, '-z', noisefile, '-f', pwdfile]
log.info("##### Creating Server certificate -- nickname %s: %s" % (M2SERVERCERT, cmdline))
doAndPrintIt(cmdline, None)
time.sleep(2)
log.info("##### start master1")
- topology.master1.start(timeout=10)
+ topology_m2.ms["master1"].start(timeout=10)
log.info("##### enable SSL in master1 with all ciphers")
- enable_ssl(topology.master1, M1LDAPSPORT, M1SERVERCERT)
+ enable_ssl(topology_m2.ms["master1"], M1LDAPSPORT, M1SERVERCERT)
cmdline = ['certutil', '-L', '-d', m1confdir]
log.info("##### Check the cert db: %s" % cmdline)
doAndPrintIt(cmdline, None)
log.info("##### stop master[12]")
- topology.master1.stop(timeout=10)
- topology.master2.stop(timeout=10)
+ topology_m2.ms["master1"].stop(timeout=10)
+ topology_m2.ms["master2"].stop(timeout=10)
global mytmp
mytmp = '/tmp'
@@ -329,36 +215,36 @@ def create_keys_certs(topology):
time.sleep(1)
log.info("##### start master2")
- topology.master2.start(timeout=10)
+ topology_m2.ms["master2"].start(timeout=10)
log.info("##### enable SSL in master2 with all ciphers")
- enable_ssl(topology.master2, M2LDAPSPORT, M2SERVERCERT)
+ enable_ssl(topology_m2.ms["master2"], M2LDAPSPORT, M2SERVERCERT)
log.info("##### restart master2")
- topology.master2.restart(timeout=30)
+ topology_m2.ms["master2"].restart(timeout=30)
log.info("##### restart master1")
- topology.master1.restart(timeout=30)
+ topology_m2.ms["master1"].restart(timeout=30)
log.info("\n######################### Creating SSL Keys and Certs Done ######################\n")
-def config_tls_agreements(topology):
+def config_tls_agreements(topology_m2):
log.info("######################### Configure SSL/TLS agreements ######################")
log.info("######################## master1 <-- startTLS -> master2 #####################")
log.info("##### Update the agreement of master1")
- global m1_m2_agmt
- topology.master1.modify_s(m1_m2_agmt, [(ldap.MOD_REPLACE, 'nsDS5ReplicaTransportInfo', 'TLS')])
+ m1_m2_agmt = topology_m2.ms["master1_agmts"]["m1_m2"]
+ topology_m2.ms["master1"].modify_s(m1_m2_agmt, [(ldap.MOD_REPLACE, 'nsDS5ReplicaTransportInfo', 'TLS')])
log.info("##### Update the agreement of master2")
- global m2_m1_agmt
- topology.master2.modify_s(m2_m1_agmt, [(ldap.MOD_REPLACE, 'nsDS5ReplicaTransportInfo', 'TLS')])
+ m2_m1_agmt = topology_m2.ms["master2_agmts"]["m2_m1"]
+ topology_m2.ms["master2"].modify_s(m2_m1_agmt, [(ldap.MOD_REPLACE, 'nsDS5ReplicaTransportInfo', 'TLS')])
time.sleep(1)
- topology.master1.restart(10)
- topology.master2.restart(10)
+ topology_m2.ms["master1"].restart(10)
+ topology_m2.ms["master2"].restart(10)
log.info("\n######################### Configure SSL/TLS agreements Done ######################\n")
@@ -382,7 +268,7 @@ def set_ssl_Version(server, name, version):
assert False
-def test_ticket48784(topology):
+def test_ticket48784(topology_m2):
"""
Set up 2way MMR:
master_1 <----- startTLS -----> master_2
@@ -393,43 +279,43 @@ def test_ticket48784(topology):
"""
log.info("Ticket 48784 - Allow usage of OpenLDAP libraries that don't use NSS for crypto")
- create_keys_certs(topology)
- config_tls_agreements(topology)
+ create_keys_certs(topology_m2)
+ config_tls_agreements(topology_m2)
- add_entry(topology.master1, 'master1', 'uid=m1user', 0, 5)
- add_entry(topology.master2, 'master2', 'uid=m2user', 0, 5)
+ add_entry(topology_m2.ms["master1"], 'master1', 'uid=m1user', 0, 5)
+ add_entry(topology_m2.ms["master2"], 'master2', 'uid=m2user', 0, 5)
time.sleep(10)
log.info('##### Searching for entries on master1...')
- entries = topology.master1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
+ entries = topology_m2.ms["master1"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
assert 10 == len(entries)
log.info('##### Searching for entries on master2...')
- entries = topology.master2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
+ entries = topology_m2.ms["master2"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
assert 10 == len(entries)
log.info("##### openldap client just accepts sslVersionMin not Max.")
- set_ssl_Version(topology.master1, 'master1', 'SSL3')
- set_ssl_Version(topology.master2, 'master2', 'TLS1.2')
+ set_ssl_Version(topology_m2.ms["master1"], 'master1', 'SSL3')
+ set_ssl_Version(topology_m2.ms["master2"], 'master2', 'TLS1.2')
log.info("##### restart master[12]")
- topology.master1.restart(timeout=10)
- topology.master2.restart(timeout=10)
+ topology_m2.ms["master1"].restart(timeout=10)
+ topology_m2.ms["master2"].restart(timeout=10)
log.info("##### replication from master_1 to master_2 should be ok.")
- add_entry(topology.master1, 'master1', 'uid=m1user', 10, 1)
+ add_entry(topology_m2.ms["master1"], 'master1', 'uid=m1user', 10, 1)
log.info("##### replication from master_2 to master_1 should fail.")
- add_entry(topology.master2, 'master2', 'uid=m2user', 10, 1)
+ add_entry(topology_m2.ms["master2"], 'master2', 'uid=m2user', 10, 1)
time.sleep(10)
log.info('##### Searching for entries on master1...')
- entries = topology.master1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
+ entries = topology_m2.ms["master1"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
assert 11 == len(entries) # This is supposed to be "1" less than master 2's entry count
log.info('##### Searching for entries on master2...')
- entries = topology.master2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
+ entries = topology_m2.ms["master2"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
assert 12 == len(entries)
log.info("Ticket 48784 - PASSED")
diff --git a/dirsrvtests/tests/tickets/ticket48798_test.py b/dirsrvtests/tests/tickets/ticket48798_test.py
index 297e2ef..98d0cc0 100644
--- a/dirsrvtests/tests/tickets/ticket48798_test.py
+++ b/dirsrvtests/tests/tickets/ticket48798_test.py
@@ -1,54 +1,14 @@
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-
-import nss
+from subprocess import check_output
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
+import pytest
from lib389.tasks import *
from lib389.utils import *
-
-from subprocess import check_output
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
def check_socket_dh_param_size(hostname, port):
### You know why we have to do this?
# Because TLS and SSL suck. Hard. They are impossible. It's all terrible, burn it all down.
@@ -67,7 +27,7 @@ def check_socket_dh_param_size(hostname, port):
return i
-def test_ticket48798(topology):
+def test_ticket48798(topology_st):
"""
Test DH param sizes offered by DS.
@@ -78,69 +38,69 @@ def test_ticket48798(topology):
## THIS ASSUMES old nss format. SQLite will bite us!
for f in ('key3.db', 'cert8.db', 'key4.db', 'cert9.db', 'secmod.db', 'pkcs11.txt'):
try:
- os.remove("%s/%s" % (topology.standalone.confdir, f ))
+ os.remove("%s/%s" % (topology_st.standalone.confdir, f))
except:
pass
# Check if the db exists. Should be false.
- assert(topology.standalone.nss_ssl._db_exists() is False)
+ assert (topology_st.standalone.nss_ssl._db_exists() is False)
time.sleep(0.5)
# Create it. Should work.
- assert(topology.standalone.nss_ssl.reinit() is True)
+ assert (topology_st.standalone.nss_ssl.reinit() is True)
time.sleep(0.5)
# Check if the db exists. Should be true
- assert(topology.standalone.nss_ssl._db_exists() is True)
+ assert (topology_st.standalone.nss_ssl._db_exists() is True)
time.sleep(0.5)
# Check if ca exists. Should be false.
- assert(topology.standalone.nss_ssl._rsa_ca_exists() is False)
+ assert (topology_st.standalone.nss_ssl._rsa_ca_exists() is False)
time.sleep(0.5)
# Create it. Should work.
- assert(topology.standalone.nss_ssl.create_rsa_ca() is True)
+ assert (topology_st.standalone.nss_ssl.create_rsa_ca() is True)
time.sleep(0.5)
# Check if ca exists. Should be true
- assert(topology.standalone.nss_ssl._rsa_ca_exists() is True)
+ assert (topology_st.standalone.nss_ssl._rsa_ca_exists() is True)
time.sleep(0.5)
# Check if we have a server cert / key. Should be false.
- assert(topology.standalone.nss_ssl._rsa_key_and_cert_exists() is False)
+ assert (topology_st.standalone.nss_ssl._rsa_key_and_cert_exists() is False)
time.sleep(0.5)
# Create it. Should work.
- assert(topology.standalone.nss_ssl.create_rsa_key_and_cert() is True)
+ assert (topology_st.standalone.nss_ssl.create_rsa_key_and_cert() is True)
time.sleep(0.5)
# Check if server cert and key exist. Should be true.
- assert(topology.standalone.nss_ssl._rsa_key_and_cert_exists() is True)
+ assert (topology_st.standalone.nss_ssl._rsa_key_and_cert_exists() is True)
time.sleep(0.5)
- topology.standalone.config.enable_ssl(secport=DEFAULT_SECURE_PORT, secargs={'nsSSL3Ciphers': '+all'} )
+ topology_st.standalone.config.enable_ssl(secport=DEFAULT_SECURE_PORT, secargs={'nsSSL3Ciphers': '+all'})
- topology.standalone.restart(30)
+ topology_st.standalone.restart(30)
# Confirm that we have a connection, and that it has DH
# Open a socket to the port.
# Check the security settings.
- size = check_socket_dh_param_size(topology.standalone.host, DEFAULT_SECURE_PORT)
+ size = check_socket_dh_param_size(topology_st.standalone.host, DEFAULT_SECURE_PORT)
- assert(size == 2048)
+ assert (size == 2048)
# Now toggle the settings.
mod = [(ldap.MOD_REPLACE, 'allowWeakDHParam', 'on')]
dn_enc = 'cn=encryption,cn=config'
- topology.standalone.modify_s(dn_enc, mod)
+ topology_st.standalone.modify_s(dn_enc, mod)
- topology.standalone.restart(30)
+ topology_st.standalone.restart(30)
# Check the DH params are less than 1024.
- size = check_socket_dh_param_size(topology.standalone.host, DEFAULT_SECURE_PORT)
+ size = check_socket_dh_param_size(topology_st.standalone.host, DEFAULT_SECURE_PORT)
- assert(size == 1024)
+ assert (size == 1024)
log.info('Test complete')
diff --git a/dirsrvtests/tests/tickets/ticket48799_test.py b/dirsrvtests/tests/tickets/ticket48799_test.py
index 47461eb..a52e85c 100644
--- a/dirsrvtests/tests/tickets/ticket48799_test.py
+++ b/dirsrvtests/tests/tickets/ticket48799_test.py
@@ -1,103 +1,12 @@
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_m1c1
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-class TopologyReplication(object):
- def __init__(self, master1, consumer1):
- master1.open()
- self.master1 = master1
- consumer1.open()
- self.consumer1 = consumer1
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- # Creating master 1...
- master1 = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_MASTER_1
- args_instance[SER_PORT] = PORT_MASTER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master1.allocate(args_master)
- instance_master1 = master1.exists()
- if instance_master1:
- master1.delete()
- master1.create()
- master1.open()
- master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-
- # Creating consumer 1...
- consumer1 = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_CONSUMER_1
- args_instance[SER_PORT] = PORT_CONSUMER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_consumer = args_instance.copy()
- consumer1.allocate(args_consumer)
- instance_consumer1 = consumer1.exists()
- if instance_consumer1:
- consumer1.delete()
- consumer1.create()
- consumer1.open()
- consumer1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER, replicaId=CONSUMER_REPLICAID)
-
- #
- # Create all the agreements
- #
- # Creating agreement from master 1 to consumer 1
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m1_c1_agmt = master1.agreement.create(suffix=SUFFIX, host=consumer1.host, port=consumer1.port, properties=properties)
- if not m1_c1_agmt:
- log.fatal("Fail to create a hub -> consumer replica agreement")
- sys.exit(1)
- log.debug("%s created" % m1_c1_agmt)
-
- # Allow the replicas to get situated with the new agreements...
- time.sleep(5)
-
- #
- # Initialize all the agreements
- #
- master1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
- master1.waitForReplInit(m1_c1_agmt)
-
- # Check replication is working...
- if master1.testReplication(DEFAULT_SUFFIX, consumer1):
- log.info('Replication is working.')
- else:
- log.fatal('Replication is not working.')
- assert False
-
- # Delete each instance in the end
- def fin():
- master1.delete()
- consumer1.delete()
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- master1.clearTmpDir(__file__)
-
- return TopologyReplication(master1, consumer1)
-
-
def _add_custom_schema(server):
attr_value = "( 10.0.9.2342.19200300.100.1.1 NAME 'customManager' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-ORIGIN 'user defined' )"
mod = [(ldap.MOD_ADD, 'attributeTypes', attr_value)]
@@ -112,16 +21,16 @@ def _create_user(server):
server.add_s(Entry((
"uid=testuser,ou=People,%s" % DEFAULT_SUFFIX,
{
- 'objectClass' : "top account posixaccount".split(),
- 'uid' : 'testuser',
- 'gecos' : 'Test User',
- 'cn' : 'testuser',
- 'homedirectory' : '/home/testuser',
- 'passwordexpirationtime' : '20160710184141Z',
- 'userpassword' : '!',
- 'uidnumber' : '1111212',
- 'gidnumber' : '1111212',
- 'loginshell' : '/bin/bash'
+ 'objectClass': "top account posixaccount".split(),
+ 'uid': 'testuser',
+ 'gecos': 'Test User',
+ 'cn': 'testuser',
+ 'homedirectory': '/home/testuser',
+ 'passwordexpirationtime': '20160710184141Z',
+ 'userpassword': '!',
+ 'uidnumber': '1111212',
+ 'gidnumber': '1111212',
+ 'loginshell': '/bin/bash'
}
)))
@@ -135,34 +44,36 @@ def _modify_user(server):
server.modify("uid=testuser,ou=People,%s" % DEFAULT_SUFFIX, mod)
-def test_ticket48799(topology):
+def test_ticket48799(topology_m1c1):
"""Write your replication testcase here.
- To access each DirSrv instance use: topology.master1, topology.master2,
- ..., topology.hub1, ..., topology.consumer1,...
+ To access each DirSrv instance use: topology_m1c1.ms["master1"], topology_m1c1.ms["master1"]2,
+ ..., topology_m1c1.hub1, ..., topology_m1c1.cs["consumer1"],...
Also, if you need any testcase initialization,
please, write additional fixture for that(include finalizer).
"""
# Add the new schema element.
- _add_custom_schema(topology.master1)
- _add_custom_schema(topology.consumer1)
+ _add_custom_schema(topology_m1c1.ms["master1"])
+ _add_custom_schema(topology_m1c1.cs["consumer1"])
# Add a new user on the master.
- _create_user(topology.master1)
+ _create_user(topology_m1c1.ms["master1"])
# Modify the user on the master.
- _modify_user(topology.master1)
+ _modify_user(topology_m1c1.ms["master1"])
# We need to wait for replication here.
time.sleep(15)
# Now compare the master vs consumer, and see if the objectClass was dropped.
- master_entry = topology.master1.search_s("uid=testuser,ou=People,%s" % DEFAULT_SUFFIX, ldap.SCOPE_BASE, '(objectclass=*)', ['objectClass'])
- consumer_entry = topology.consumer1.search_s("uid=testuser,ou=People,%s" % DEFAULT_SUFFIX, ldap.SCOPE_BASE, '(objectclass=*)', ['objectClass'])
+ master_entry = topology_m1c1.ms["master1"].search_s("uid=testuser,ou=People,%s" % DEFAULT_SUFFIX, ldap.SCOPE_BASE,
+ '(objectclass=*)', ['objectClass'])
+ consumer_entry = topology_m1c1.cs["consumer1"].search_s("uid=testuser,ou=People,%s" % DEFAULT_SUFFIX,
+ ldap.SCOPE_BASE, '(objectclass=*)', ['objectClass'])
- assert(master_entry == consumer_entry)
+ assert (master_entry == consumer_entry)
log.info('Test complete')
diff --git a/dirsrvtests/tests/tickets/ticket48808_test.py b/dirsrvtests/tests/tickets/ticket48808_test.py
index 3dbceac..cf49e03 100644
--- a/dirsrvtests/tests/tickets/ticket48808_test.py
+++ b/dirsrvtests/tests/tickets/ticket48808_test.py
@@ -1,15 +1,10 @@
-import time
-import ldap
-import logging
-import pytest
from random import sample
+
+import pytest
from ldap.controls import SimplePagedResultsControl
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
@@ -19,61 +14,28 @@ TEST_USER_DN = 'uid=%s,%s' % (TEST_USER_NAME, DEFAULT_SUFFIX)
TEST_USER_PWD = 'simplepaged_test'
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- return TopologyStandalone(standalone)
-
-
@pytest.fixture(scope="module")
-def test_user(topology):
+def test_user(topology_st):
"""User for binding operation"""
try:
- topology.standalone.add_s(Entry((TEST_USER_DN, {
- 'objectclass': 'top person'.split(),
- 'objectclass': 'organizationalPerson',
- 'objectclass': 'inetorgperson',
- 'cn': TEST_USER_NAME,
- 'sn': TEST_USER_NAME,
- 'userpassword': TEST_USER_PWD,
- 'mail': '%s(a)redhat.com' % TEST_USER_NAME,
- 'uid': TEST_USER_NAME
- })))
+ topology_st.standalone.add_s(Entry((TEST_USER_DN, {
+ 'objectclass': 'top person'.split(),
+ 'objectclass': 'organizationalPerson',
+ 'objectclass': 'inetorgperson',
+ 'cn': TEST_USER_NAME,
+ 'sn': TEST_USER_NAME,
+ 'userpassword': TEST_USER_PWD,
+ 'mail': '%s(a)redhat.com' % TEST_USER_NAME,
+ 'uid': TEST_USER_NAME
+ })))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (TEST_USER_DN,
e.message['desc']))
raise e
-def add_users(topology, users_num):
+def add_users(topology_st, users_num):
"""Add users to the default suffix
and return a list of added user DNs.
"""
@@ -86,16 +48,16 @@ def add_users(topology, users_num):
USER_DN = 'uid=%s,%s' % (USER_NAME, DEFAULT_SUFFIX)
users_list.append(USER_DN)
try:
- topology.standalone.add_s(Entry((USER_DN, {
- 'objectclass': 'top person'.split(),
- 'objectclass': 'organizationalPerson',
- 'objectclass': 'inetorgperson',
- 'cn': USER_NAME,
- 'sn': USER_NAME,
- 'userpassword': 'pass%s' % num_ran,
- 'mail': '%s(a)redhat.com' % USER_NAME,
- 'uid': USER_NAME
- })))
+ topology_st.standalone.add_s(Entry((USER_DN, {
+ 'objectclass': 'top person'.split(),
+ 'objectclass': 'organizationalPerson',
+ 'objectclass': 'inetorgperson',
+ 'cn': USER_NAME,
+ 'sn': USER_NAME,
+ 'userpassword': 'pass%s' % num_ran,
+ 'mail': '%s(a)redhat.com' % USER_NAME,
+ 'uid': USER_NAME
+ })))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
@@ -103,48 +65,48 @@ def add_users(topology, users_num):
return users_list
-def del_users(topology, users_list):
+def del_users(topology_st, users_list):
"""Delete users with DNs from given list"""
log.info('Deleting %d users' % len(users_list))
for user_dn in users_list:
try:
- topology.standalone.delete_s(user_dn)
+ topology_st.standalone.delete_s(user_dn)
except ldap.LDAPError as e:
log.error('Failed to delete user (%s): error (%s)' % (user_dn,
e.message['desc']))
raise e
-def change_conf_attr(topology, suffix, attr_name, attr_value):
+def change_conf_attr(topology_st, suffix, attr_name, attr_value):
"""Change configurational attribute in the given suffix.
Funtion returns previous attribute value.
"""
try:
- entries = topology.standalone.search_s(suffix, ldap.SCOPE_BASE,
- 'objectclass=top',
- [attr_name])
+ entries = topology_st.standalone.search_s(suffix, ldap.SCOPE_BASE,
+ 'objectclass=top',
+ [attr_name])
attr_value_bck = entries[0].data.get(attr_name)
log.info('Set %s to %s. Previous value - %s. Modified suffix - %s.' % (
- attr_name, attr_value, attr_value_bck, suffix))
+ attr_name, attr_value, attr_value_bck, suffix))
if attr_value is None:
- topology.standalone.modify_s(suffix, [(ldap.MOD_DELETE,
- attr_name,
- attr_value)])
+ topology_st.standalone.modify_s(suffix, [(ldap.MOD_DELETE,
+ attr_name,
+ attr_value)])
else:
- topology.standalone.modify_s(suffix, [(ldap.MOD_REPLACE,
- attr_name,
- attr_value)])
+ topology_st.standalone.modify_s(suffix, [(ldap.MOD_REPLACE,
+ attr_name,
+ attr_value)])
except ldap.LDAPError as e:
- log.error('Failed to change attr value (%s): error (%s)' % (attr_name,
- e.message['desc']))
- raise e
+ log.error('Failed to change attr value (%s): error (%s)' % (attr_name,
+ e.message['desc']))
+ raise e
return attr_value_bck
-def paged_search(topology, controls, search_flt, searchreq_attrlist):
+def paged_search(topology_st, controls, search_flt, searchreq_attrlist):
"""Search at the DEFAULT_SUFFIX with ldap.SCOPE_SUBTREE
using Simple Paged Control(should the first item in the
list controls.
@@ -155,33 +117,33 @@ def paged_search(topology, controls, search_flt, searchreq_attrlist):
pctrls = []
all_results = []
req_ctrl = controls[0]
- msgid = topology.standalone.search_ext(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- search_flt,
- searchreq_attrlist,
- serverctrls=controls)
+ msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ search_flt,
+ searchreq_attrlist,
+ serverctrls=controls)
while True:
log.info('Getting page %d' % (pages,))
- rtype, rdata, rmsgid, rctrls = topology.standalone.result3(msgid)
+ rtype, rdata, rmsgid, rctrls = topology_st.standalone.result3(msgid)
all_results.extend(rdata)
pages += 1
pctrls = [
c
for c in rctrls
if c.controlType == SimplePagedResultsControl.controlType
- ]
+ ]
if pctrls:
if pctrls[0].cookie:
# Copy cookie from response control to request control
req_ctrl.cookie = pctrls[0].cookie
- msgid = topology.standalone.search_ext(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- search_flt,
- searchreq_attrlist,
- serverctrls=controls)
+ msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ search_flt,
+ searchreq_attrlist,
+ serverctrls=controls)
else:
- break # no more pages available
+ break # no more pages available
else:
break
@@ -189,16 +151,16 @@ def paged_search(topology, controls, search_flt, searchreq_attrlist):
return all_results
-def test_ticket48808(topology, test_user):
+def test_ticket48808(topology_st, test_user):
log.info('Run multiple paging controls on a single connection')
users_num = 100
page_size = 30
- users_list = add_users(topology, users_num)
+ users_list = add_users(topology_st, users_num)
search_flt = r'(uid=test*)'
searchreq_attrlist = ['dn', 'sn']
log.info('Set user bind')
- topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
log.info('Create simple paged results control instance')
req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
@@ -206,58 +168,58 @@ def test_ticket48808(topology, test_user):
for ii in xrange(3):
log.info('Iteration %d' % ii)
- msgid = topology.standalone.search_ext(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- search_flt,
- searchreq_attrlist,
- serverctrls=controls)
- rtype, rdata, rmsgid, rctrls = topology.standalone.result3(msgid)
+ msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ search_flt,
+ searchreq_attrlist,
+ serverctrls=controls)
+ rtype, rdata, rmsgid, rctrls = topology_st.standalone.result3(msgid)
pctrls = [
c
for c in rctrls
if c.controlType == SimplePagedResultsControl.controlType
- ]
+ ]
req_ctrl.cookie = pctrls[0].cookie
- msgid = topology.standalone.search_ext(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- search_flt,
- searchreq_attrlist,
- serverctrls=controls)
+ msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ search_flt,
+ searchreq_attrlist,
+ serverctrls=controls)
log.info('Set Directory Manager bind back')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- del_users(topology, users_list)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ del_users(topology_st, users_list)
log.info('Abandon the search')
users_num = 10
page_size = 0
- users_list = add_users(topology, users_num)
+ users_list = add_users(topology_st, users_num)
search_flt = r'(uid=test*)'
searchreq_attrlist = ['dn', 'sn']
log.info('Set user bind')
- topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
log.info('Create simple paged results control instance')
req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
controls = [req_ctrl]
- msgid = topology.standalone.search_ext(DEFAULT_SUFFIX,
- ldap.SCOPE_SUBTREE,
- search_flt,
- searchreq_attrlist,
- serverctrls=controls)
- rtype, rdata, rmsgid, rctrls = topology.standalone.result3(msgid)
+ msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX,
+ ldap.SCOPE_SUBTREE,
+ search_flt,
+ searchreq_attrlist,
+ serverctrls=controls)
+ rtype, rdata, rmsgid, rctrls = topology_st.standalone.result3(msgid)
pctrls = [
c
for c in rctrls
if c.controlType == SimplePagedResultsControl.controlType
- ]
+ ]
assert not pctrls[0].cookie
log.info('Set Directory Manager bind back')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- del_users(topology, users_list)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ del_users(topology_st, users_list)
log.info("Search should fail with 'nsPagedSizeLimit = 5'"
"and 'nsslapd-pagedsizelimit = 15' with 10 users")
@@ -266,16 +228,16 @@ def test_ticket48808(topology, test_user):
expected_rs = ldap.SIZELIMIT_EXCEEDED
users_num = 10
page_size = 10
- users_list = add_users(topology, users_num)
+ users_list = add_users(topology_st, users_num)
search_flt = r'(uid=test*)'
searchreq_attrlist = ['dn', 'sn']
- conf_attr_bck = change_conf_attr(topology, DN_CONFIG,
- 'nsslapd-pagedsizelimit', conf_attr)
- user_attr_bck = change_conf_attr(topology, TEST_USER_DN,
- 'nsPagedSizeLimit', user_attr)
+ conf_attr_bck = change_conf_attr(topology_st, DN_CONFIG,
+ 'nsslapd-pagedsizelimit', conf_attr)
+ user_attr_bck = change_conf_attr(topology_st, TEST_USER_DN,
+ 'nsPagedSizeLimit', user_attr)
log.info('Set user bind')
- topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
log.info('Create simple paged results control instance')
req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
@@ -283,15 +245,15 @@ def test_ticket48808(topology, test_user):
log.info('Expect to fail with SIZELIMIT_EXCEEDED')
with pytest.raises(expected_rs):
- all_results = paged_search(topology, controls,
+ all_results = paged_search(topology_st, controls,
search_flt, searchreq_attrlist)
log.info('Set Directory Manager bind back')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- del_users(topology, users_list)
- change_conf_attr(topology, DN_CONFIG,
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ del_users(topology_st, users_list)
+ change_conf_attr(topology_st, DN_CONFIG,
'nsslapd-pagedsizelimit', conf_attr_bck)
- change_conf_attr(topology, TEST_USER_DN,
+ change_conf_attr(topology_st, TEST_USER_DN,
'nsPagedSizeLimit', user_attr_bck)
log.info("Search should pass with 'nsPagedSizeLimit = 15'"
@@ -300,34 +262,34 @@ def test_ticket48808(topology, test_user):
user_attr = '15'
users_num = 10
page_size = 10
- users_list = add_users(topology, users_num)
+ users_list = add_users(topology_st, users_num)
search_flt = r'(uid=test*)'
searchreq_attrlist = ['dn', 'sn']
- conf_attr_bck = change_conf_attr(topology, DN_CONFIG,
- 'nsslapd-pagedsizelimit', conf_attr)
- user_attr_bck = change_conf_attr(topology, TEST_USER_DN,
- 'nsPagedSizeLimit', user_attr)
+ conf_attr_bck = change_conf_attr(topology_st, DN_CONFIG,
+ 'nsslapd-pagedsizelimit', conf_attr)
+ user_attr_bck = change_conf_attr(topology_st, TEST_USER_DN,
+ 'nsPagedSizeLimit', user_attr)
log.info('Set user bind')
- topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
log.info('Create simple paged results control instance')
req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
controls = [req_ctrl]
log.info('Search should PASS')
- all_results = paged_search(topology, controls,
+ all_results = paged_search(topology_st, controls,
search_flt, searchreq_attrlist)
log.info('%d results' % len(all_results))
assert len(all_results) == len(users_list)
log.info('Set Directory Manager bind back')
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- del_users(topology, users_list)
- change_conf_attr(topology, DN_CONFIG,
- 'nsslapd-pagedsizelimit', conf_attr_bck)
- change_conf_attr(topology, TEST_USER_DN,
- 'nsPagedSizeLimit', user_attr_bck)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ del_users(topology_st, users_list)
+ change_conf_attr(topology_st, DN_CONFIG,
+ 'nsslapd-pagedsizelimit', conf_attr_bck)
+ change_conf_attr(topology_st, TEST_USER_DN,
+ 'nsPagedSizeLimit', user_attr_bck)
if __name__ == '__main__':
diff --git a/dirsrvtests/tests/tickets/ticket48844_test.py b/dirsrvtests/tests/tickets/ticket48844_test.py
index de719c3..9e0ba68 100644
--- a/dirsrvtests/tests/tickets/ticket48844_test.py
+++ b/dirsrvtests/tests/tickets/ticket48844_test.py
@@ -1,24 +1,14 @@
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
PLUGIN_BITWISE = 'Bitwise Plugin'
-TESTBASEDN="dc=bitwise,dc=com"
-TESTBACKEND_NAME="TestBitw"
+TESTBASEDN = "dc=bitwise,dc=com"
+TESTBACKEND_NAME = "TestBitw"
F1 = 'objectclass=testperson'
BITWISE_F2 = '(&(%s)(testUserAccountControl:1.2.840.113556.1.4.803:=514))' % F1
@@ -26,147 +16,110 @@ BITWISE_F3 = '(&(%s)(testUserAccountControl:1.2.840.113556.1.4.803:=513))' % F1
BITWISE_F6 = '(&(%s)(testUserAccountControl:1.2.840.113556.1.4.803:=16777216))' % F1
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- return TopologyStandalone(standalone)
-
-
-def _addBitwiseEntries(topology):
-
+def _addBitwiseEntries(topology_st):
users = [
- ('testuser2', '65536' ,'PasswordNeverExpired' ),
- ('testuser3', '8388608' ,'PasswordExpired'),
- ('testuser4', '256' ,'TempDuplicateAccount'),
- ('testuser5', '16777216' ,'TrustedAuthDelegation'),
- ('testuser6', '528' ,'AccountLocked'),
- ('testuser7', '513' ,'AccountActive'),
- ('testuser8', '98536 99512 99528'.split() ,'AccountActive PasswordExxpired AccountLocked'.split()),
- ('testuser9', '87536 912'.split() ,'AccountActive PasswordNeverExpired'.split()),
- ('testuser10', '89536 97546 96579'.split() ,'TestVerify1 TestVerify2 TestVerify3'.split() ),
- ('testuser11', '655236' ,'TestStatus1'),
- ('testuser12', '665522' ,'TestStatus2'),
- ('testuser13', '266552' ,'TestStatus3')]
+ ('testuser2', '65536', 'PasswordNeverExpired'),
+ ('testuser3', '8388608', 'PasswordExpired'),
+ ('testuser4', '256', 'TempDuplicateAccount'),
+ ('testuser5', '16777216', 'TrustedAuthDelegation'),
+ ('testuser6', '528', 'AccountLocked'),
+ ('testuser7', '513', 'AccountActive'),
+ ('testuser8', '98536 99512 99528'.split(), 'AccountActive PasswordExxpired AccountLocked'.split()),
+ ('testuser9', '87536 912'.split(), 'AccountActive PasswordNeverExpired'.split()),
+ ('testuser10', '89536 97546 96579'.split(), 'TestVerify1 TestVerify2 TestVerify3'.split()),
+ ('testuser11', '655236', 'TestStatus1'),
+ ('testuser12', '665522', 'TestStatus2'),
+ ('testuser13', '266552', 'TestStatus3')]
try:
- topology.standalone.add_s(Entry((TESTBASEDN,
- {'objectclass': "top dcobject".split(),
- 'dc': 'bitwise',
- 'aci': '(target =\"ldap:///dc=bitwise,dc=com\")' +\
- '(targetattr != \"userPassword\")' +\
- '(version 3.0;acl \"Anonymous read-search access\";' +\
- 'allow (read, search, compare)(userdn = \"ldap:///anyone\");)'})))
-
- topology.standalone.add_s(Entry(('uid=btestuser1,%s' % TESTBASEDN,
- {'objectclass': 'top testperson organizationalPerson inetorgperson'.split(),
- 'mail': 'btestuser1(a)redhat.com',
- 'uid': 'btestuser1',
- 'givenName': 'bit',
- 'sn': 'testuser1',
- 'userPassword': 'testuser1',
- 'testUserAccountControl': '514',
- 'testUserStatus': 'Disabled',
- 'cn': 'bit tetsuser1'})))
- for (userid, accCtl,accStatus) in users:
- topology.standalone.add_s(Entry(('uid=b%s,%s' % (userid, TESTBASEDN),
- {'objectclass': 'top testperson organizationalPerson inetorgperson'.split(),
- 'mail': '%s(a)redhat.com' % userid,
- 'uid': 'b%s' % userid,
- 'givenName': 'bit',
- 'sn': userid,
- 'userPassword': userid,
- 'testUserAccountControl': accCtl,
- 'testUserStatus': accStatus,
- 'cn': 'bit %s' % userid})))
+ topology_st.standalone.add_s(Entry((TESTBASEDN,
+ {'objectclass': "top dcobject".split(),
+ 'dc': 'bitwise',
+ 'aci': '(target =\"ldap:///dc=bitwise,dc=com\")' + \
+ '(targetattr != \"userPassword\")' + \
+ '(version 3.0;acl \"Anonymous read-search access\";' + \
+ 'allow (read, search, compare)(userdn = \"ldap:///anyone\");)'})))
+
+ topology_st.standalone.add_s(Entry(('uid=btestuser1,%s' % TESTBASEDN,
+ {'objectclass': 'top testperson organizationalPerson inetorgperson'.split(),
+ 'mail': 'btestuser1(a)redhat.com',
+ 'uid': 'btestuser1',
+ 'givenName': 'bit',
+ 'sn': 'testuser1',
+ 'userPassword': 'testuser1',
+ 'testUserAccountControl': '514',
+ 'testUserStatus': 'Disabled',
+ 'cn': 'bit tetsuser1'})))
+ for (userid, accCtl, accStatus) in users:
+ topology_st.standalone.add_s(Entry(('uid=b%s,%s' % (userid, TESTBASEDN),
+ {
+ 'objectclass': 'top testperson organizationalPerson inetorgperson'.split(),
+ 'mail': '%s(a)redhat.com' % userid,
+ 'uid': 'b%s' % userid,
+ 'givenName': 'bit',
+ 'sn': userid,
+ 'userPassword': userid,
+ 'testUserAccountControl': accCtl,
+ 'testUserStatus': accStatus,
+ 'cn': 'bit %s' % userid})))
except ValueError:
- topology.standalone.log.fatal("add_s failed: %s", ValueError)
+ topology_st.standalone.log.fatal("add_s failed: %s", ValueError)
-def test_ticket48844_init(topology):
+def test_ticket48844_init(topology_st):
# create a suffix where test entries will be stored
BITW_SCHEMA_AT_1 = '( NAME \'testUserAccountControl\' DESC \'Attribute Bitwise filteri-Multi-Valued\' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 )'
BITW_SCHEMA_AT_2 = '( NAME \'testUserStatus\' DESC \'State of User account active/disabled\' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 )'
- BITW_SCHEMA_OC_1 = '( NAME \'testperson\' SUP top STRUCTURAL MUST ( sn $ cn $ testUserAccountControl $ testUserStatus )' +\
- ' MAY ( userPassword $ telephoneNumber $ seeAlso $ description ) X-ORIGIN \'BitWise\' )'
- topology.standalone.schema.add_schema('attributetypes', [BITW_SCHEMA_AT_1, BITW_SCHEMA_AT_2])
- topology.standalone.schema.add_schema('objectClasses', BITW_SCHEMA_OC_1)
+ BITW_SCHEMA_OC_1 = '( NAME \'testperson\' SUP top STRUCTURAL MUST ( sn $ cn $ testUserAccountControl $ testUserStatus )' + \
+ ' MAY ( userPassword $ telephoneNumber $ seeAlso $ description ) X-ORIGIN \'BitWise\' )'
+ topology_st.standalone.schema.add_schema('attributetypes', [BITW_SCHEMA_AT_1, BITW_SCHEMA_AT_2])
+ topology_st.standalone.schema.add_schema('objectClasses', BITW_SCHEMA_OC_1)
- topology.standalone.backend.create(TESTBASEDN, {BACKEND_NAME: TESTBACKEND_NAME})
- topology.standalone.mappingtree.create(TESTBASEDN, bename=TESTBACKEND_NAME, parent=None)
- _addBitwiseEntries(topology)
+ topology_st.standalone.backend.create(TESTBASEDN, {BACKEND_NAME: TESTBACKEND_NAME})
+ topology_st.standalone.mappingtree.create(TESTBASEDN, bename=TESTBACKEND_NAME, parent=None)
+ _addBitwiseEntries(topology_st)
-def test_ticket48844_bitwise_on(topology):
+def test_ticket48844_bitwise_on(topology_st):
"""
Check that bitwise plugin (old style MR plugin) that defines
Its own indexer create function, is selected to evaluate the filter
"""
- topology.standalone.plugins.enable(name=PLUGIN_BITWISE)
- topology.standalone.restart(timeout=10)
- ents = topology.standalone.search_s('cn=%s,cn=plugins,cn=config' % PLUGIN_BITWISE, ldap.SCOPE_BASE, 'objectclass=*')
- assert(ents[0].hasValue('nsslapd-pluginEnabled', 'on'))
+ topology_st.standalone.plugins.enable(name=PLUGIN_BITWISE)
+ topology_st.standalone.restart(timeout=10)
+ ents = topology_st.standalone.search_s('cn=%s,cn=plugins,cn=config' % PLUGIN_BITWISE, ldap.SCOPE_BASE,
+ 'objectclass=*')
+ assert (ents[0].hasValue('nsslapd-pluginEnabled', 'on'))
expect = 2
- ents = topology.standalone.search_s(TESTBASEDN, ldap.SCOPE_SUBTREE, BITWISE_F2)
+ ents = topology_st.standalone.search_s(TESTBASEDN, ldap.SCOPE_SUBTREE, BITWISE_F2)
assert (len(ents) == expect)
- expect=1
- ents = topology.standalone.search_s(TESTBASEDN, ldap.SCOPE_SUBTREE, BITWISE_F3)
+ expect = 1
+ ents = topology_st.standalone.search_s(TESTBASEDN, ldap.SCOPE_SUBTREE, BITWISE_F3)
assert (len(ents) == expect)
assert (ents[0].hasAttr('testUserAccountControl'))
- expect=1
- ents = topology.standalone.search_s(TESTBASEDN, ldap.SCOPE_SUBTREE, BITWISE_F6)
+ expect = 1
+ ents = topology_st.standalone.search_s(TESTBASEDN, ldap.SCOPE_SUBTREE, BITWISE_F6)
assert (len(ents) == expect)
assert (ents[0].hasAttr('testUserAccountControl'))
-def test_ticket48844_bitwise_off(topology):
+def test_ticket48844_bitwise_off(topology_st):
"""
Check that when bitwise plugin is not enabled, no plugin
is identified to evaluate the filter -> ldap.UNAVAILABLE_CRITICAL_EXTENSION:
"""
- topology.standalone.plugins.disable(name=PLUGIN_BITWISE)
- topology.standalone.restart(timeout=10)
- ents = topology.standalone.search_s('cn=%s,cn=plugins,cn=config' % PLUGIN_BITWISE, ldap.SCOPE_BASE, 'objectclass=*')
- assert(ents[0].hasValue('nsslapd-pluginEnabled', 'off'))
+ topology_st.standalone.plugins.disable(name=PLUGIN_BITWISE)
+ topology_st.standalone.restart(timeout=10)
+ ents = topology_st.standalone.search_s('cn=%s,cn=plugins,cn=config' % PLUGIN_BITWISE, ldap.SCOPE_BASE,
+ 'objectclass=*')
+ assert (ents[0].hasValue('nsslapd-pluginEnabled', 'off'))
res = 0
try:
- ents = topology.standalone.search_s(TESTBASEDN, ldap.SCOPE_SUBTREE, BITWISE_F2)
+ ents = topology_st.standalone.search_s(TESTBASEDN, ldap.SCOPE_SUBTREE, BITWISE_F2)
except ldap.UNAVAILABLE_CRITICAL_EXTENSION:
res = 12
assert (res == 12)
diff --git a/dirsrvtests/tests/tickets/ticket48891_test.py b/dirsrvtests/tests/tickets/ticket48891_test.py
index 5c6e57d..91290d0 100644
--- a/dirsrvtests/tests/tickets/ticket48891_test.py
+++ b/dirsrvtests/tests/tickets/ticket48891_test.py
@@ -6,16 +6,12 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import time
-import ldap
+import fnmatch
import logging
+
import pytest
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
-import fnmatch
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
@@ -30,48 +26,7 @@ OTHER_NAME = 'other_entry'
MAX_OTHERS = 10
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
- standalone = DirSrv(verbose=False)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
-
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-
-def test_ticket48891_setup(topology):
+def test_ticket48891_setup(topology_st):
"""
Check there is no core
Create a second backend
@@ -81,58 +36,59 @@ def test_ticket48891_setup(topology):
log.info('Testing Ticket 48891 - ns-slapd crashes during the shutdown after adding attribute with a matching rule')
# bind as directory manager
- topology.standalone.log.info("Bind as %s" % DN_DM)
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.log.info("Bind as %s" % DN_DM)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
# check there is no core
- entry = topology.standalone.search_s(CONFIG_DN, ldap.SCOPE_BASE,
- "(cn=config)", ['nsslapd-errorlog'])
+ entry = topology_st.standalone.search_s(CONFIG_DN, ldap.SCOPE_BASE,
+ "(cn=config)", ['nsslapd-errorlog'])
assert entry
path = entry[0].getValue('nsslapd-errorlog').replace('errors', '')
log.debug('Looking for a core file in: ' + path)
cores = fnmatch.filter(os.listdir(path), 'core.*')
assert len(cores) == 0
- topology.standalone.log.info("\n\n######################### SETUP SUFFIX o=ticket48891.org ######################\n")
+ topology_st.standalone.log.info(
+ "\n\n######################### SETUP SUFFIX o=ticket48891.org ######################\n")
- topology.standalone.backend.create(MYSUFFIX, {BACKEND_NAME: MYSUFFIXBE})
- topology.standalone.mappingtree.create(MYSUFFIX, bename=MYSUFFIXBE)
- topology.standalone.add_s(Entry((MYSUFFIX, {
- 'objectclass': "top domain".split(),
- 'dc': RDN_VAL_SUFFIX})))
+ topology_st.standalone.backend.create(MYSUFFIX, {BACKEND_NAME: MYSUFFIXBE})
+ topology_st.standalone.mappingtree.create(MYSUFFIX, bename=MYSUFFIXBE)
+ topology_st.standalone.add_s(Entry((MYSUFFIX, {
+ 'objectclass': "top domain".split(),
+ 'dc': RDN_VAL_SUFFIX})))
- topology.standalone.log.info("\n\n######################### Generate Test data ######################\n")
+ topology_st.standalone.log.info("\n\n######################### Generate Test data ######################\n")
# add dummy entries on both backends
for cpt in range(MAX_OTHERS):
name = "%s%d" % (OTHER_NAME, cpt)
- topology.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
- 'objectclass': "top person".split(),
- 'sn': name,
- 'cn': name})))
+ topology_st.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
+ 'objectclass': "top person".split(),
+ 'sn': name,
+ 'cn': name})))
for cpt in range(MAX_OTHERS):
name = "%s%d" % (OTHER_NAME, cpt)
- topology.standalone.add_s(Entry(("cn=%s,%s" % (name, MYSUFFIX), {
- 'objectclass': "top person".split(),
- 'sn': name,
- 'cn': name})))
+ topology_st.standalone.add_s(Entry(("cn=%s,%s" % (name, MYSUFFIX), {
+ 'objectclass': "top person".split(),
+ 'sn': name,
+ 'cn': name})))
- topology.standalone.log.info("\n\n######################### SEARCH ALL ######################\n")
- topology.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM)
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.log.info("\n\n######################### SEARCH ALL ######################\n")
+ topology_st.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
- entries = topology.standalone.search_s(MYSUFFIX, ldap.SCOPE_SUBTREE, SEARCHFILTER)
- topology.standalone.log.info("Returned %d entries.\n", len(entries))
+ entries = topology_st.standalone.search_s(MYSUFFIX, ldap.SCOPE_SUBTREE, SEARCHFILTER)
+ topology_st.standalone.log.info("Returned %d entries.\n", len(entries))
assert MAX_OTHERS == len(entries)
- topology.standalone.log.info('%d person entries are successfully created under %s.' % (len(entries), MYSUFFIX))
- topology.standalone.stop(timeout=1)
+ topology_st.standalone.log.info('%d person entries are successfully created under %s.' % (len(entries), MYSUFFIX))
+ topology_st.standalone.stop(timeout=1)
cores = fnmatch.filter(os.listdir(path), 'core.*')
for core in cores:
core = os.path.join(path, core)
- topology.standalone.log.info('cores are %s' % core)
+ topology_st.standalone.log.info('cores are %s' % core)
assert not os.path.isfile(core)
log.info('Testcase PASSED')
diff --git a/dirsrvtests/tests/tickets/ticket48893_test.py b/dirsrvtests/tests/tickets/ticket48893_test.py
index 76d9dc9..12ff3f6 100644
--- a/dirsrvtests/tests/tickets/ticket48893_test.py
+++ b/dirsrvtests/tests/tickets/ticket48893_test.py
@@ -1,73 +1,17 @@
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
-DEBUGGING = False
+DEBUGGING = os.getenv('DEBUGGING', False)
if DEBUGGING:
logging.getLogger(__name__).setLevel(logging.DEBUG)
else:
logging.getLogger(__name__).setLevel(logging.INFO)
-
log = logging.getLogger(__name__)
-class TopologyStandalone(object):
- """The DS Topology Class"""
- def __init__(self, standalone):
- """Init"""
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- """Create DS Deployment"""
-
- # Creating standalone instance ...
- if DEBUGGING:
- standalone = DirSrv(verbose=True)
- else:
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- """If we are debugging just stop the instances, otherwise remove
- them
- """
- if DEBUGGING:
- standalone.stop(60)
- else:
- standalone.delete()
-
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- return TopologyStandalone(standalone)
-
-
def _attr_present(conn):
results = conn.search_s('cn=config', ldap.SCOPE_SUBTREE, '(objectClass=*)')
if DEBUGGING:
@@ -76,7 +20,8 @@ def _attr_present(conn):
return True
return False
-def test_ticket48893(topology):
+
+def test_ticket48893(topology_st):
"""
Test that anonymous has NO VIEW to cn=config
"""
@@ -90,7 +35,7 @@ def test_ticket48893(topology):
conn.simple_bind_s()
# Make sure that we cannot see what's in cn=config as anonymous
- assert(not _attr_present(conn))
+ assert (not _attr_present(conn))
conn.unbind_s()
@@ -102,4 +47,3 @@ if __name__ == '__main__':
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s %s" % CURRENT_FILE)
-
diff --git a/dirsrvtests/tests/tickets/ticket48896_test.py b/dirsrvtests/tests/tickets/ticket48896_test.py
index b2675ec..a170e1c 100644
--- a/dirsrvtests/tests/tickets/ticket48896_test.py
+++ b/dirsrvtests/tests/tickets/ticket48896_test.py
@@ -6,72 +6,22 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
-
CONFIG_DN = 'cn=config'
UID = 'buser123'
TESTDN = 'uid=%s,' % UID + DEFAULT_SUFFIX
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-installation1_prefix = None
-
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- standalone.clearTmpDir(__file__)
-
- return TopologyStandalone(standalone)
-
-def check_attr_val(topology, dn, attr, expected):
+def check_attr_val(topology_st, dn, attr, expected):
try:
- centry = topology.standalone.search_s(dn, ldap.SCOPE_BASE, 'cn=*')
+ centry = topology_st.standalone.search_s(dn, ldap.SCOPE_BASE, 'cn=*')
if centry:
val = centry[0].getValue(attr)
if val == expected:
@@ -86,6 +36,7 @@ def check_attr_val(topology, dn, attr, expected):
log.fatal('Failed to search ' + dn + ': ' + e.message['desc'])
assert False
+
def replace_pw(server, curpw, newpw, expstr, rc):
log.info('Binding as {%s, %s}' % (TESTDN, curpw))
server.simple_bind_s(TESTDN, curpw)
@@ -105,17 +56,18 @@ def replace_pw(server, curpw, newpw, expstr, rc):
log.info('PASSED')
-def test_ticket48896(topology):
+
+def test_ticket48896(topology_st):
"""
"""
log.info('Testing Ticket 48896 - Default Setting for passwordMinTokenLength does not work')
log.info("Setting global password policy with password syntax.")
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordCheckSyntax', 'on'),
- (ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')])
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordCheckSyntax', 'on'),
+ (ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')])
- config = topology.standalone.search_s(CONFIG_DN, ldap.SCOPE_BASE, 'cn=*')
+ config = topology_st.standalone.search_s(CONFIG_DN, ldap.SCOPE_BASE, 'cn=*')
mintokenlen = config[0].getValue('passwordMinTokenLength')
history = config[0].getValue('passwordInHistory')
@@ -124,58 +76,58 @@ def test_ticket48896(topology):
log.info('Adding a user.')
curpw = 'password'
- topology.standalone.add_s(Entry((TESTDN,
- {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': 'test user',
- 'sn': 'user',
- 'userPassword': curpw})))
+ topology_st.standalone.add_s(Entry((TESTDN,
+ {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': 'test user',
+ 'sn': 'user',
+ 'userPassword': curpw})))
newpw = 'Abcd012+'
exp = 'be ok'
rc = 0
- replace_pw(topology.standalone, curpw, newpw, exp, rc)
+ replace_pw(topology_st.standalone, curpw, newpw, exp, rc)
curpw = 'Abcd012+'
newpw = 'user'
exp = 'fail'
rc = ldap.CONSTRAINT_VIOLATION
- replace_pw(topology.standalone, curpw, newpw, exp, rc)
+ replace_pw(topology_st.standalone, curpw, newpw, exp, rc)
curpw = 'Abcd012+'
newpw = UID
exp = 'fail'
rc = ldap.CONSTRAINT_VIOLATION
- replace_pw(topology.standalone, curpw, newpw, exp, rc)
+ replace_pw(topology_st.standalone, curpw, newpw, exp, rc)
curpw = 'Abcd012+'
newpw = 'Tuse!1234'
exp = 'fail'
rc = ldap.CONSTRAINT_VIOLATION
- replace_pw(topology.standalone, curpw, newpw, exp, rc)
+ replace_pw(topology_st.standalone, curpw, newpw, exp, rc)
curpw = 'Abcd012+'
newpw = 'Tuse!0987'
exp = 'fail'
rc = ldap.CONSTRAINT_VIOLATION
- replace_pw(topology.standalone, curpw, newpw, exp, rc)
+ replace_pw(topology_st.standalone, curpw, newpw, exp, rc)
curpw = 'Abcd012+'
newpw = 'Tabc!1234'
exp = 'fail'
rc = ldap.CONSTRAINT_VIOLATION
- replace_pw(topology.standalone, curpw, newpw, exp, rc)
+ replace_pw(topology_st.standalone, curpw, newpw, exp, rc)
curpw = 'Abcd012+'
newpw = 'Direc+ory389'
exp = 'be ok'
rc = 0
- replace_pw(topology.standalone, curpw, newpw, exp, rc)
+ replace_pw(topology_st.standalone, curpw, newpw, exp, rc)
log.info('SUCCESS')
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
-
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/tickets/ticket48906_test.py b/dirsrvtests/tests/tickets/ticket48906_test.py
index 2c9e6ca..c388feb 100644
--- a/dirsrvtests/tests/tickets/ticket48906_test.py
+++ b/dirsrvtests/tests/tickets/ticket48906_test.py
@@ -6,21 +6,13 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
+import fnmatch
import logging
-import pytest
import shutil
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
+
+import pytest
from lib389.tasks import *
-from ldap.controls import SimplePagedResultsControl
-from ldap.controls.simple import GetEffectiveRightsControl
-import fnmatch
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
@@ -33,64 +25,25 @@ SEARCHFILTER = '(objectclass=person)'
OTHER_NAME = 'other_entry'
MAX_OTHERS = 10
-DBLOCK_DEFAULT="10000"
-DBLOCK_LDAP_UPDATE="20000"
-DBLOCK_EDIT_UPDATE="40000"
-DBLOCK_MIN_UPDATE=DBLOCK_DEFAULT
-DBLOCK_ATTR_CONFIG="nsslapd-db-locks"
-DBLOCK_ATTR_MONITOR="nsslapd-db-configured-locks"
-DBLOCK_ATTR_GUARDIAN="locks"
-
-DBCACHE_DEFAULT="33554432"
-DBCACHE_LDAP_UPDATE="20000000"
-DBCACHE_EDIT_UPDATE="40000000"
-DBCACHE_ATTR_CONFIG="nsslapd-dbcachesize"
-DBCACHE_ATTR_GUARDIAN="cachesize"
+DBLOCK_DEFAULT = "10000"
+DBLOCK_LDAP_UPDATE = "20000"
+DBLOCK_EDIT_UPDATE = "40000"
+DBLOCK_MIN_UPDATE = DBLOCK_DEFAULT
+DBLOCK_ATTR_CONFIG = "nsslapd-db-locks"
+DBLOCK_ATTR_MONITOR = "nsslapd-db-configured-locks"
+DBLOCK_ATTR_GUARDIAN = "locks"
+
+DBCACHE_DEFAULT = "33554432"
+DBCACHE_LDAP_UPDATE = "20000000"
+DBCACHE_EDIT_UPDATE = "40000000"
+DBCACHE_ATTR_CONFIG = "nsslapd-dbcachesize"
+DBCACHE_ATTR_GUARDIAN = "cachesize"
ldbm_config = "cn=config,%s" % (DN_LDBM)
ldbm_monitor = "cn=database,cn=monitor,%s" % (DN_LDBM)
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- '''
- This fixture is used to standalone topology for the 'module'.
- '''
- standalone = DirSrv(verbose=True)
-
- # Args for the standalone instance
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
-
- # Get the status of the instance and restart it if it exists
- instance_standalone = standalone.exists()
-
- # Remove the instance
- if instance_standalone:
- standalone.delete()
-
- # Create the instance
- standalone.create()
-
- # Used to retrieve configuration information (dbdir, confdir...)
- standalone.open()
- # clear the tmp directory
- standalone.clearTmpDir(__file__)
-
- # Here we have standalone instance up and running
- return TopologyStandalone(standalone)
-
-
-def test_ticket48906_setup(topology):
+def test_ticket48906_setup(topology_st):
"""
Check there is no core
Create a second backend
@@ -100,11 +53,11 @@ def test_ticket48906_setup(topology):
log.info('Testing Ticket 48906 - ns-slapd crashes during the shutdown after adding attribute with a matching rule')
# bind as directory manager
- topology.standalone.log.info("Bind as %s" % DN_DM)
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.log.info("Bind as %s" % DN_DM)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
# check there is no core
- entry = topology.standalone.search_s(CONFIG_DN, ldap.SCOPE_BASE, "(cn=config)",['nsslapd-workingdir'])
+ entry = topology_st.standalone.search_s(CONFIG_DN, ldap.SCOPE_BASE, "(cn=config)", ['nsslapd-workingdir'])
assert entry
assert entry[0]
assert entry[0].hasAttr('nsslapd-workingdir')
@@ -112,59 +65,62 @@ def test_ticket48906_setup(topology):
cores = fnmatch.filter(os.listdir(path), 'core.*')
assert len(cores) == 0
-
# add dummy entries on backend
for cpt in range(MAX_OTHERS):
name = "%s%d" % (OTHER_NAME, cpt)
- topology.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
- 'objectclass': "top person".split(),
- 'sn': name,
- 'cn': name})))
+ topology_st.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
+ 'objectclass': "top person".split(),
+ 'sn': name,
+ 'cn': name})))
- topology.standalone.log.info("\n\n######################### SEARCH ALL ######################\n")
- topology.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM)
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.log.info("\n\n######################### SEARCH ALL ######################\n")
+ topology_st.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
- entries = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, SEARCHFILTER)
- topology.standalone.log.info("Returned %d entries.\n", len(entries))
+ entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, SEARCHFILTER)
+ topology_st.standalone.log.info("Returned %d entries.\n", len(entries))
assert MAX_OTHERS == len(entries)
- topology.standalone.log.info('%d person entries are successfully created under %s.' % (len(entries), SUFFIX))
+ topology_st.standalone.log.info('%d person entries are successfully created under %s.' % (len(entries), SUFFIX))
+
-def _check_configured_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=None, required=False):
- entries = topology.standalone.search_s(ldbm_config, ldap.SCOPE_BASE, 'cn=config')
+def _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=None, required=False):
+ entries = topology_st.standalone.search_s(ldbm_config, ldap.SCOPE_BASE, 'cn=config')
if required:
- assert(entries[0].hasValue(attr))
+ assert (entries[0].hasValue(attr))
elif entries[0].hasValue(attr):
- assert(entries[0].getValue(attr) == expected_value)
+ assert (entries[0].getValue(attr) == expected_value)
+
-def _check_monitored_value(topology, expected_value):
- entries = topology.standalone.search_s(ldbm_monitor, ldap.SCOPE_BASE, '(objectclass=*)')
- assert(entries[0].hasValue(DBLOCK_ATTR_MONITOR) and entries[0].getValue(DBLOCK_ATTR_MONITOR) == expected_value)
+def _check_monitored_value(topology_st, expected_value):
+ entries = topology_st.standalone.search_s(ldbm_monitor, ldap.SCOPE_BASE, '(objectclass=*)')
+ assert (entries[0].hasValue(DBLOCK_ATTR_MONITOR) and entries[0].getValue(DBLOCK_ATTR_MONITOR) == expected_value)
-def _check_dse_ldif_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE):
- dse_ref_ldif = topology.standalone.confdir + '/dse.ldif'
+
+def _check_dse_ldif_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE):
+ dse_ref_ldif = topology_st.standalone.confdir + '/dse.ldif'
dse_ref = open(dse_ref_ldif, "r")
# Check the DBLOCK in dse.ldif
- value=None
+ value = None
while True:
line = dse_ref.readline()
if (line == ''):
break
elif attr in line.lower():
value = line.split()[1]
- assert(value == expected_value)
+ assert (value == expected_value)
break
- assert(value)
+ assert (value)
+
-def _check_guardian_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=None):
- guardian_file = os.path.join(topology.standalone.dbdir, 'guardian')
- assert(os.path.exists(guardian_file))
+def _check_guardian_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=None):
+ guardian_file = os.path.join(topology_st.standalone.dbdir, 'guardian')
+ assert (os.path.exists(guardian_file))
guardian = open(guardian_file, "r")
- value=None
+ value = None
while True:
line = guardian.readline()
if (line == ''):
@@ -177,181 +133,164 @@ def _check_guardian_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=None
print(expected_value)
print("value")
print(value)
- assert(str(value) == str(expected_value))
+ assert (str(value) == str(expected_value))
break
- assert(value)
-
-def test_ticket48906_dblock_default(topology):
- topology.standalone.log.info('###################################')
- topology.standalone.log.info('###')
- topology.standalone.log.info('### Check that before any change config/monitor')
- topology.standalone.log.info('### contains the default value')
- topology.standalone.log.info('###')
- topology.standalone.log.info('###################################')
- _check_monitored_value(topology, DBLOCK_DEFAULT)
- _check_configured_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_DEFAULT, required=False)
- _check_configured_value(topology, attr=DBCACHE_ATTR_CONFIG, expected_value=DBCACHE_DEFAULT, required=False)
-
-
-def test_ticket48906_dblock_ldap_update(topology):
- topology.standalone.log.info('###################################')
- topology.standalone.log.info('###')
- topology.standalone.log.info('### Check that after ldap update')
- topology.standalone.log.info('### - monitor contains DEFAULT')
- topology.standalone.log.info('### - configured contains DBLOCK_LDAP_UPDATE')
- topology.standalone.log.info('### - After stop dse.ldif contains DBLOCK_LDAP_UPDATE')
- topology.standalone.log.info('### - After stop guardian contains DEFAULT')
- topology.standalone.log.info('### In fact guardian should differ from config to recreate the env')
- topology.standalone.log.info('### Check that after restart (DBenv recreated)')
- topology.standalone.log.info('### - monitor contains DBLOCK_LDAP_UPDATE ')
- topology.standalone.log.info('### - configured contains DBLOCK_LDAP_UPDATE')
- topology.standalone.log.info('### - dse.ldif contains DBLOCK_LDAP_UPDATE')
- topology.standalone.log.info('###')
- topology.standalone.log.info('###################################')
-
- topology.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, DBLOCK_LDAP_UPDATE)])
- _check_monitored_value(topology, DBLOCK_DEFAULT)
- _check_configured_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE, required=True)
-
- topology.standalone.stop(timeout=10)
- _check_dse_ldif_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE)
- _check_guardian_value(topology, attr=DBLOCK_ATTR_GUARDIAN, expected_value=DBLOCK_DEFAULT)
+ assert (value)
+
+
+def test_ticket48906_dblock_default(topology_st):
+ topology_st.standalone.log.info('###################################')
+ topology_st.standalone.log.info('###')
+ topology_st.standalone.log.info('### Check that before any change config/monitor')
+ topology_st.standalone.log.info('### contains the default value')
+ topology_st.standalone.log.info('###')
+ topology_st.standalone.log.info('###################################')
+ _check_monitored_value(topology_st, DBLOCK_DEFAULT)
+ _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_DEFAULT, required=False)
+ _check_configured_value(topology_st, attr=DBCACHE_ATTR_CONFIG, expected_value=DBCACHE_DEFAULT, required=False)
+
+
+def test_ticket48906_dblock_ldap_update(topology_st):
+ topology_st.standalone.log.info('###################################')
+ topology_st.standalone.log.info('###')
+ topology_st.standalone.log.info('### Check that after ldap update')
+ topology_st.standalone.log.info('### - monitor contains DEFAULT')
+ topology_st.standalone.log.info('### - configured contains DBLOCK_LDAP_UPDATE')
+ topology_st.standalone.log.info('### - After stop dse.ldif contains DBLOCK_LDAP_UPDATE')
+ topology_st.standalone.log.info('### - After stop guardian contains DEFAULT')
+ topology_st.standalone.log.info('### In fact guardian should differ from config to recreate the env')
+ topology_st.standalone.log.info('### Check that after restart (DBenv recreated)')
+ topology_st.standalone.log.info('### - monitor contains DBLOCK_LDAP_UPDATE ')
+ topology_st.standalone.log.info('### - configured contains DBLOCK_LDAP_UPDATE')
+ topology_st.standalone.log.info('### - dse.ldif contains DBLOCK_LDAP_UPDATE')
+ topology_st.standalone.log.info('###')
+ topology_st.standalone.log.info('###################################')
+
+ topology_st.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, DBLOCK_LDAP_UPDATE)])
+ _check_monitored_value(topology_st, DBLOCK_DEFAULT)
+ _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE, required=True)
+
+ topology_st.standalone.stop(timeout=10)
+ _check_dse_ldif_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE)
+ _check_guardian_value(topology_st, attr=DBLOCK_ATTR_GUARDIAN, expected_value=DBLOCK_DEFAULT)
# Check that the value is the same after restart and recreate
- topology.standalone.start(timeout=10)
- _check_monitored_value(topology, DBLOCK_LDAP_UPDATE)
- _check_configured_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE, required=True)
- _check_dse_ldif_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE)
-
-def test_ticket48906_dblock_edit_update(topology):
- topology.standalone.log.info('###################################')
- topology.standalone.log.info('###')
- topology.standalone.log.info('### Check that after stop')
- topology.standalone.log.info('### - dse.ldif contains DBLOCK_LDAP_UPDATE')
- topology.standalone.log.info('### - guardian contains DBLOCK_LDAP_UPDATE')
- topology.standalone.log.info('### Check that edit dse+restart')
- topology.standalone.log.info('### - monitor contains DBLOCK_EDIT_UPDATE')
- topology.standalone.log.info('### - configured contains DBLOCK_EDIT_UPDATE')
- topology.standalone.log.info('### Check that after stop')
- topology.standalone.log.info('### - dse.ldif contains DBLOCK_EDIT_UPDATE')
- topology.standalone.log.info('### - guardian contains DBLOCK_EDIT_UPDATE')
- topology.standalone.log.info('###')
- topology.standalone.log.info('###################################')
-
- topology.standalone.stop(timeout=10)
- _check_dse_ldif_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE)
- _check_guardian_value(topology, attr=DBLOCK_ATTR_GUARDIAN, expected_value=DBLOCK_LDAP_UPDATE)
-
- dse_ref_ldif = topology.standalone.confdir + '/dse.ldif'
- dse_new_ldif = topology.standalone.confdir + '/dse.ldif.new'
+ topology_st.standalone.start(timeout=10)
+ _check_monitored_value(topology_st, DBLOCK_LDAP_UPDATE)
+ _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE, required=True)
+ _check_dse_ldif_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE)
+
+
+def test_ticket48906_dblock_edit_update(topology_st):
+ topology_st.standalone.log.info('###################################')
+ topology_st.standalone.log.info('###')
+ topology_st.standalone.log.info('### Check that after stop')
+ topology_st.standalone.log.info('### - dse.ldif contains DBLOCK_LDAP_UPDATE')
+ topology_st.standalone.log.info('### - guardian contains DBLOCK_LDAP_UPDATE')
+ topology_st.standalone.log.info('### Check that edit dse+restart')
+ topology_st.standalone.log.info('### - monitor contains DBLOCK_EDIT_UPDATE')
+ topology_st.standalone.log.info('### - configured contains DBLOCK_EDIT_UPDATE')
+ topology_st.standalone.log.info('### Check that after stop')
+ topology_st.standalone.log.info('### - dse.ldif contains DBLOCK_EDIT_UPDATE')
+ topology_st.standalone.log.info('### - guardian contains DBLOCK_EDIT_UPDATE')
+ topology_st.standalone.log.info('###')
+ topology_st.standalone.log.info('###################################')
+
+ topology_st.standalone.stop(timeout=10)
+ _check_dse_ldif_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE)
+ _check_guardian_value(topology_st, attr=DBLOCK_ATTR_GUARDIAN, expected_value=DBLOCK_LDAP_UPDATE)
+
+ dse_ref_ldif = topology_st.standalone.confdir + '/dse.ldif'
+ dse_new_ldif = topology_st.standalone.confdir + '/dse.ldif.new'
dse_ref = open(dse_ref_ldif, "r")
dse_new = open(dse_new_ldif, "w")
# Change the DBLOCK in dse.ldif
- value=None
+ value = None
while True:
line = dse_ref.readline()
if (line == ''):
break
elif DBLOCK_ATTR_CONFIG in line.lower():
value = line.split()[1]
- assert(value == DBLOCK_LDAP_UPDATE)
+ assert (value == DBLOCK_LDAP_UPDATE)
new_value = [line.split()[0], DBLOCK_EDIT_UPDATE, ]
new_line = "%s\n" % " ".join(new_value)
else:
new_line = line
dse_new.write(new_line)
- assert(value)
+ assert (value)
dse_ref.close()
dse_new.close()
shutil.move(dse_new_ldif, dse_ref_ldif)
# Check that the value is the same after restart
- topology.standalone.start(timeout=10)
- _check_monitored_value(topology, DBLOCK_EDIT_UPDATE)
- _check_configured_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_EDIT_UPDATE, required=True)
-
- topology.standalone.stop(timeout=10)
- _check_dse_ldif_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_EDIT_UPDATE)
- _check_guardian_value(topology, attr=DBLOCK_ATTR_GUARDIAN, expected_value=DBLOCK_EDIT_UPDATE)
-
-def test_ticket48906_dblock_robust(topology):
- topology.standalone.log.info('###################################')
- topology.standalone.log.info('###')
- topology.standalone.log.info('### Check that the following values are rejected')
- topology.standalone.log.info('### - negative value')
- topology.standalone.log.info('### - insuffisant value')
- topology.standalone.log.info('### - invalid value')
- topology.standalone.log.info('### Check that minimum value is accepted')
- topology.standalone.log.info('###')
- topology.standalone.log.info('###################################')
-
- topology.standalone.start(timeout=10)
- _check_monitored_value(topology, DBLOCK_EDIT_UPDATE)
- _check_configured_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_EDIT_UPDATE, required=True)
+ topology_st.standalone.start(timeout=10)
+ _check_monitored_value(topology_st, DBLOCK_EDIT_UPDATE)
+ _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_EDIT_UPDATE, required=True)
+
+ topology_st.standalone.stop(timeout=10)
+ _check_dse_ldif_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_EDIT_UPDATE)
+ _check_guardian_value(topology_st, attr=DBLOCK_ATTR_GUARDIAN, expected_value=DBLOCK_EDIT_UPDATE)
+
+
+def test_ticket48906_dblock_robust(topology_st):
+ topology_st.standalone.log.info('###################################')
+ topology_st.standalone.log.info('###')
+ topology_st.standalone.log.info('### Check that the following values are rejected')
+ topology_st.standalone.log.info('### - negative value')
+ topology_st.standalone.log.info('### - insuffisant value')
+ topology_st.standalone.log.info('### - invalid value')
+ topology_st.standalone.log.info('### Check that minimum value is accepted')
+ topology_st.standalone.log.info('###')
+ topology_st.standalone.log.info('###################################')
+
+ topology_st.standalone.start(timeout=10)
+ _check_monitored_value(topology_st, DBLOCK_EDIT_UPDATE)
+ _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_EDIT_UPDATE, required=True)
# Check negative value
try:
- topology.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, "-1")])
+ topology_st.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, "-1")])
except ldap.UNWILLING_TO_PERFORM:
pass
- _check_monitored_value(topology, DBLOCK_EDIT_UPDATE)
- _check_configured_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE, required=True)
+ _check_monitored_value(topology_st, DBLOCK_EDIT_UPDATE)
+ _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE, required=True)
# Check insuffisant value
too_small = int(DBLOCK_MIN_UPDATE) - 1
try:
- topology.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, str(too_small))])
+ topology_st.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, str(too_small))])
except ldap.UNWILLING_TO_PERFORM:
pass
- _check_monitored_value(topology, DBLOCK_EDIT_UPDATE)
- _check_configured_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE, required=True)
+ _check_monitored_value(topology_st, DBLOCK_EDIT_UPDATE)
+ _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE, required=True)
# Check invalid value
try:
- topology.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, "dummy")])
+ topology_st.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, "dummy")])
except ldap.UNWILLING_TO_PERFORM:
pass
- _check_monitored_value(topology, DBLOCK_EDIT_UPDATE)
- _check_configured_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE, required=True)
-
- #now check the minimal value
- topology.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, DBLOCK_MIN_UPDATE)])
- _check_monitored_value(topology, DBLOCK_EDIT_UPDATE)
- _check_configured_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_MIN_UPDATE, required=True)
-
- topology.standalone.stop(timeout=10)
- _check_dse_ldif_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_MIN_UPDATE)
- _check_guardian_value(topology, attr=DBLOCK_ATTR_GUARDIAN, expected_value=DBLOCK_EDIT_UPDATE)
-
- topology.standalone.start(timeout=10)
- _check_monitored_value(topology, DBLOCK_MIN_UPDATE)
- _check_configured_value(topology, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_MIN_UPDATE, required=True)
-
-def text_ticket48906_final(topology):
- topology.standalone.delete()
- log.info('Testcase PASSED')
-
-
-def run_isolated():
- '''
- run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..)
- To run isolated without py.test, you need to
- - edit this file and comment '@pytest.fixture' line before 'topology' function.
- - set the installation prefix
- - run this program
- '''
- topo = topology(True)
- test_ticket48906_setup(topo)
- test_ticket48906_dblock_default(topo)
- test_ticket48906_dblock_ldap_update(topo)
- test_ticket48906_dblock_edit_update(topo)
- test_ticket48906_dblock_robust(topo)
- test_ticket48906_final(topo)
+ _check_monitored_value(topology_st, DBLOCK_EDIT_UPDATE)
+ _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE, required=True)
+ # now check the minimal value
+ topology_st.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, DBLOCK_MIN_UPDATE)])
+ _check_monitored_value(topology_st, DBLOCK_EDIT_UPDATE)
+ _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_MIN_UPDATE, required=True)
-if __name__ == '__main__':
- run_isolated()
+ topology_st.standalone.stop(timeout=10)
+ _check_dse_ldif_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_MIN_UPDATE)
+ _check_guardian_value(topology_st, attr=DBLOCK_ATTR_GUARDIAN, expected_value=DBLOCK_EDIT_UPDATE)
+ topology_st.standalone.start(timeout=10)
+ _check_monitored_value(topology_st, DBLOCK_MIN_UPDATE)
+ _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_MIN_UPDATE, required=True)
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/tickets/ticket48916_test.py b/dirsrvtests/tests/tickets/ticket48916_test.py
index 299b224..207b355 100644
--- a/dirsrvtests/tests/tickets/ticket48916_test.py
+++ b/dirsrvtests/tests/tickets/ticket48916_test.py
@@ -1,161 +1,40 @@
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_m2
-DEBUGGING = False
+DEBUGGING = os.getenv('DEBUGGING', False)
if DEBUGGING:
logging.getLogger(__name__).setLevel(logging.DEBUG)
else:
logging.getLogger(__name__).setLevel(logging.INFO)
-
log = logging.getLogger(__name__)
-class TopologyReplication(object):
- """The Replication Topology Class"""
- def __init__(self, master1, master2):
- """Init"""
- master1.open()
- self.master1 = master1
- master2.open()
- self.master2 = master2
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- """Create Replication Deployment"""
-
- # Creating master 1...
- if DEBUGGING:
- master1 = DirSrv(verbose=True)
- else:
- master1 = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_MASTER_1
- args_instance[SER_PORT] = PORT_MASTER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master1.allocate(args_master)
- instance_master1 = master1.exists()
- if instance_master1:
- master1.delete()
- master1.create()
- master1.open()
- master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-
- # Creating master 2...
- if DEBUGGING:
- master2 = DirSrv(verbose=True)
- else:
- master2 = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_MASTER_2
- args_instance[SER_PORT] = PORT_MASTER_2
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master2.allocate(args_master)
- instance_master2 = master2.exists()
- if instance_master2:
- master2.delete()
- master2.create()
- master2.open()
- master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
- #
- # Create all the agreements
- #
- # Creating agreement from master 1 to master 2
- properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
- if not m1_m2_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m1_m2_agmt)
-
- # Creating agreement from master 2 to master 1
- properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
- if not m2_m1_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m2_m1_agmt)
-
- # Allow the replicas to get situated with the new agreements...
- time.sleep(5)
-
- #
- # Initialize all the agreements
- #
- master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
- master1.waitForReplInit(m1_m2_agmt)
-
- # Check replication is working...
- if master1.testReplication(DEFAULT_SUFFIX, master2):
- log.info('Replication is working.')
- else:
- log.fatal('Replication is not working.')
- assert False
-
- def fin():
- """If we are debugging just stop the instances, otherwise remove
- them
- """
- if DEBUGGING:
- master1.stop()
- master2.stop()
- else:
- master1.delete()
- master2.delete()
-
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- master1.clearTmpDir(__file__)
-
- return TopologyReplication(master1, master2)
-
-
def _create_user(inst, idnum):
inst.add_s(Entry(
('uid=user%s,ou=People,%s' % (idnum, DEFAULT_SUFFIX), {
- 'objectClass' : 'top account posixAccount'.split(' '),
- 'cn' : 'user',
- 'uid' : 'user%s' % idnum,
- 'homeDirectory' : '/home/user%s' % idnum,
- 'loginShell' : '/bin/nologin',
- 'gidNumber' : '-1',
- 'uidNumber' : '-1',
+ 'objectClass': 'top account posixAccount'.split(' '),
+ 'cn': 'user',
+ 'uid': 'user%s' % idnum,
+ 'homeDirectory': '/home/user%s' % idnum,
+ 'loginShell': '/bin/nologin',
+ 'gidNumber': '-1',
+ 'uidNumber': '-1',
})
))
-def test_ticket48916(topology):
+def test_ticket48916(topology_m2):
"""
https://bugzilla.redhat.com/show_bug.cgi?id=1353629
This is an issue with ID exhaustion in DNA causing a crash.
- To access each DirSrv instance use: topology.master1, topology.master2,
- ..., topology.hub1, ..., topology.consumer1,...
+ To access each DirSrv instance use: topology_m2.ms["master1"], topology_m2.ms["master2"],
+ ..., topology_m2.hub1, ..., topology_m2.consumer1,...
"""
@@ -166,16 +45,16 @@ def test_ticket48916(topology):
# Enable the plugin on both servers
- dna_m1 = topology.master1.plugins.get('Distributed Numeric Assignment Plugin')
- dna_m2 = topology.master2.plugins.get('Distributed Numeric Assignment Plugin')
+ dna_m1 = topology_m2.ms["master1"].plugins.get('Distributed Numeric Assignment Plugin')
+ dna_m2 = topology_m2.ms["master2"].plugins.get('Distributed Numeric Assignment Plugin')
# Configure it
# Create the container for the ranges to go into.
- topology.master1.add_s(Entry(
+ topology_m2.ms["master1"].add_s(Entry(
('ou=Ranges,%s' % DEFAULT_SUFFIX, {
- 'objectClass' : 'top organizationalUnit'.split(' '),
- 'ou' : 'Ranges',
+ 'objectClass': 'top organizationalUnit'.split(' '),
+ 'ou': 'Ranges',
})
))
@@ -186,9 +65,9 @@ def test_ticket48916(topology):
config_dn = dna_m1.dn
- topology.master1.add_s(Entry(
+ topology_m2.ms["master1"].add_s(Entry(
('cn=uids,%s' % config_dn, {
- 'objectClass' : 'top dnaPluginConfig'.split(' '),
+ 'objectClass': 'top dnaPluginConfig'.split(' '),
'cn': 'uids',
'dnatype': 'uidNumber gidNumber'.split(' '),
'dnafilter': '(objectclass=posixAccount)',
@@ -205,9 +84,9 @@ def test_ticket48916(topology):
})
))
- topology.master2.add_s(Entry(
+ topology_m2.ms["master2"].add_s(Entry(
('cn=uids,%s' % config_dn, {
- 'objectClass' : 'top dnaPluginConfig'.split(' '),
+ 'objectClass': 'top dnaPluginConfig'.split(' '),
'cn': 'uids',
'dnatype': 'uidNumber gidNumber'.split(' '),
'dnafilter': '(objectclass=posixAccount)',
@@ -228,8 +107,8 @@ def test_ticket48916(topology):
dna_m2.enable()
# Restart the instances
- topology.master1.restart(60)
- topology.master2.restart(60)
+ topology_m2.ms["master1"].restart(60)
+ topology_m2.ms["master2"].restart(60)
# Wait for a replication .....
time.sleep(40)
@@ -237,10 +116,10 @@ def test_ticket48916(topology):
# Allocate the 10 members to exhaust
for i in range(1, 11):
- _create_user(topology.master2, i)
+ _create_user(topology_m2.ms["master2"], i)
# Allocate the 11th
- _create_user(topology.master2, 11)
+ _create_user(topology_m2.ms["master2"], 11)
log.info('Test PASSED')
@@ -250,4 +129,3 @@ if __name__ == '__main__':
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s %s" % CURRENT_FILE)
-
diff --git a/dirsrvtests/tests/tickets/ticket48956_test.py b/dirsrvtests/tests/tickets/ticket48956_test.py
index 291dd4e..b1200be 100644
--- a/dirsrvtests/tests/tickets/ticket48956_test.py
+++ b/dirsrvtests/tests/tickets/ticket48956_test.py
@@ -1,29 +1,17 @@
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-import subprocess
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
-
-DEBUGGING = False
+DEBUGGING = os.getenv('DEBUGGING', False)
RDN_LONG_SUFFIX = 'this'
LONG_SUFFIX = "dc=%s,dc=is,dc=a,dc=very,dc=long,dc=suffix,dc=so,dc=long,dc=suffix,dc=extremely,dc=long,dc=suffix" % RDN_LONG_SUFFIX
LONG_SUFFIX_BE = 'ticket48956'
-
ACCT_POLICY_PLUGIN_DN = 'cn=%s,cn=plugins,cn=config' % PLUGIN_ACCT_POLICY
ACCT_POLICY_CONFIG_DN = 'cn=config,%s' % ACCT_POLICY_PLUGIN_DN
-
INACTIVITY_LIMIT = '9'
SEARCHFILTER = '(objectclass=*)'
@@ -34,52 +22,15 @@ if DEBUGGING:
logging.getLogger(__name__).setLevel(logging.DEBUG)
else:
logging.getLogger(__name__).setLevel(logging.INFO)
-log = logging.getLogger(__name__)
-
-class TopologyStandalone(object):
- """The DS Topology Class"""
- def __init__(self, standalone):
- """Init"""
- standalone.open()
- self.standalone = standalone
+log = logging.getLogger(__name__)
-@pytest.fixture(scope="module")
-def topology(request):
- """Create DS Deployment"""
-
- # Creating standalone instance ...
- if DEBUGGING:
- standalone = DirSrv(verbose=True)
- else:
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- """If we are debugging just stop the instances, otherwise remove them
- """
- if DEBUGGING:
- standalone.stop()
- else:
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
-
-def _check_status(topology, user, expected):
- nsaccountstatus = '%s/sbin/ns-accountstatus.pl' % topology.standalone.prefix
- proc = subprocess.Popen([nsaccountstatus, '-Z', 'standalone', '-D', DN_DM, '-w', PASSWORD, '-p', str(topology.standalone.port), '-I', user], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+def _check_status(topology_st, user, expected):
+ nsaccountstatus = '%s/sbin/ns-accountstatus.pl' % topology_st.standalone.prefix
+ proc = subprocess.Popen(
+ [nsaccountstatus, '-Z', 'standalone', '-D', DN_DM, '-w', PASSWORD, '-p', str(topology_st.standalone.port), '-I',
+ user], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
found = False
while True:
@@ -92,34 +43,37 @@ def _check_status(topology, user, expected):
break
return found
-def _check_inactivity(topology, mysuffix):
+
+def _check_inactivity(topology_st, mysuffix):
ACCT_POLICY_DN = 'cn=Account Inactivation Policy,%s' % mysuffix
log.info("\n######################### Adding Account Policy entry: %s ######################\n" % ACCT_POLICY_DN)
- topology.standalone.add_s(Entry((ACCT_POLICY_DN, {'objectclass': "top ldapsubentry extensibleObject accountpolicy".split(),
- 'accountInactivityLimit': INACTIVITY_LIMIT})))
+ topology_st.standalone.add_s(
+ Entry((ACCT_POLICY_DN, {'objectclass': "top ldapsubentry extensibleObject accountpolicy".split(),
+ 'accountInactivityLimit': INACTIVITY_LIMIT})))
TEST_USER_DN = 'uid=%s,%s' % (TEST_USER, mysuffix)
log.info("\n######################### Adding Test User entry: %s ######################\n" % TEST_USER_DN)
- topology.standalone.add_s(Entry((TEST_USER_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
- 'cn': TEST_USER,
- 'sn': TEST_USER,
- 'givenname': TEST_USER,
- 'userPassword': TEST_USER_PW,
- 'acctPolicySubentry': ACCT_POLICY_DN})))
+ topology_st.standalone.add_s(
+ Entry((TEST_USER_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': TEST_USER,
+ 'sn': TEST_USER,
+ 'givenname': TEST_USER,
+ 'userPassword': TEST_USER_PW,
+ 'acctPolicySubentry': ACCT_POLICY_DN})))
# Setting the lastLoginTime
try:
- topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
+ topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
except ldap.CONSTRAINT_VIOLATION as e:
log.error('CONSTRAINT VIOLATION ' + e.message['desc'])
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
-
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
- assert(_check_status(topology, TEST_USER_DN, '- activated'))
+ assert (_check_status(topology_st, TEST_USER_DN, '- activated'))
time.sleep(int(INACTIVITY_LIMIT) + 5)
- assert(_check_status(topology, TEST_USER_DN, '- inactivated (inactivity limit exceeded'))
+ assert (_check_status(topology_st, TEST_USER_DN, '- inactivated (inactivity limit exceeded'))
-def test_ticket48956(topology):
+
+def test_ticket48956(topology_st):
"""Write your testcase here...
Also, if you need any testcase initialization,
@@ -127,30 +81,31 @@ def test_ticket48956(topology):
"""
- topology.standalone.modify_s(ACCT_POLICY_PLUGIN_DN, [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', ACCT_POLICY_CONFIG_DN)])
+ topology_st.standalone.modify_s(ACCT_POLICY_PLUGIN_DN,
+ [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', ACCT_POLICY_CONFIG_DN)])
- topology.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes'),
- (ldap.MOD_REPLACE, 'stateattrname', 'lastLoginTime'),
- (ldap.MOD_REPLACE, 'altstateattrname', 'createTimestamp'),
- (ldap.MOD_REPLACE, 'specattrname', 'acctPolicySubentry'),
- (ldap.MOD_REPLACE, 'limitattrname', 'accountInactivityLimit')])
+ topology_st.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes'),
+ (ldap.MOD_REPLACE, 'stateattrname', 'lastLoginTime'),
+ (ldap.MOD_REPLACE, 'altstateattrname', 'createTimestamp'),
+ (ldap.MOD_REPLACE, 'specattrname', 'acctPolicySubentry'),
+ (ldap.MOD_REPLACE, 'limitattrname',
+ 'accountInactivityLimit')])
# Enable the plugins
- topology.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY)
+ topology_st.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY)
- topology.standalone.restart(timeout=10)
+ topology_st.standalone.restart(timeout=10)
# Check inactivity on standard suffix (short)
- _check_inactivity(topology, SUFFIX)
+ _check_inactivity(topology_st, SUFFIX)
# Check inactivity on a long suffix
- topology.standalone.backend.create(LONG_SUFFIX, {BACKEND_NAME: LONG_SUFFIX_BE})
- topology.standalone.mappingtree.create(LONG_SUFFIX, bename=LONG_SUFFIX_BE)
- topology.standalone.add_s(Entry((LONG_SUFFIX, {
- 'objectclass': "top domain".split(),
- 'dc': RDN_LONG_SUFFIX})))
- _check_inactivity(topology, LONG_SUFFIX)
-
+ topology_st.standalone.backend.create(LONG_SUFFIX, {BACKEND_NAME: LONG_SUFFIX_BE})
+ topology_st.standalone.mappingtree.create(LONG_SUFFIX, bename=LONG_SUFFIX_BE)
+ topology_st.standalone.add_s(Entry((LONG_SUFFIX, {
+ 'objectclass': "top domain".split(),
+ 'dc': RDN_LONG_SUFFIX})))
+ _check_inactivity(topology_st, LONG_SUFFIX)
if DEBUGGING:
# Add debugging steps(if any)...
@@ -164,4 +119,3 @@ if __name__ == '__main__':
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s %s" % CURRENT_FILE)
-
diff --git a/dirsrvtests/tests/tickets/ticket48961_test.py b/dirsrvtests/tests/tickets/ticket48961_test.py
index ed5a137..2d92ada 100644
--- a/dirsrvtests/tests/tickets/ticket48961_test.py
+++ b/dirsrvtests/tests/tickets/ticket48961_test.py
@@ -1,88 +1,41 @@
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
-DEBUGGING = False
+DEBUGGING = os.getenv('DEBUGGING', False)
if DEBUGGING:
logging.getLogger(__name__).setLevel(logging.DEBUG)
else:
logging.getLogger(__name__).setLevel(logging.INFO)
-log = logging.getLogger(__name__)
-
-
-class TopologyStandalone(object):
- """The DS Topology Class"""
- def __init__(self, standalone):
- """Init"""
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- """Create DS Deployment"""
-
- # Creating standalone instance ...
- if DEBUGGING:
- standalone = DirSrv(verbose=True)
- else:
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- def fin():
- """If we are debugging just stop the instances, otherwise remove them
- """
- if DEBUGGING:
- standalone.stop()
- else:
- standalone.delete()
- request.addfinalizer(fin)
- return TopologyStandalone(standalone)
+log = logging.getLogger(__name__)
-def test_ticket48961_storagescheme(topology):
+def test_ticket48961_storagescheme(topology_st):
"""
Test deleting of the storage scheme.
"""
- default = topology.standalone.config.get_attr_val('passwordStorageScheme')
+ default = topology_st.standalone.config.get_attr_val('passwordStorageScheme')
# Change it
- topology.standalone.config.set('passwordStorageScheme', 'CLEAR')
+ topology_st.standalone.config.set('passwordStorageScheme', 'CLEAR')
# Now delete it
- topology.standalone.config.remove('passwordStorageScheme', None)
+ topology_st.standalone.config.remove('passwordStorageScheme', None)
# Now check it's been reset.
- assert(default == topology.standalone.config.get_attr_val('passwordStorageScheme'))
+ assert (default == topology_st.standalone.config.get_attr_val('passwordStorageScheme'))
log.info(default)
log.info('Test PASSED')
+
def _reset_config_value(inst, attrname):
# None to value here means remove all instances of the attr.
inst.config.remove(attrname, None)
newval = inst.config.get_attr_val(attrname)
log.info("Reset %s to %s" % (attrname, newval))
-def test_ticket48961_deleteall(topology):
+
+def test_ticket48961_deleteall(topology_st):
"""
Test that we can delete all valid attrs, and that a few are rejected.
"""
@@ -90,12 +43,12 @@ def test_ticket48961_deleteall(topology):
'nsslapd-listenhost': 'localhost',
'nsslapd-securelistenhost': 'localhost',
'nsslapd-allowed-sasl-mechanisms': 'GSSAPI',
- 'nsslapd-svrtab': 'Some bogus data', # This one could reset?
+ 'nsslapd-svrtab': 'Some bogus data', # This one could reset?
}
attr_to_fail = {
# These are the values that should always be dn dse.ldif too
'nsslapd-localuser': 'dirsrv',
- 'nsslapd-defaultnamingcontext': 'dc=example,dc=com', # Can't delete
+ 'nsslapd-defaultnamingcontext': 'dc=example,dc=com', # Can't delete
'nsslapd-accesslog': '/opt/dirsrv/var/log/dirsrv/slapd-standalone/access',
'nsslapd-auditlog': '/opt/dirsrv/var/log/dirsrv/slapd-standalone/audit',
'nsslapd-errorlog': '/opt/dirsrv/var/log/dirsrv/slapd-standalone/errors',
@@ -111,17 +64,17 @@ def test_ticket48961_deleteall(topology):
'nsslapd-localhost': 'localhost.localdomain',
# These can't be reset, but might be in dse.ldif. Probably in libglobs.
'nsslapd-certmap-basedn': 'cn=certmap,cn=config',
- 'nsslapd-port': '38931', # Can't delete
- 'nsslapd-secureport': '636', # Can't delete
+ 'nsslapd-port': '38931', # Can't delete
+ 'nsslapd-secureport': '636', # Can't delete
'nsslapd-conntablesize': '1048576',
'nsslapd-rootpw': '{SSHA512}...',
# These are hardcoded server magic.
- 'nsslapd-hash-filters': 'off', # Can't delete
- 'nsslapd-requiresrestart': 'cn=config:nsslapd-port', # Can't change
- 'nsslapd-plugin': 'cn=case ignore string syntax,cn=plugins,cn=config', # Can't change
- 'nsslapd-privatenamespaces': 'cn=schema', # Can't change
- 'nsslapd-allowed-to-delete-attrs': 'None', # Can't delete
- 'nsslapd-accesslog-list': 'List!', # Can't delete
+ 'nsslapd-hash-filters': 'off', # Can't delete
+ 'nsslapd-requiresrestart': 'cn=config:nsslapd-port', # Can't change
+ 'nsslapd-plugin': 'cn=case ignore string syntax,cn=plugins,cn=config', # Can't change
+ 'nsslapd-privatenamespaces': 'cn=schema', # Can't change
+ 'nsslapd-allowed-to-delete-attrs': 'None', # Can't delete
+ 'nsslapd-accesslog-list': 'List!', # Can't delete
'nsslapd-auditfaillog-list': 'List!',
'nsslapd-auditlog-list': 'List!',
'nsslapd-errorlog-list': 'List!',
@@ -130,10 +83,10 @@ def test_ticket48961_deleteall(topology):
'objectclass': '',
'cn': '',
# These are the odd values
- 'nsslapd-backendconfig': 'cn=config,cn=userRoot,cn=ldbm database,cn=plugins,cn=config', # Doesn't exist?
- 'nsslapd-betype': 'ldbm database', # Doesn't exist?
- 'nsslapd-connection-buffer': 1, # Has an ldap problem
- 'nsslapd-malloc-mmap-threshold': '-10', # Defunct anyway
+ 'nsslapd-backendconfig': 'cn=config,cn=userRoot,cn=ldbm database,cn=plugins,cn=config', # Doesn't exist?
+ 'nsslapd-betype': 'ldbm database', # Doesn't exist?
+ 'nsslapd-connection-buffer': 1, # Has an ldap problem
+ 'nsslapd-malloc-mmap-threshold': '-10', # Defunct anyway
'nsslapd-malloc-mxfast': '-10',
'nsslapd-malloc-trim-threshold': '-10',
'nsslapd-referralmode': '',
@@ -141,7 +94,7 @@ def test_ticket48961_deleteall(topology):
'passwordadmindn': '',
}
- config_entry = topology.standalone.config.raw_entry()
+ config_entry = topology_st.standalone.config.raw_entry()
for attr in config_entry.getAttrs():
if attr.lower() in attr_to_fail:
@@ -151,23 +104,23 @@ def test_ticket48961_deleteall(topology):
log.info("Reseting %s" % (attr))
# Check if we have to do some override of this attr.
# Some attributes need specific syntax, so we override just these.
- newval = topology.standalone.config.get_attr_vals(attr)
+ newval = topology_st.standalone.config.get_attr_vals(attr)
log.info(" --> %s" % newval)
if attr.lower() in attr_to_test:
newval = attr_to_test[attr]
log.info("override --> %s" % newval)
# We need to set the attr to its own value
# so that it's "written".
- topology.standalone.config.set(attr, newval)
+ topology_st.standalone.config.set(attr, newval)
# Now we can really reset
- _reset_config_value(topology.standalone, attr)
+ _reset_config_value(topology_st.standalone, attr)
for attr in sorted(attr_to_fail):
log.info("Removing %s" % attr)
try:
- _reset_config_value(topology.standalone, attr)
+ _reset_config_value(topology_st.standalone, attr)
# Shouldn't reach here, the reset should fail!
- assert(False)
+ assert (False)
except ldap.UNWILLING_TO_PERFORM:
log.info('Change was rejected')
except ldap.OPERATIONS_ERROR:
@@ -178,12 +131,11 @@ def test_ticket48961_deleteall(topology):
log.info("This attribute isn't part of cn=config, so is already default!")
pass
+ topology_st.standalone.restart()
- topology.standalone.restart()
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s %s" % CURRENT_FILE)
-
diff --git a/dirsrvtests/tests/tickets/ticket49073_test.py b/dirsrvtests/tests/tickets/ticket49073_test.py
index 0c594a9..79e98e0 100644
--- a/dirsrvtests/tests/tickets/ticket49073_test.py
+++ b/dirsrvtests/tests/tickets/ticket49073_test.py
@@ -1,17 +1,9 @@
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_m2
-DEBUGGING = False
+DEBUGGING = os.getenv('DEBUGGING', False)
GROUP_DN = ("cn=group," + DEFAULT_SUFFIX)
if DEBUGGING:
@@ -21,120 +13,12 @@ else:
log = logging.getLogger(__name__)
-class TopologyReplication(object):
- def __init__(self, master1, master2):
- master1.open()
- self.master1 = master1
- master2.open()
- self.master2 = master2
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- """Create Replication Deployment"""
-
- # Creating master 1...
- if DEBUGGING:
- master1 = DirSrv(verbose=True)
- else:
- master1 = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_MASTER_1
- args_instance[SER_PORT] = PORT_MASTER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master1.allocate(args_master)
- instance_master1 = master1.exists()
- if instance_master1:
- master1.delete()
- master1.create()
- master1.open()
- master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-
- # Creating master 2...
- if DEBUGGING:
- master2 = DirSrv(verbose=True)
- else:
- master2 = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_MASTER_2
- args_instance[SER_PORT] = PORT_MASTER_2
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master2.allocate(args_master)
- instance_master2 = master2.exists()
- if instance_master2:
- master2.delete()
- master2.create()
- master2.open()
- master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
- def fin():
- """If we are debugging just stop the instances,
- otherwise remove them
- """
-
- if DEBUGGING:
- master1.stop()
- master2.stop()
- else:
- #master1.delete()
- #master2.delete()
- pass
-
- request.addfinalizer(fin)
-
- # Create all the agreements
-
- # Creating agreement from master 1 to master 2
- properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
- if not m1_m2_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m1_m2_agmt)
-
- # Creating agreement from master 2 to master 1
- properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port),
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
- if not m2_m1_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m2_m1_agmt)
-
- # Allow the replicas to get situated with the new agreements...
- time.sleep(5)
-
- # Initialize all the agreements
- master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
- master1.waitForReplInit(m1_m2_agmt)
-
- # Check replication is working...
- if master1.testReplication(DEFAULT_SUFFIX, master2):
- log.info('Replication is working.')
- else:
- log.fatal('Replication is not working.')
- assert False
-
- # Clear out the tmp dir
- master1.clearTmpDir(__file__)
-
- return TopologyReplication(master1, master2)
-
-def _add_group_with_members(topology):
+def _add_group_with_members(topology_m2):
# Create group
try:
- topology.master1.add_s(Entry((GROUP_DN,
- {'objectclass': 'top groupofnames'.split(),
- 'cn': 'group'})))
+ topology_m2.ms["master1"].add_s(Entry((GROUP_DN,
+ {'objectclass': 'top groupofnames'.split(),
+ 'cn': 'group'})))
except ldap.LDAPError as e:
log.fatal('Failed to add group: error ' + e.message['desc'])
assert False
@@ -144,15 +28,16 @@ def _add_group_with_members(topology):
for idx in range(1, 5):
try:
MEMBER_VAL = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
- topology.master1.modify_s(GROUP_DN,
- [(ldap.MOD_ADD,
- 'member',
- MEMBER_VAL)])
+ topology_m2.ms["master1"].modify_s(GROUP_DN,
+ [(ldap.MOD_ADD,
+ 'member',
+ MEMBER_VAL)])
except ldap.LDAPError as e:
log.fatal('Failed to update group: member (%s) - error: %s' %
(MEMBER_VAL, e.message['desc']))
assert False
+
def _check_memberof(master, presence_flag):
# Check that members have memberof attribute on M1
for idx in range(1, 5):
@@ -160,13 +45,14 @@ def _check_memberof(master, presence_flag):
USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
ent = master.getEntry(USER_DN, ldap.SCOPE_BASE, "(objectclass=*)")
if presence_flag:
- assert ent.hasAttr('memberof') and ent.getValue('memberof') == GROUP_DN
+ assert ent.hasAttr('memberof') and ent.getValue('memberof') == GROUP_DN
else:
- assert not ent.hasAttr('memberof')
+ assert not ent.hasAttr('memberof')
except ldap.LDAPError as e:
log.fatal('Failed to retrieve user (%s): error %s' % (USER_DN, e.message['desc']))
assert False
+
def _check_entry_exist(master, dn):
attempt = 0
while attempt <= 10:
@@ -182,32 +68,33 @@ def _check_entry_exist(master, dn):
assert False
assert attempt != 10
-def test_ticket49073(topology):
+
+def test_ticket49073(topology_m2):
"""Write your replication test here.
- To access each DirSrv instance use: topology.master1, topology.master2,
- ..., topology.hub1, ..., topology.consumer1,...
+ To access each DirSrv instance use: topology_m2.ms["master1"], topology_m2.ms["master2"],
+ ..., topology_m2.hub1, ..., topology_m2.consumer1,...
Also, if you need any testcase initialization,
please, write additional fixture for that(include finalizer).
"""
- topology.master1.plugins.enable(name=PLUGIN_MEMBER_OF)
- topology.master1.restart(timeout=10)
- topology.master2.plugins.enable(name=PLUGIN_MEMBER_OF)
- topology.master2.restart(timeout=10)
+ topology_m2.ms["master1"].plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology_m2.ms["master1"].restart(timeout=10)
+ topology_m2.ms["master2"].plugins.enable(name=PLUGIN_MEMBER_OF)
+ topology_m2.ms["master2"].restart(timeout=10)
# Configure fractional to prevent total init to send memberof
- ents = topology.master1.agreement.list(suffix=SUFFIX)
+ ents = topology_m2.ms["master1"].agreement.list(suffix=SUFFIX)
assert len(ents) == 1
log.info('update %s to add nsDS5ReplicatedAttributeListTotal' % ents[0].dn)
- topology.master1.modify_s(ents[0].dn,
- [(ldap.MOD_REPLACE,
- 'nsDS5ReplicatedAttributeListTotal',
- '(objectclass=*) $ EXCLUDE '),
- (ldap.MOD_REPLACE,
- 'nsDS5ReplicatedAttributeList',
- '(objectclass=*) $ EXCLUDE memberOf')])
- topology.master1.restart(timeout=10)
+ topology_m2.ms["master1"].modify_s(ents[0].dn,
+ [(ldap.MOD_REPLACE,
+ 'nsDS5ReplicatedAttributeListTotal',
+ '(objectclass=*) $ EXCLUDE '),
+ (ldap.MOD_REPLACE,
+ 'nsDS5ReplicatedAttributeList',
+ '(objectclass=*) $ EXCLUDE memberOf')])
+ topology_m2.ms["master1"].restart(timeout=10)
#
# create some users and a group
@@ -216,34 +103,33 @@ def test_ticket49073(topology):
for idx in range(1, 5):
try:
USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
- topology.master1.add_s(Entry((USER_DN,
- {'objectclass': 'top extensibleObject'.split(),
- 'uid': 'member%d' % (idx)})))
+ topology_m2.ms["master1"].add_s(Entry((USER_DN,
+ {'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'member%d' % (idx)})))
except ldap.LDAPError as e:
log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.message['desc']))
assert False
- _check_entry_exist(topology.master2, "uid=member4,%s" % (DEFAULT_SUFFIX))
- _add_group_with_members(topology)
- _check_entry_exist(topology.master2, GROUP_DN)
+ _check_entry_exist(topology_m2.ms["master2"], "uid=member4,%s" % (DEFAULT_SUFFIX))
+ _add_group_with_members(topology_m2)
+ _check_entry_exist(topology_m2.ms["master2"], GROUP_DN)
# Check that for regular update memberof was on both side (because plugin is enabled both)
time.sleep(5)
- _check_memberof(topology.master1, True)
- _check_memberof(topology.master2, True)
-
+ _check_memberof(topology_m2.ms["master1"], True)
+ _check_memberof(topology_m2.ms["master2"], True)
# reinit with fractional definition
- ents = topology.master1.agreement.list(suffix=SUFFIX)
+ ents = topology_m2.ms["master1"].agreement.list(suffix=SUFFIX)
assert len(ents) == 1
- topology.master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
- topology.master1.waitForReplInit(ents[0].dn)
+ topology_m2.ms["master1"].agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
+ topology_m2.ms["master1"].waitForReplInit(ents[0].dn)
# Check that for total update memberof was on both side
# because memberof is NOT excluded from total init
time.sleep(5)
- _check_memberof(topology.master1, True)
- _check_memberof(topology.master2, True)
+ _check_memberof(topology_m2.ms["master1"], True)
+ _check_memberof(topology_m2.ms["master2"], True)
if DEBUGGING:
# Add debugging steps(if any)...
@@ -255,4 +141,3 @@ if __name__ == '__main__':
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s %s" % CURRENT_FILE)
-
diff --git a/dirsrvtests/tests/tickets/ticket548_test.py b/dirsrvtests/tests/tickets/ticket548_test.py
index 257213a..8c55254 100644
--- a/dirsrvtests/tests/tickets/ticket548_test.py
+++ b/dirsrvtests/tests/tickets/ticket548_test.py
@@ -6,18 +6,10 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-import os
-import sys
-import time
-import ldap
-import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
from lib389.tasks import *
from lib389.utils import *
+from lib389.topologies import topology_st
log = logging.getLogger(__name__)
@@ -34,45 +26,18 @@ USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX
USER3_DN = 'uid=user3,' + DEFAULT_SUFFIX
USER_PW = 'password'
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
-
- # Delete each instance in the end
- def fin():
- standalone.delete()
- request.addfinalizer(fin)
-
- return TopologyStandalone(standalone)
def days_to_secs(days):
# Value of 60 * 60 * 24
return days * 86400
+
# Values are in days
-def set_global_pwpolicy(topology, min_=1, max_=10, warn=3):
+def set_global_pwpolicy(topology_st, min_=1, max_=10, warn=3):
log.info(" +++++ Enable global password policy +++++\n")
# Enable password policy
try:
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')])
+ topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')])
except ldap.LDAPError as e:
log.error('Failed to set pwpolicy-local: error ' + e.message['desc'])
assert False
@@ -82,29 +47,29 @@ def set_global_pwpolicy(topology, min_=1, max_=10, warn=3):
max_secs = days_to_secs(max_)
warn_secs = days_to_secs(warn)
- log.info(" Set global password Min Age -- %s day\n"% min_)
+ log.info(" Set global password Min Age -- %s day\n" % min_)
try:
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordMinAge', '%s' % min_secs)])
+ topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordMinAge', '%s' % min_secs)])
except ldap.LDAPError as e:
log.error('Failed to set passwordMinAge: error ' + e.message['desc'])
assert False
log.info(" Set global password Max Age -- %s days\n" % max_)
try:
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordMaxAge', '%s' % max_secs)])
+ topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordMaxAge', '%s' % max_secs)])
except ldap.LDAPError as e:
log.error('Failed to set passwordMaxAge: error ' + e.message['desc'])
assert False
log.info(" Set global password Warning -- %s days\n" % warn)
try:
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordWarning', '%s' % warn_secs)])
+ topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordWarning', '%s' % warn_secs)])
except ldap.LDAPError as e:
log.error('Failed to set passwordWarning: error ' + e.message['desc'])
assert False
-def set_subtree_pwpolicy(topology, min_=2, max_=20, warn=6):
+def set_subtree_pwpolicy(topology_st, min_=2, max_=20, warn=6):
log.info(" +++++ Enable subtree level password policy +++++\n")
# Convert our values to seconds
@@ -114,63 +79,69 @@ def set_subtree_pwpolicy(topology, min_=2, max_=20, warn=6):
log.info(" Add the container")
try:
- topology.standalone.add_s(Entry((SUBTREE_CONTAINER, {'objectclass': 'top nsContainer'.split(),
- 'cn': 'nsPwPolicyContainer'})))
+ topology_st.standalone.add_s(Entry((SUBTREE_CONTAINER, {'objectclass': 'top nsContainer'.split(),
+ 'cn': 'nsPwPolicyContainer'})))
except ldap.LDAPError as e:
log.error('Failed to add subtree container: error ' + e.message['desc'])
- #assert False
+ # assert False
try:
# Purge the old policy
- topology.standalone.delete_s(SUBTREE_PWP)
+ topology_st.standalone.delete_s(SUBTREE_PWP)
except:
pass
- log.info(" Add the password policy subentry {passwordMustChange: on, passwordMinAge: %s, passwordMaxAge: %s, passwordWarning: %s}" % (min_, max_, warn))
+ log.info(
+ " Add the password policy subentry {passwordMustChange: on, passwordMinAge: %s, passwordMaxAge: %s, passwordWarning: %s}" % (
+ min_, max_, warn))
try:
- topology.standalone.add_s(Entry((SUBTREE_PWP, {'objectclass': 'top ldapsubentry passwordpolicy'.split(),
- 'cn': SUBTREE_PWPDN,
- 'passwordMustChange': 'on',
- 'passwordExp': 'on',
- 'passwordMinAge': '%s' % min_secs,
- 'passwordMaxAge': '%s' % max_secs,
- 'passwordWarning': '%s' % warn_secs,
- 'passwordChange': 'on',
- 'passwordStorageScheme': 'clear'})))
+ topology_st.standalone.add_s(Entry((SUBTREE_PWP, {'objectclass': 'top ldapsubentry passwordpolicy'.split(),
+ 'cn': SUBTREE_PWPDN,
+ 'passwordMustChange': 'on',
+ 'passwordExp': 'on',
+ 'passwordMinAge': '%s' % min_secs,
+ 'passwordMaxAge': '%s' % max_secs,
+ 'passwordWarning': '%s' % warn_secs,
+ 'passwordChange': 'on',
+ 'passwordStorageScheme': 'clear'})))
except ldap.LDAPError as e:
log.error('Failed to add passwordpolicy: error ' + e.message['desc'])
assert False
log.info(" Add the COS template")
try:
- topology.standalone.add_s(Entry((SUBTREE_COS_TMPL, {'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
- 'cn': SUBTREE_PWPDN,
- 'cosPriority': '1',
- 'cn': SUBTREE_COS_TMPLDN,
- 'pwdpolicysubentry': SUBTREE_PWP})))
+ topology_st.standalone.add_s(
+ Entry((SUBTREE_COS_TMPL, {'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
+ 'cn': SUBTREE_PWPDN,
+ 'cosPriority': '1',
+ 'cn': SUBTREE_COS_TMPLDN,
+ 'pwdpolicysubentry': SUBTREE_PWP})))
except ldap.LDAPError as e:
log.error('Failed to add COS template: error ' + e.message['desc'])
- #assert False
+ # assert False
log.info(" Add the COS definition")
try:
- topology.standalone.add_s(Entry((SUBTREE_COS_DEF, {'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
- 'cn': SUBTREE_PWPDN,
- 'costemplatedn': SUBTREE_COS_TMPL,
- 'cosAttribute': 'pwdpolicysubentry default operational-default'})))
+ topology_st.standalone.add_s(
+ Entry((SUBTREE_COS_DEF, {'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
+ 'cn': SUBTREE_PWPDN,
+ 'costemplatedn': SUBTREE_COS_TMPL,
+ 'cosAttribute': 'pwdpolicysubentry default operational-default'})))
except ldap.LDAPError as e:
log.error('Failed to add COS def: error ' + e.message['desc'])
- #assert False
+ # assert False
time.sleep(1)
-def update_passwd(topology, user, passwd, newpasswd):
+
+def update_passwd(topology_st, user, passwd, newpasswd):
log.info(" Bind as {%s,%s}" % (user, passwd))
- topology.standalone.simple_bind_s(user, passwd)
+ topology_st.standalone.simple_bind_s(user, passwd)
try:
- topology.standalone.modify_s(user, [(ldap.MOD_REPLACE, 'userpassword', newpasswd)])
+ topology_st.standalone.modify_s(user, [(ldap.MOD_REPLACE, 'userpassword', newpasswd)])
except ldap.LDAPError as e:
- log.fatal('test_ticket548: Failed to update the password ' + cpw + ' of user ' + user + ': error ' + e.message['desc'])
+ log.fatal('test_ticket548: Failed to update the password ' + cpw + ' of user ' + user + ': error ' + e.message[
+ 'desc'])
assert False
time.sleep(1)
@@ -190,24 +161,25 @@ def check_shadow_attr_value(entry, attr_type, expected, dn):
assert False
-def test_ticket548_test_with_no_policy(topology):
+def test_ticket548_test_with_no_policy(topology_st):
"""
Check shadowAccount under no password policy
"""
log.info("Case 1. No password policy")
log.info("Bind as %s" % DN_DM)
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
log.info('Add an entry' + USER1_DN)
try:
- topology.standalone.add_s(Entry((USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson shadowAccount".split(),
- 'sn': '1',
- 'cn': 'user 1',
- 'uid': 'user1',
- 'givenname': 'user',
- 'mail': 'user1@' + DEFAULT_SUFFIX,
- 'userpassword': USER_PW})))
+ topology_st.standalone.add_s(
+ Entry((USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson shadowAccount".split(),
+ 'sn': '1',
+ 'cn': 'user 1',
+ 'uid': 'user1',
+ 'givenname': 'user',
+ 'mail': 'user1@' + DEFAULT_SUFFIX,
+ 'userpassword': USER_PW})))
except ldap.LDAPError as e:
log.fatal('test_ticket548: Failed to add user' + USER1_DN + ': error ' + e.message['desc'])
assert False
@@ -216,14 +188,14 @@ def test_ticket548_test_with_no_policy(topology):
log.info('Search entry %s' % USER1_DN)
log.info("Bind as %s" % USER1_DN)
- topology.standalone.simple_bind_s(USER1_DN, USER_PW)
- entry = topology.standalone.getEntry(USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['shadowLastChange'])
+ topology_st.standalone.simple_bind_s(USER1_DN, USER_PW)
+ entry = topology_st.standalone.getEntry(USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['shadowLastChange'])
check_shadow_attr_value(entry, 'shadowLastChange', edate, USER1_DN)
log.info("Check shadowAccount with no policy was successfully verified.")
-def test_ticket548_test_global_policy(topology):
+def test_ticket548_test_global_policy(topology_st):
"""
Check shadowAccount with global password policy
"""
@@ -231,19 +203,20 @@ def test_ticket548_test_global_policy(topology):
log.info("Case 2. Check shadowAccount with global password policy")
log.info("Bind as %s" % DN_DM)
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
- set_global_pwpolicy(topology)
+ set_global_pwpolicy(topology_st)
log.info('Add an entry' + USER2_DN)
try:
- topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top person organizationalPerson inetOrgPerson shadowAccount".split(),
- 'sn': '2',
- 'cn': 'user 2',
- 'uid': 'user2',
- 'givenname': 'user',
- 'mail': 'user2@' + DEFAULT_SUFFIX,
- 'userpassword': USER_PW})))
+ topology_st.standalone.add_s(
+ Entry((USER2_DN, {'objectclass': "top person organizationalPerson inetOrgPerson shadowAccount".split(),
+ 'sn': '2',
+ 'cn': 'user 2',
+ 'uid': 'user2',
+ 'givenname': 'user',
+ 'mail': 'user2@' + DEFAULT_SUFFIX,
+ 'userpassword': USER_PW})))
except ldap.LDAPError as e:
log.fatal('test_ticket548: Failed to add user' + USER2_DN + ': error ' + e.message['desc'])
assert False
@@ -251,10 +224,10 @@ def test_ticket548_test_global_policy(topology):
edate = int(time.time() / (60 * 60 * 24))
log.info("Bind as %s" % USER1_DN)
- topology.standalone.simple_bind_s(USER1_DN, USER_PW)
+ topology_st.standalone.simple_bind_s(USER1_DN, USER_PW)
log.info('Search entry %s' % USER1_DN)
- entry = topology.standalone.getEntry(USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ entry = topology_st.standalone.getEntry(USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
check_shadow_attr_value(entry, 'shadowLastChange', edate, USER1_DN)
# passwordMinAge -- 1 day
@@ -267,10 +240,10 @@ def test_ticket548_test_global_policy(topology):
check_shadow_attr_value(entry, 'shadowWarning', 3, USER1_DN)
log.info("Bind as %s" % USER2_DN)
- topology.standalone.simple_bind_s(USER2_DN, USER_PW)
+ topology_st.standalone.simple_bind_s(USER2_DN, USER_PW)
log.info('Search entry %s' % USER2_DN)
- entry = topology.standalone.getEntry(USER2_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ entry = topology_st.standalone.getEntry(USER2_DN, ldap.SCOPE_BASE, "(objectclass=*)")
check_shadow_attr_value(entry, 'shadowLastChange', edate, USER2_DN)
# passwordMinAge -- 1 day
@@ -284,22 +257,22 @@ def test_ticket548_test_global_policy(topology):
# Bind as DM again, change policy
log.info("Bind as %s" % DN_DM)
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- set_global_pwpolicy(topology, 3, 30, 9)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ set_global_pwpolicy(topology_st, 3, 30, 9)
# change the user password, then check again.
log.info("Bind as %s" % USER2_DN)
- topology.standalone.simple_bind_s(USER2_DN, USER_PW)
+ topology_st.standalone.simple_bind_s(USER2_DN, USER_PW)
newpasswd = USER_PW + '2'
- update_passwd(topology, USER2_DN, USER_PW, newpasswd)
+ update_passwd(topology_st, USER2_DN, USER_PW, newpasswd)
log.info("Re-bind as %s with new password" % USER2_DN)
- topology.standalone.simple_bind_s(USER2_DN, newpasswd)
+ topology_st.standalone.simple_bind_s(USER2_DN, newpasswd)
## This tests if we update the shadow values on password change.
log.info('Search entry %s' % USER2_DN)
- entry = topology.standalone.getEntry(USER2_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ entry = topology_st.standalone.getEntry(USER2_DN, ldap.SCOPE_BASE, "(objectclass=*)")
# passwordMinAge -- 1 day
check_shadow_attr_value(entry, 'shadowMin', 3, USER2_DN)
@@ -313,7 +286,7 @@ def test_ticket548_test_global_policy(topology):
log.info("Check shadowAccount with global policy was successfully verified.")
-def test_ticket548_test_subtree_policy(topology):
+def test_ticket548_test_subtree_policy(topology_st):
"""
Check shadowAccount with subtree level password policy
"""
@@ -321,26 +294,27 @@ def test_ticket548_test_subtree_policy(topology):
log.info("Case 3. Check shadowAccount with subtree level password policy")
log.info("Bind as %s" % DN_DM)
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
# Check the global policy values
- set_subtree_pwpolicy(topology, 2, 20, 6)
+ set_subtree_pwpolicy(topology_st, 2, 20, 6)
log.info('Add an entry' + USER3_DN)
try:
- topology.standalone.add_s(Entry((USER3_DN, {'objectclass': "top person organizationalPerson inetOrgPerson shadowAccount".split(),
- 'sn': '3',
- 'cn': 'user 3',
- 'uid': 'user3',
- 'givenname': 'user',
- 'mail': 'user3@' + DEFAULT_SUFFIX,
- 'userpassword': USER_PW})))
+ topology_st.standalone.add_s(
+ Entry((USER3_DN, {'objectclass': "top person organizationalPerson inetOrgPerson shadowAccount".split(),
+ 'sn': '3',
+ 'cn': 'user 3',
+ 'uid': 'user3',
+ 'givenname': 'user',
+ 'mail': 'user3@' + DEFAULT_SUFFIX,
+ 'userpassword': USER_PW})))
except ldap.LDAPError as e:
log.fatal('test_ticket548: Failed to add user' + USER3_DN + ': error ' + e.message['desc'])
assert False
log.info('Search entry %s' % USER3_DN)
- entry0 = topology.standalone.getEntry(USER3_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ entry0 = topology_st.standalone.getEntry(USER3_DN, ldap.SCOPE_BASE, "(objectclass=*)")
log.info('Expecting shadowLastChange 0 since passwordMustChange is on')
check_shadow_attr_value(entry0, 'shadowLastChange', 0, USER3_DN)
@@ -355,11 +329,11 @@ def test_ticket548_test_subtree_policy(topology):
check_shadow_attr_value(entry0, 'shadowWarning', 6, USER3_DN)
log.info("Bind as %s" % USER3_DN)
- topology.standalone.simple_bind_s(USER3_DN, USER_PW)
+ topology_st.standalone.simple_bind_s(USER3_DN, USER_PW)
log.info('Search entry %s' % USER3_DN)
try:
- entry1 = topology.standalone.getEntry(USER3_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ entry1 = topology_st.standalone.getEntry(USER3_DN, ldap.SCOPE_BASE, "(objectclass=*)")
except ldap.UNWILLING_TO_PERFORM:
log.info('test_ticket548: Search by' + USER3_DN + ' failed by UNWILLING_TO_PERFORM as expected')
except ldap.LDAPError as e:
@@ -367,22 +341,22 @@ def test_ticket548_test_subtree_policy(topology):
assert False
log.info("Bind as %s and updating the password with a new one" % USER3_DN)
- topology.standalone.simple_bind_s(USER3_DN, USER_PW)
+ topology_st.standalone.simple_bind_s(USER3_DN, USER_PW)
# Bind as DM again, change policy
log.info("Bind as %s" % DN_DM)
- topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
- set_subtree_pwpolicy(topology, 4, 40, 12)
+ set_subtree_pwpolicy(topology_st, 4, 40, 12)
newpasswd = USER_PW + '0'
- update_passwd(topology, USER3_DN, USER_PW, newpasswd)
+ update_passwd(topology_st, USER3_DN, USER_PW, newpasswd)
log.info("Re-bind as %s with new password" % USER3_DN)
- topology.standalone.simple_bind_s(USER3_DN, newpasswd)
+ topology_st.standalone.simple_bind_s(USER3_DN, newpasswd)
try:
- entry2 = topology.standalone.getEntry(USER3_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ entry2 = topology_st.standalone.getEntry(USER3_DN, ldap.SCOPE_BASE, "(objectclass=*)")
except ldap.LDAPError as e:
log.fatal('test_ticket548: Failed to serch user' + USER3_DN + ' by self: error ' + e.message['desc'])
assert False
@@ -393,7 +367,7 @@ def test_ticket548_test_subtree_policy(topology):
check_shadow_attr_value(entry2, 'shadowLastChange', edate, USER3_DN)
log.info('Search entry %s' % USER3_DN)
- entry = topology.standalone.getEntry(USER3_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+ entry = topology_st.standalone.getEntry(USER3_DN, ldap.SCOPE_BASE, "(objectclass=*)")
check_shadow_attr_value(entry, 'shadowLastChange', edate, USER3_DN)
# passwordMinAge -- 1 day
7 years, 3 months
Branch '389-ds-base-1.3.5' - ldap/schema
by Noriko Hosoi
ldap/schema/01core389.ldif | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
New commits:
commit 238d3c74a6a289f4af2b6fdb66173df7840ff981
Author: Thierry Bordaz <tbordaz(a)redhat.com>
Date: Wed Dec 21 16:31:48 2016 +0100
Ticket 49074 - incompatible nsEncryptionConfig object definition prevents RHEL 7->6 schema replication
Bug Description:
nsEncryptionConfig schema definition diverge since 1.3.x and 1.2.11.15-83.
Schema learning mechanism does not merge definition so the schema can not be pushed RHEL7->6.
This triggers schema violation errors
Fix Description:
Defines nsTLS10, nsTLS11 and nsTLS12 attributetypes and add them to the allowed
attributes list of nsEncryptionConfig
https://fedorahosted.org/389/ticket/49074
Reviewed by: Noriko Hosoi (thanks!!)
Platforms tested: RHEL7.3 vs RHEL6.8 and RHEL6.9
Flag Day: no
Doc impact: no
(cherry picked from commit 64a425e4ea868bc1f08145490a7c8c9cf5c91581)
diff --git a/ldap/schema/01core389.ldif b/ldap/schema/01core389.ldif
index dfa4729..5e5f69f 100644
--- a/ldap/schema/01core389.ldif
+++ b/ldap/schema/01core389.ldif
@@ -91,6 +91,9 @@ attributeTypes: ( nsKeyfile-oid NAME 'nsKeyfile' DESC 'Netscape defined attribut
attributeTypes: ( nsSSL2-oid NAME 'nsSSL2' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape' )
attributeTypes: ( nsSSL3-oid NAME 'nsSSL3' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape' )
attributeTypes: ( nsTLS1-oid NAME 'nsTLS1' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape' )
+attributeTypes: ( nsTLS10-oid NAME 'nsTLS10' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape' )
+attributeTypes: ( nsTLS11-oid NAME 'nsTLS11' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape' )
+attributeTypes: ( nsTLS12-oid NAME 'nsTLS12' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape' )
attributeTypes: ( sslVersionMin-oid NAME 'sslVersionMin' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape' )
attributeTypes: ( sslVersionMax-oid NAME 'sslVersionMax' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape' )
attributeTypes: ( nsSSLClientAuth-oid NAME 'nsSSLClientAuth' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape' )
@@ -314,7 +317,7 @@ objectClasses: ( 2.16.840.1.113730.3.2.103 NAME 'nsDS5ReplicationAgreement' DESC
objectClasses: ( 2.16.840.1.113730.3.2.39 NAME 'nsslapdConfig' DESC 'Netscape defined objectclass' SUP top MAY ( cn ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.317 NAME 'nsSaslMapping' DESC 'Netscape defined objectclass' SUP top MUST ( cn $ nsSaslMapRegexString $ nsSaslMapBaseDNTemplate $ nsSaslMapFilterTemplate ) MAY ( nsSaslMapPriority ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.43 NAME 'nsSNMP' DESC 'Netscape defined objectclass' SUP top MUST ( cn $ nsSNMPEnabled ) MAY ( nsSNMPOrganization $ nsSNMPLocation $ nsSNMPContact $ nsSNMPDescription $ nsSNMPName $ nsSNMPMasterHost $ nsSNMPMasterPort ) X-ORIGIN 'Netscape Directory Server' )
-objectClasses: ( nsEncryptionConfig-oid NAME 'nsEncryptionConfig' DESC 'Netscape defined objectclass' SUP top MUST ( cn ) MAY ( nsCertfile $ nsKeyfile $ nsSSL2 $ nsSSL3 $ nsTLS1 $ sslVersionMin $ sslVersionMax $ nsSSLSessionTimeout $ nsSSL3SessionTimeout $ nsSSLClientAuth $ nsSSL2Ciphers $ nsSSL3Ciphers $ nsSSLSupportedCiphers $ allowWeakCipher $ CACertExtractFile $ allowWeakDHParam ) X-ORIGIN 'Netscape' )
+objectClasses: ( nsEncryptionConfig-oid NAME 'nsEncryptionConfig' DESC 'Netscape defined objectclass' SUP top MUST ( cn ) MAY ( nsCertfile $ nsKeyfile $ nsSSL2 $ nsSSL3 $ nsTLS1 $ nsTLS10 $ nsTLS11 $ nsTLS12 $ sslVersionMin $ sslVersionMax $ nsSSLSessionTimeout $ nsSSL3SessionTimeout $ nsSSLClientAuth $ nsSSL2Ciphers $ nsSSL3Ciphers $ nsSSLSupportedCiphers $ allowWeakCipher $ CACertExtractFile $ allowWeakDHParam ) X-ORIGIN 'Netscape' )
objectClasses: ( nsEncryptionModule-oid NAME 'nsEncryptionModule' DESC 'Netscape defined objectclass' SUP top MUST ( cn ) MAY ( nsSSLToken $ nsSSLPersonalityssl $ nsSSLActivation $ ServerKeyExtractFile $ ServerCertExtractFile ) X-ORIGIN 'Netscape' )
objectClasses: ( 2.16.840.1.113730.3.2.327 NAME 'rootDNPluginConfig' DESC 'Netscape defined objectclass' SUP top MUST ( cn ) MAY ( rootdn-open-time $ rootdn-close-time $ rootdn-days-allowed $ rootdn-allow-host $ rootdn-deny-host $ rootdn-allow-ip $ rootdn-deny-ip ) X-ORIGIN 'Netscape' )
objectClasses: ( 2.16.840.1.113730.3.2.328 NAME 'nsSchemaPolicy' DESC 'Netscape defined objectclass' SUP top MAY ( cn $ schemaUpdateObjectclassAccept $ schemaUpdateObjectclassReject $ schemaUpdateAttributeAccept $ schemaUpdateAttributeReject) X-ORIGIN 'Netscape Directory Server' )
7 years, 3 months
Branch '389-ds-base-1.2.11' - ldap/servers
by Mark Reynolds
ldap/servers/plugins/replication/cl5_api.c | 1 -
ldap/servers/plugins/replication/repl5_replica_config.c | 14 ++++++--------
2 files changed, 6 insertions(+), 9 deletions(-)
New commits:
commit d4d10723bcea672739fe3407390caac392d1acfe
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Wed Jan 4 09:41:38 2017 -0500
Ticket 48964 - should not free repl name after purging changelog
Bug Description: The previous commit for this ticket incorrectly
freed a replica struct const char. There was also
a minor Coverity issue.
Fix Description: Do not free the repl_name after purging, and do
not check if "replica" is NULL when creating the
purge data.
https://fedorahosted.org/389/ticket/48964
Reviewed by: tbordaz(Thanks!)
(cherry picked from commit 017469a1a055da03fc3fb4b34a7732611b7bd2b0)
diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c
index 3c46ed3..9b2ef37 100644
--- a/ldap/servers/plugins/replication/cl5_api.c
+++ b/ldap/servers/plugins/replication/cl5_api.c
@@ -6888,7 +6888,6 @@ cl5CleanRUV(ReplicaId rid){
static void free_purge_data(cleanruv_purge_data *purge_data)
{
slapi_ch_free_string(&purge_data->replGen);
- slapi_ch_free_string(&purge_data->replName);
slapi_ch_free((void **)&purge_data);
}
diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c
index c6a9ffa..bd20b8a 100644
--- a/ldap/servers/plugins/replication/repl5_replica_config.c
+++ b/ldap/servers/plugins/replication/repl5_replica_config.c
@@ -1245,14 +1245,12 @@ replica_execute_cleanruv_task (Object *r, ReplicaId rid, char *returntext /* not
/*
* Now purge the changelog. The purging thread will free the purge_data
*/
- if (replica){
- purge_data = (cleanruv_purge_data*)slapi_ch_calloc(1, sizeof(cleanruv_purge_data));
- purge_data->cleaned_rid = rid;
- purge_data->suffix_sdn = replica_get_root(replica);
- purge_data->replName = (char *)replica_get_name(replica);
- purge_data->replGen = replica_get_generation(replica);
- trigger_cl_purging(purge_data);
- }
+ purge_data = (cleanruv_purge_data*)slapi_ch_calloc(1, sizeof(cleanruv_purge_data));
+ purge_data->cleaned_rid = rid;
+ purge_data->suffix_sdn = replica_get_root(replica);
+ purge_data->replName = (char *)replica_get_name(replica);
+ purge_data->replGen = replica_get_generation(replica);
+ trigger_cl_purging(purge_data);
if (rc != RUV_SUCCESS){
slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "cleanruv_task: task failed(%d)\n",rc);
7 years, 3 months
Branch '389-ds-base-1.3.4' - ldap/servers
by Mark Reynolds
ldap/servers/plugins/replication/cl5_api.c | 1 -
ldap/servers/plugins/replication/repl5_replica_config.c | 14 ++++++--------
2 files changed, 6 insertions(+), 9 deletions(-)
New commits:
commit 56a24df1e33d3526297f05948c0d71a6ddf034e4
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Wed Jan 4 09:41:38 2017 -0500
Ticket 48964 - should not free repl name after purging changelog
Bug Description: The previous commit for this ticket incorrectly
freed a replica struct const char. There was also
a minor Coverity issue.
Fix Description: Do not free the repl_name after purging, and do
not check if "replica" is NULL when creating the
purge data.
https://fedorahosted.org/389/ticket/48964
Reviewed by: tbordaz(Thanks!)
(cherry picked from commit 017469a1a055da03fc3fb4b34a7732611b7bd2b0)
diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c
index 3d0b002..093a0ab 100644
--- a/ldap/servers/plugins/replication/cl5_api.c
+++ b/ldap/servers/plugins/replication/cl5_api.c
@@ -6977,7 +6977,6 @@ cl5CleanRUV(ReplicaId rid){
static void free_purge_data(cleanruv_purge_data *purge_data)
{
slapi_ch_free_string(&purge_data->replGen);
- slapi_ch_free_string(&purge_data->replName);
slapi_ch_free((void **)&purge_data);
}
diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c
index d3e5ecf..0dca922 100644
--- a/ldap/servers/plugins/replication/repl5_replica_config.c
+++ b/ldap/servers/plugins/replication/repl5_replica_config.c
@@ -1468,14 +1468,12 @@ replica_execute_cleanruv_task (Object *r, ReplicaId rid, char *returntext /* not
/*
* Now purge the changelog. The purging thread will free the purge_data
*/
- if (replica){
- purge_data = (cleanruv_purge_data*)slapi_ch_calloc(1, sizeof(cleanruv_purge_data));
- purge_data->cleaned_rid = rid;
- purge_data->suffix_sdn = replica_get_root(replica);
- purge_data->replName = (char *)replica_get_name(replica);
- purge_data->replGen = replica_get_generation(replica);
- trigger_cl_purging(purge_data);
- }
+ purge_data = (cleanruv_purge_data*)slapi_ch_calloc(1, sizeof(cleanruv_purge_data));
+ purge_data->cleaned_rid = rid;
+ purge_data->suffix_sdn = replica_get_root(replica);
+ purge_data->replName = (char *)replica_get_name(replica);
+ purge_data->replGen = replica_get_generation(replica);
+ trigger_cl_purging(purge_data);
if (rc != RUV_SUCCESS){
slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "cleanruv_task: task failed(%d)\n",rc);
7 years, 3 months
Branch '389-ds-base-1.3.5' - ldap/servers
by Mark Reynolds
ldap/servers/plugins/replication/cl5_api.c | 1 -
ldap/servers/plugins/replication/repl5_replica_config.c | 14 ++++++--------
2 files changed, 6 insertions(+), 9 deletions(-)
New commits:
commit 0929992503e143aaaa3cbfcafc4de170217ae0bc
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Wed Jan 4 09:41:38 2017 -0500
Ticket 48964 - should not free repl name after purging changelog
Bug Description: The previous commit for this ticket incorrectly
freed a replica struct const char. There was also
a minor coverity issue.
Fix Description: Do not free the repl_name after purging, and do
not check if "replica" is NULL when creating the
purge data.
https://fedorahosted.org/389/ticket/48964
Reviewed by: tbordaz(Thanks!)
(cherry picked from commit 017469a1a055da03fc3fb4b34a7732611b7bd2b0)
diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c
index 66c2677..e72633a 100644
--- a/ldap/servers/plugins/replication/cl5_api.c
+++ b/ldap/servers/plugins/replication/cl5_api.c
@@ -6983,7 +6983,6 @@ cl5CleanRUV(ReplicaId rid){
static void free_purge_data(cleanruv_purge_data *purge_data)
{
slapi_ch_free_string(&purge_data->replGen);
- slapi_ch_free_string(&purge_data->replName);
slapi_ch_free((void **)&purge_data);
}
diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c
index d78d982..d10443d 100644
--- a/ldap/servers/plugins/replication/repl5_replica_config.c
+++ b/ldap/servers/plugins/replication/repl5_replica_config.c
@@ -1468,14 +1468,12 @@ replica_execute_cleanruv_task (Object *r, ReplicaId rid, char *returntext /* not
/*
* Now purge the changelog. The purging thread will free the purge_data
*/
- if (replica){
- purge_data = (cleanruv_purge_data*)slapi_ch_calloc(1, sizeof(cleanruv_purge_data));
- purge_data->cleaned_rid = rid;
- purge_data->suffix_sdn = replica_get_root(replica);
- purge_data->replName = (char *)replica_get_name(replica);
- purge_data->replGen = replica_get_generation(replica);
- trigger_cl_purging(purge_data);
- }
+ purge_data = (cleanruv_purge_data*)slapi_ch_calloc(1, sizeof(cleanruv_purge_data));
+ purge_data->cleaned_rid = rid;
+ purge_data->suffix_sdn = replica_get_root(replica);
+ purge_data->replName = (char *)replica_get_name(replica);
+ purge_data->replGen = replica_get_generation(replica);
+ trigger_cl_purging(purge_data);
if (rc != RUV_SUCCESS){
slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "cleanruv_task: task failed(%d)\n",rc);
7 years, 3 months