[Pkg-fedora-ds-maintainers] 389-ds-base: Changes to 'upstream'

Timo Aaltonen tjaalton at moszumanska.debian.org
Wed Oct 12 09:32:09 UTC 2016


Rebased ref, commits from common ancestor:
commit 43de686b709728ddef6c475fe108b09069d5cbc8
Author: Noriko Hosoi <nhosoi at redhat.com>
Date:   Mon Aug 8 12:16:51 2016 -0700

    bump version to 1.3.5.13

diff --git a/VERSION.sh b/VERSION.sh
index ebcf385..f831270 100644
--- a/VERSION.sh
+++ b/VERSION.sh
@@ -10,7 +10,7 @@ vendor="389 Project"
 # PACKAGE_VERSION is constructed from these
 VERSION_MAJOR=1
 VERSION_MINOR=3
-VERSION_MAINT=5.12
+VERSION_MAINT=5.13
 # NOTE: VERSION_PREREL is automatically set for builds made out of a git tree
 VERSION_PREREL=
 VERSION_DATE=`date -u +%Y%m%d%H%M%S`

commit 632e5b660fff80566e8912bc6edb88e3ab28a4ea
Author: William Brown <firstyear at redhat.com>
Date:   Mon Aug 8 13:56:02 2016 +1000

    Ticket 48450 - Autotools components for ds_systemd_ask_password_acl
    
    Bug Description:  William forgot to add the Makefile.in to the commit. This was
    found in the rpm build because it does not run autoreconf to regenerate the
    Makefile.in, manifesting as a missing file.
    
    Fix Description:  Commit Makefile.in
    
    https://fedorahosted.org/389/ticket/48450
    
    Author: wibrown
    
    Review by: nhosoi at redhat.com

diff --git a/Makefile.in b/Makefile.in
index 8109469..6788fe1 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -2044,7 +2044,8 @@ sbin_SCRIPTS = ldap/admin/src/scripts/setup-ds.pl \
 	ldap/admin/src/scripts/dbmon.sh \
 	ldap/admin/src/scripts/ds_selinux_enabled \
 	ldap/admin/src/scripts/ds_selinux_port_query \
-    wrappers/ldap-agent
+	wrappers/ds_systemd_ask_password_acl \
+	wrappers/ldap-agent
 
 bin_SCRIPTS = ldap/servers/slapd/tools/rsearch/scripts/dbgen.pl \
 	wrappers/dbscan \
@@ -10359,7 +10360,7 @@ distdir: $(DISTFILES)
 	  ! -type d ! -perm -444 -exec $(install_sh) -c -m a+r {} {} \; \
 	|| chmod -R a+r "$(distdir)"
 dist-gzip: distdir
-	tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz
+	tardir=$(distdir) && $(am__tar) | eval GZIP= gzip $(GZIP_ENV) -c >$(distdir).tar.gz
 	$(am__post_remove_distdir)
 dist-bzip2: distdir
 	tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2
@@ -10384,7 +10385,7 @@ dist-shar: distdir
 	@echo WARNING: "Support for shar distribution archives is" \
 	               "deprecated." >&2
 	@echo WARNING: "It will be removed altogether in Automake 2.0" >&2
-	shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz
+	shar $(distdir) | eval GZIP= gzip $(GZIP_ENV) -c >$(distdir).shar.gz
 	$(am__post_remove_distdir)
 
 dist-zip: distdir
@@ -10402,7 +10403,7 @@ dist dist-all:
 distcheck: dist
 	case '$(DIST_ARCHIVES)' in \
 	*.tar.gz*) \
-	  GZIP=$(GZIP_ENV) gzip -dc $(distdir).tar.gz | $(am__untar) ;;\
+	  eval GZIP= gzip $(GZIP_ENV) -dc $(distdir).tar.gz | $(am__untar) ;;\
 	*.tar.bz2*) \
 	  bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\
 	*.tar.lz*) \
@@ -10412,7 +10413,7 @@ distcheck: dist
 	*.tar.Z*) \
 	  uncompress -c $(distdir).tar.Z | $(am__untar) ;;\
 	*.shar.gz*) \
-	  GZIP=$(GZIP_ENV) gzip -dc $(distdir).shar.gz | unshar ;;\
+	  eval GZIP= gzip $(GZIP_ENV) -dc $(distdir).shar.gz | unshar ;;\
 	*.zip*) \
 	  unzip $(distdir).zip ;;\
 	esac

commit deef3c534ed6d5e83c03564a2a609df04ce0aa8d
Author: Noriko Hosoi <nhosoi at redhat.com>
Date:   Thu Aug 4 15:50:30 2016 -0700

    bump version to 1.3.5.12

diff --git a/VERSION.sh b/VERSION.sh
index fb72bed..ebcf385 100644
--- a/VERSION.sh
+++ b/VERSION.sh
@@ -10,7 +10,7 @@ vendor="389 Project"
 # PACKAGE_VERSION is constructed from these
 VERSION_MAJOR=1
 VERSION_MINOR=3
-VERSION_MAINT=5.11
+VERSION_MAINT=5.12
 # NOTE: VERSION_PREREL is automatically set for builds made out of a git tree
 VERSION_PREREL=
 VERSION_DATE=`date -u +%Y%m%d%H%M%S`

commit caa351ae0cc81cbf2309a43c5f74b359cda152d0
Author: Ludwig Krispenz <lkrispen at redhat.com>
Date:   Thu Aug 4 11:45:49 2016 -0700

    Bug 1347760 - CVE-2016-4992 389-ds-base: Information disclosure via repeated use of LDAP ADD operation, etc.
    
    Description: do not overwrite rc used to decide if bind was successful.
    When the bind is through ldapi/autobind, an entry does not exist to be
    checked with slapi_check_account_lock.  In that case, a variable rc is
    not supposed to be modified which confuses the following code path.
    
    Reviewed by nhosoi at redhat.com.

diff --git a/ldap/servers/slapd/bind.c b/ldap/servers/slapd/bind.c
index c271577..3054c1f 100644
--- a/ldap/servers/slapd/bind.c
+++ b/ldap/servers/slapd/bind.c
@@ -775,10 +775,12 @@ do_bind( Slapi_PBlock *pb )
                      */
                     if (!slapi_be_is_flag_set(be, SLAPI_BE_FLAG_REMOTE_DATA)) {
                         bind_target_entry = get_entry(pb, slapi_sdn_get_ndn(sdn));
-                        rc = slapi_check_account_lock(pb, bind_target_entry, pw_response_requested, 1, 1);
-                        if (1 == rc) { /* account is locked */
+                        myrc = slapi_check_account_lock(pb, bind_target_entry, pw_response_requested, 1, 1);
+                        if (1 == myrc) { /* account is locked */
+                            rc = myrc;
                             goto account_locked;
                         }
+                        myrc = 0;
                     }
                     if (!auto_bind) {
                         /* 

commit e6b48924adb753f47683f25fab6e2b8e5d3cf84c
Author: William Brown <firstyear at redhat.com>
Date:   Fri Jul 29 14:36:19 2016 +1000

    Ticket 48450 - Add prestart work around for systemd ask password
    
    Bug Description:  Due to a lack of response to fix the systemd ask password
    permissions, we must resolve this ourselves. Without this, we cannot utilise
    the ask password feature at all.
    
    Fix Description:  We add an execstartpre script, that parses dse.ldif for
    the running server user. If found, we add the acl to ask-password directory
    which will allow the server to start. We do this so that if each instance
    has a unique user, they can all use ask pass correctly.
    
    https://fedorahosted.org/389/ticket/48450
    
    Author: wibrown
    
    Review by: nhosoi (Thanks)

diff --git a/Makefile.am b/Makefile.am
index ed3d462..3e1bf47 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -620,7 +620,8 @@ sbin_SCRIPTS = ldap/admin/src/scripts/setup-ds.pl \
 	ldap/admin/src/scripts/dbmon.sh \
 	ldap/admin/src/scripts/ds_selinux_enabled \
 	ldap/admin/src/scripts/ds_selinux_port_query \
-    wrappers/ldap-agent
+	wrappers/ds_systemd_ask_password_acl \
+	wrappers/ldap-agent
 
 bin_SCRIPTS = ldap/servers/slapd/tools/rsearch/scripts/dbgen.pl \
 	wrappers/dbscan \
diff --git a/wrappers/ds_systemd_ask_password_acl.in b/wrappers/ds_systemd_ask_password_acl.in
new file mode 100644
index 0000000..59bffc5
--- /dev/null
+++ b/wrappers/ds_systemd_ask_password_acl.in
@@ -0,0 +1,34 @@
+#!/bin/sh
+# BEGIN COPYRIGHT BLOCK
+# Copyright (C) 2016 Red Hat, Inc.
+#
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# END COPYRIGHT BLOCK
+
+# Systemd has not fixed the issue at https://bugzilla.redhat.com/show_bug.cgi?id=1322167
+# As a result, we need a way to fix the permissions as we start.
+# We have to reset these each time, as this folder is on a tmpfs.
+# If we don't do this, we can't prompt for the password!
+# If you want this script to go away, fix the bugzilla so we don't need it!
+
+# Make sure we have the path to the dse.ldif
+if [ -z $1 ]
+then
+    echo "usage: ${0} /etc/dirsrv/slapd-<instance>/dse.ldif"
+    exit 1
+fi
+
+# Grep the user out
+
+DS_USER=`grep 'nsslapd-localuser: ' $1 | awk '{print $2}'`
+
+# Now apply the acl
+
+if [ -d /var/run/systemd/ask-password ]
+then
+    setfacl -m u:${DS_USER}:rwx /var/run/systemd/ask-password
+fi
+
diff --git a/wrappers/systemd.template.asan.service.in b/wrappers/systemd.template.asan.service.in
index dd361b4..5de91de 100644
--- a/wrappers/systemd.template.asan.service.in
+++ b/wrappers/systemd.template.asan.service.in
@@ -25,6 +25,7 @@ PIDFile=@localstatedir@/run/@package_name@/slapd-%i.pid
 # We can't symbolize here, as llvm symbolize crashes when it goes near systemd.
 Environment='ASAN_OPTIONS="detect_leaks=1 symbolize=0 log_path=@localstatedir@/run/@package_name@/ns-slapd-%i.asan detect_deadlocks=1"'
 LimitCORE=infinity
+ExecStartPre=@sbindir@/ds_systemd_ask_password_acl @instconfigdir@/slapd-%i/dse.ldif
 ExecStart=@sbindir@/ns-slapd -D @instconfigdir@/slapd-%i -i @localstatedir@/run/@package_name@/slapd-%i.pid
 # if you need to set other directives e.g. LimitNOFILE=8192
 # set them in this file
diff --git a/wrappers/systemd.template.service.in b/wrappers/systemd.template.service.in
index a045036..6f096b7 100644
--- a/wrappers/systemd.template.service.in
+++ b/wrappers/systemd.template.service.in
@@ -22,6 +22,7 @@ Type=notify
 EnvironmentFile=@initconfigdir@/@package_name@
 EnvironmentFile=@initconfigdir@/@package_name at -%i
 PIDFile=@localstatedir@/run/@package_name@/slapd-%i.pid
+ExecStartPre=@sbindir@/ds_systemd_ask_password_acl @instconfigdir@/slapd-%i/dse.ldif
 ExecStart=@sbindir@/ns-slapd -D @instconfigdir@/slapd-%i -i @localstatedir@/run/@package_name@/slapd-%i.pid
 # if you need to set other directives e.g. LimitNOFILE=8192
 # set them in this file

commit 802224f2846900c870a780fe7608782792806d85
Author: Noriko Hosoi <nhosoi at redhat.com>
Date:   Sat Jul 30 16:56:57 2016 -0700

    Ticket #48943 - When fine-grained policy is applied, a sub-tree has a priority over a user while changing password
    
    Description: If the user entry has a pwdpolicysubentry, the configuration
    in the pwpolicy should be the strongest and respected.  If the entry does
    not have it, it retrieves the pwpolicy from the CoS Cache, which is the
    current behaviour.
    
    https://fedorahosted.org/389/ticket/48943
    
    Reviewed by wibrown at redhat.com (Thank you, William!!)

diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c
index 498afd4..6b865ec 100644
--- a/ldap/servers/slapd/pw.c
+++ b/ldap/servers/slapd/pw.c
@@ -1777,9 +1777,17 @@ new_passwdPolicy(Slapi_PBlock *pb, const char *dn)
 			attribute in the target entry itself. */
 		} else {
 			if ( (e = get_entry( pb, dn )) != NULL ) {
-				rc = slapi_vattr_values_get(e, "pwdpolicysubentry", &values,
-					&type_name_disposition, &actual_type_name, 
-					SLAPI_VIRTUALATTRS_REQUEST_POINTERS, &attr_free_flags);
+				Slapi_Attr* attr = NULL;
+				rc = slapi_entry_attr_find(e, "pwdpolicysubentry", &attr);
+				if (attr && (0 == rc)) {
+					/* If the entry has pwdpolicysubentry, use the PwPolicy. */
+					values = valueset_dup(&attr->a_present_values);
+				} else {
+					/* Otherwise, retrieve the policy from CoS Cache */
+					rc = slapi_vattr_values_get(e, "pwdpolicysubentry", &values,
+						&type_name_disposition, &actual_type_name,
+						SLAPI_VIRTUALATTRS_REQUEST_POINTERS, &attr_free_flags);
+				}
 				if (rc) {
 					values = NULL;
 				}

commit a12dc3690f49704d86f63a96bff81abe62267397
Author: Simon Pichugin <spichugi at redhat.com>
Date:   Tue Aug 2 17:29:00 2016 +0200

    Ticket 47976 - Add fixed CI test case
    
    Description: Add test case written by Thierry and fix it depending on
    the current directory structure.
    
    https://fedorahosted.org/389/ticket/47976
    
    Reviewed by: wibrown, mreynolds (Thanks!)

diff --git a/dirsrvtests/tests/tickets/ticket47976_test.py b/dirsrvtests/tests/tickets/ticket47976_test.py
new file mode 100644
index 0000000..df4891d
--- /dev/null
+++ b/dirsrvtests/tests/tickets/ticket47976_test.py
@@ -0,0 +1,203 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from lib389.utils import *
+
+PEOPLE_OU='people'
+PEOPLE_DN = "ou=%s,%s" % (PEOPLE_OU, SUFFIX)
+GROUPS_OU='groups'
+GROUPS_DN = "ou=%s,%s" % (GROUPS_OU, SUFFIX)
+DEFINITIONS_CN='definitions'
+DEFINITIONS_DN = "cn=%s,%s" % (DEFINITIONS_CN, SUFFIX)
+TEMPLATES_CN='templates'
+TEMPLATES_DN = "cn=%s,%s" % (TEMPLATES_CN, SUFFIX)
+MANAGED_GROUP_TEMPLATES_CN='managed group templates'
+MANAGED_GROUP_TEMPLATES_DN='cn=%s,%s' % (MANAGED_GROUP_TEMPLATES_CN, TEMPLATES_DN)
+MANAGED_GROUP_MEP_TMPL_CN='UPG'
+MANAGED_GROUP_MEP_TMPL_DN='cn=%s,%s' % (MANAGED_GROUP_MEP_TMPL_CN, MANAGED_GROUP_TEMPLATES_DN)
+MANAGED_GROUP_DEF_CN='managed group definition'
+MANAGED_GROUP_DEF_DN='cn=%s,%s' % (MANAGED_GROUP_DEF_CN, DEFINITIONS_DN)
+
+MAX_ACCOUNTS=2
+
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+installation1_prefix = None
+
+
+class TopologyStandalone(object):
+    def __init__(self, standalone):
+        standalone.open()
+        self.standalone = standalone
+
+
+ at pytest.fixture(scope="module")
+def topology(request):
+    global installation1_prefix
+    if installation1_prefix:
+        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+
+    # Creating standalone instance ...
+    standalone = DirSrv(verbose=False)
+    if installation1_prefix:
+        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+    args_instance[SER_HOST] = HOST_STANDALONE
+    args_instance[SER_PORT] = PORT_STANDALONE
+    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+    args_standalone = args_instance.copy()
+    standalone.allocate(args_standalone)
+    instance_standalone = standalone.exists()
+    if instance_standalone:
+        standalone.delete()
+    standalone.create()
+    standalone.open()
+
+    # Delete each instance in the end
+    def fin():
+        standalone.delete()
+    request.addfinalizer(fin)
+
+    return TopologyStandalone(standalone)
+
+
+def test_ticket47976_init(topology):
+    """Create mep definitions and templates"""
+
+    try:
+        topology.standalone.add_s(Entry((PEOPLE_DN, {
+                                            'objectclass': "top extensibleObject".split(),
+                                            'ou': 'people'})))
+    except ldap.ALREADY_EXISTS:
+        pass
+    try:
+        topology.standalone.add_s(Entry((GROUPS_DN, {
+                                            'objectclass': "top extensibleObject".split(),
+                                            'ou': GROUPS_OU})))
+    except ldap.ALREADY_EXISTS:
+        pass
+    topology.standalone.add_s(Entry((DEFINITIONS_DN, {
+                                            'objectclass': "top nsContainer".split(),
+                                            'cn': DEFINITIONS_CN})))
+    topology.standalone.add_s(Entry((TEMPLATES_DN, {
+                                            'objectclass': "top nsContainer".split(),
+                                            'cn': TEMPLATES_CN})))
+    topology.standalone.add_s(Entry((MANAGED_GROUP_DEF_DN, {
+                                        'objectclass': "top extensibleObject".split(),
+                                        'cn': MANAGED_GROUP_DEF_CN,
+                                        'originScope': PEOPLE_DN,
+                                        'originFilter': '(objectclass=posixAccount)',
+                                        'managedBase': GROUPS_DN,
+                                        'managedTemplate': MANAGED_GROUP_MEP_TMPL_DN})))
+
+    topology.standalone.add_s(Entry((MANAGED_GROUP_TEMPLATES_DN, {
+                                            'objectclass': "top nsContainer".split(),
+                                            'cn': MANAGED_GROUP_TEMPLATES_CN})))
+
+    topology.standalone.add_s(Entry((MANAGED_GROUP_MEP_TMPL_DN, {
+                                            'objectclass': "top mepTemplateEntry".split(),
+                                            'cn': MANAGED_GROUP_MEP_TMPL_CN,
+                                            'mepRDNAttr': 'cn',
+                                            'mepStaticAttr': ['objectclass: posixGroup',
+                                                              'objectclass: extensibleObject'],
+                                            'mepMappedAttr': ['cn: $cn|uid: $cn',
+                                                              'gidNumber: $uidNumber']})))
+
+
+    topology.standalone.plugins.enable(name=PLUGIN_MANAGED_ENTRY)
+    topology.standalone.restart(timeout=10)
+
+
+def test_ticket47976_1(topology):
+    mod = [(ldap.MOD_REPLACE, 'nsslapd-pluginConfigArea', DEFINITIONS_DN)]
+    topology.standalone.modify_s('cn=%s,cn=plugins,cn=config' % PLUGIN_MANAGED_ENTRY, mod)
+    topology.standalone.stop(timeout=10)
+    topology.standalone.start(timeout=10)
+    for cpt in range(MAX_ACCOUNTS):
+        name = "user%d" % (cpt)
+        topology.standalone.add_s(Entry(("uid=%s,%s" %(name, PEOPLE_DN), {
+                          'objectclass': 'top posixAccount extensibleObject'.split(),
+                          'uid': name,
+                          'cn': name,
+                          'uidNumber': '1',
+                          'gidNumber': '1',
+                          'homeDirectory': '/home/%s' % name
+                          })))
+
+
+def test_ticket47976_2(topology):
+    """It reimports the database with a very large page size
+    so all the entries (user and its private group).
+    """
+
+    log.info('Test complete')
+    mod = [(ldap.MOD_REPLACE, 'nsslapd-db-page-size', str(128*1024))]
+    topology.standalone.modify_s(DN_LDBM, mod)
+
+    # Get the the full path and name for our LDIF we will be exporting
+    log.info('Export LDIF file...')
+    ldif_dir = topology.standalone.get_ldif_dir()
+    ldif_file = ldif_dir + "/export.ldif"
+    args = {EXPORT_REPL_INFO: False,
+            TASK_WAIT: True}
+    exportTask = Tasks(topology.standalone)
+    try:
+        exportTask.exportLDIF(DEFAULT_SUFFIX, None, ldif_file, args)
+    except ValueError:
+        assert False
+    # import the new ldif file
+    log.info('Import LDIF file...')
+    importTask = Tasks(topology.standalone)
+    args = {TASK_WAIT: True}
+    try:
+        importTask.importLDIF(DEFAULT_SUFFIX, None, ldif_file, args)
+        os.remove(ldif_file)
+    except ValueError:
+        os.remove(ldif_file)
+        assert False
+
+
+def test_ticket47976_3(topology):
+    """A single delete of a user should hit 47976, because mep post op will
+    delete its related group.
+    """
+
+    log.info('Testing if the delete will hang or not')
+    #log.info("\n\nAttach\n\n debugger")
+    #time.sleep(60)
+    topology.standalone.set_option(ldap.OPT_TIMEOUT, 5)
+    try:
+        for cpt in range(MAX_ACCOUNTS):
+            name = "user%d" % (cpt)
+            topology.standalone.delete_s("uid=%s,%s" %(name, PEOPLE_DN))
+    except ldap.TIMEOUT as e:
+        log.fatal('Timeout... likely it hangs (47976)')
+        assert False
+
+    # check the entry has been deleted
+    for cpt in range(MAX_ACCOUNTS):
+        try:
+            name = "user%d" % (cpt)
+            topology.standalone.getEntry("uid=%s,%s" %(name, PEOPLE_DN), ldap.SCOPE_BASE, 'objectclass=*')
+            assert False
+        except ldap.NO_SUCH_OBJECT:
+            log.info('%s was correctly deleted' % name)
+            pass
+
+    assert cpt == (MAX_ACCOUNTS -1)
+
+
+if __name__ == '__main__':
+    # Run isolated
+    # -s for DEBUG mode
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main("-s %s" % CURRENT_FILE)

commit 7110db91e75f392f1c83643d9aa88895992d9c01
Author: Ludwig Krispenz <lkrispen at redhat.com>
Date:   Mon Aug 1 10:47:31 2016 +0200

    Ticket 48882 - server can hang in connection list processing
    
    Bug Description: if a thread holding the connection monitor
    		 is stuck in polling and the client doesn't
    		 respond, the main thread can be blocked on
    		 this connection when iterating the connection
    		 table.
    
    Fix Description: Implement a test and enter function for the connection
    		 monitor, so the main thread will never wait for a
    		 connection monitor already owned by an other thread
    
    https://fedorahosted.org/389/ticket/48882
    
    Reviewed by: Noriko, Thanks

diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 81a54cf..23c30c3 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -164,6 +164,67 @@ static void unfurl_banners(Connection_Table *ct,daemon_ports_t *ports, PRFileDes
 static int write_pid_file();
 static int init_shutdown_detect();
 
+/*
+ * NSPR has different implementations for PRMonitor, depending
+ * on the availble threading model
+ * The PR_TestAndEnterMonitor is not available for pthreads
+ * so this is a implementation based on the code in
+ * prmon.c adapted to resemble the implementation in ptsynch.c
+ *
+ * The function needs access to the elements of the PRMonitor struct.
+ * Therfor the pthread variant of PRMonitor is copied here.
+ */
+typedef struct MY_PRMonitor {
+    const char* name;
+    pthread_mutex_t lock;
+    pthread_t owner;
+    pthread_cond_t entryCV;
+    pthread_cond_t waitCV;
+    PRInt32 refCount;
+    PRUint32 entryCount;
+    PRIntn notifyTimes;
+} MY_PRMonitor;
+
+static PRBool MY_TestAndEnterMonitor(MY_PRMonitor *mon)
+{
+    pthread_t self = pthread_self();
+    PRStatus rv;
+    PRBool rc = PR_FALSE;
+
+    PR_ASSERT(mon != NULL);
+    rv = pthread_mutex_lock(&mon->lock);
+    if (rv != 0) {
+	slapi_log_error(SLAPI_LOG_FATAL ,"TestAndEnterMonitor",
+                        "Failed to acquire monitor mutex, error (%d)\n", rv);
+	return rc;
+    }
+    if (mon->entryCount != 0) {
+        if (pthread_equal(mon->owner, self))
+            goto done;
+        rv = pthread_mutex_unlock(&mon->lock);
+	if (rv != 0) {
+	    slapi_log_error(SLAPI_LOG_FATAL ,"TestAndEnterMonitor",
+                        "Failed to release monitor mutex, error (%d)\n", rv);
+	}
+        return PR_FALSE;
+    }
+    /* and now I have the monitor */
+    PR_ASSERT(mon->notifyTimes == 0);
+    PR_ASSERT((mon->owner) == 0);
+    mon->owner = self;
+
+done:
+    mon->entryCount += 1;
+    rv = pthread_mutex_unlock(&mon->lock);
+    if (rv == PR_SUCCESS) {
+	rc = PR_TRUE;
+    } else {
+	slapi_log_error(SLAPI_LOG_FATAL ,"TestAndEnterMonitor",
+                        "Failed to release monitor mutex, error (%d)\n", rv);
+	rc = PR_FALSE;
+    }
+    return rc;
+}
 /* Globals which are used to store the sockets between
  * calls to daemon_pre_setuid_init() and the daemon thread
  * creation. */
@@ -1552,7 +1613,13 @@ setup_pr_read_pds(Connection_Table *ct, PRFileDesc **n_tcps, PRFileDesc **s_tcps
 		}
 		else
 		{
-			PR_EnterMonitor(c->c_mutex);
+			/* we try to acquire the connection mutex, if it is already
+			 * acquired by another thread, don't wait
+			 */
+			if (PR_FALSE == MY_TestAndEnterMonitor((MY_PRMonitor *)c->c_mutex)) {
+				c = next;
+				continue;
+			}
 			if (c->c_flags & CONN_FLAG_CLOSING)
 			{
 				/* A worker thread has marked that this connection

commit 8d4000b79f2b7e7801df32cf64f0c84321c2adae
Author: Mark Reynolds <mreynolds at redhat.com>
Date:   Mon Aug 1 09:52:49 2016 -0400

    Ticket 48921 - Adding replication and reliability tests
    
    Description:  Add two replication stress tests, one with just 4 masters, and
                  the other with 4 masters, 2 hubs, and 4 consumers.
    
                  Add a connection stress test (for nunc-stans)
    
                  Also made some small changes to the create_test.py script
    
    https://fedorahosted.org/389/ticket/48921
    
    Reviewed by: ?

diff --git a/dirsrvtests/create_test.py b/dirsrvtests/create_test.py
index 19b838f..3898279 100755
--- a/dirsrvtests/create_test.py
+++ b/dirsrvtests/create_test.py
@@ -67,13 +67,13 @@ def writeFinalizer():
     TEST.write('    def fin():\n')
     TEST.write('        """')
     TEST.write('If we are debugging just stop the instances, ' +
-               'otherwise remove\n        them\n')
+               'otherwise remove them\n')
     TEST.write('        """\n')
     TEST.write('        if DEBUGGING:\n')
     writeInstanceOp('stop')
     TEST.write('        else:\n')
     writeInstanceOp('delete')
-    TEST.write('\n    request.addfinalizer(fin)')
+    TEST.write('    request.addfinalizer(fin)')
     TEST.write('\n\n')
 
 
@@ -191,7 +191,7 @@ if len(sys.argv) > 0:
     TEST.write('if DEBUGGING:\n')
     TEST.write('    logging.getLogger(__name__).setLevel(logging.DEBUG)\n')
     TEST.write('else:\n')
-    TEST.write('    logging.getLogger(__name__).setLevel(logging.INFO)\n\n\n')
+    TEST.write('    logging.getLogger(__name__).setLevel(logging.INFO)\n')
     TEST.write('log = logging.getLogger(__name__)\n\n\n')
 
     #
@@ -649,7 +649,6 @@ if len(sys.argv) > 0:
 
         writeFinalizer()
 
-        TEST.write('\n')
         TEST.write('    return TopologyStandalone(standalone')
         for idx in range(instances):
             idx += 1
diff --git a/dirsrvtests/tests/stress/reliabilty/reliab_conn_test.py b/dirsrvtests/tests/stress/reliabilty/reliab_conn_test.py
new file mode 100644
index 0000000..d3ee773
--- /dev/null
+++ b/dirsrvtests/tests/stress/reliabilty/reliab_conn_test.py
@@ -0,0 +1,289 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import pytest
+import signal
+import threading
+from lib389 import DirSrv
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from lib389.utils import *
+
+DEBUGGING = False
+
+if DEBUGGING:
+    logging.getLogger(__name__).setLevel(logging.DEBUG)
+else:
+    logging.getLogger(__name__).setLevel(logging.INFO)
+log = logging.getLogger(__name__)
+
+MAX_CONNS = 10000000
+MAX_THREADS = 20
+STOP = False
+HOSTNAME = DirSrvTools.getLocalhost()
+PORT = 389
+
+
+class TopologyStandalone(object):
+    """The DS Topology Class"""
+    def __init__(self, standalone):
+        """Init"""
+        standalone.open()
+        self.standalone = standalone
+
+
+ at pytest.fixture(scope="module")
+def topology(request):
+    """Create DS Deployment"""
+
+    # Creating standalone instance ...
+    if DEBUGGING:
+        standalone = DirSrv(verbose=True)
+    else:
+        standalone = DirSrv(verbose=False)
+    args_instance[SER_HOST] = HOST_STANDALONE
+    args_instance[SER_PORT] = PORT_STANDALONE
+    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+    args_standalone = args_instance.copy()
+    standalone.allocate(args_standalone)
+    instance_standalone = standalone.exists()
+    if instance_standalone:
+        standalone.delete()
+    standalone.create()
+    standalone.open()
+
+    def fin():
+        """If we are debugging just stop the instances, otherwise remove them
+        """
+        if DEBUGGING:
+            standalone.stop()
+        else:
+            standalone.delete()
+    request.addfinalizer(fin)
+
+    return TopologyStandalone(standalone)
+
+
+def signalHandler(signal, frame):
+    """
+    handle control-C cleanly
+    """
+    global STOP
+    STOP = True
+    sys.exit(0)
+
+
+def init(inst):
+    """Set the idle timeout, and add sample entries
+    """
+
+    try:
+        inst.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
+                                   'nsslapd-idletimeout',
+                                   '5')])
+    except ldap.LDAPError as e:
+        log.fatal('Failed to set idletimeout: ' + str(e))
+        assert False
+
+    try:
+        inst.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
+                                   'nsslapd-enable-nunc-stans',
+                                   'on')])
+    except ldap.LDAPError as e:
+        log.fatal('Failed to enable nunc-stans: ' + str(e))
+        assert False
+
+    for idx in range(0, 9):
+        user_dn = 'uid=entry%d,%s' % (idx, DEFAULT_SUFFIX)
+        try:
+            inst.add_s(Entry((user_dn,
+                {'objectclass': ['top', 'extensibleObject'],
+                 'uid': 'entry%d' % idx,
+                 'cn': 'entry%d' % idx,
+                 'userpassword': 'password'})))
+        except ldap.LDAPError as e:
+            log.fatal('Failed to add user entry (%s): %s' % (user_dn, str(e)))
+            assert False
+
+    inst.restart()
+
+
+class BindOnlyConn(threading.Thread):
+    """This class opens and closes connections
+    """
+    def __init__(self, inst):
+        """Initialize the thread class with the server instance info"""
+        threading.Thread.__init__(self)
+        self.daemon = True
+        self.inst = inst
+
+    def run(self):
+        """Keep opening and closing connections"""
+        idx = 0
+        err_count = 0
+        global STOP
+        while idx < MAX_CONNS and not STOP:
+            try:
+                conn = self.inst.openConnection()
+                conn.unbind_s()
+                time.sleep(.2)
+                err_count = 0
+            except ldap.LDAPError as e:
+                err_count += 1
+                if err_count > 3:
+                    log.error('BindOnlyConn exiting thread: %s' %
+                          (str(e)))
+                    return
+                time.sleep(.4)
+            idx += 1
+
+
+class IdleConn(threading.Thread):
+    """This class opens and closes connections
+    """
+    def __init__(self, inst):
+        """Initialize the thread class withte server isntance info"""
+        threading.Thread.__init__(self)
+        self.daemon = True
+        self.inst = inst
+
+    def run(self):
+        """Assume idleTimeout is set to less than 10 seconds
+        """
+        idx = 0
+        err_count = 0
+        global STOP
+        while idx < (MAX_CONNS / 10) and not STOP:
+            try:
+                conn = self.inst.openConnection()
+                conn.simple_bind_s('uid=entry0,dc=example,dc=com', 'password')
+                conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE,
+                              'uid=*')
+                time.sleep(10)
+                conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE,
+                              'cn=*')
+                conn.unbind_s()
+                time.sleep(.2)
+                err_count = 0
+            except ldap.LDAPError as e:
+                err_count += 1
+                if err_count > 3:
+                    log.error('IdleConn exiting thread: %s' %
+                              (str(e)))
+                    return
+                time.sleep(.4)
+            idx += 1
+
+
+class LongConn(threading.Thread):
+    """This class opens and closes connections to a specified server
+    """
+    def __init__(self, inst):
+        """Initialize the thread class with the server instance info"""
+        threading.Thread.__init__(self)
+        self.daemon = True
+        self.inst = inst
+
+    def run(self):
+        """Assume idleTimeout is set to less than 10 seconds
+        """
+        idx = 0
+        err_count = 0
+        global STOP
+        while idx < MAX_CONNS and not STOP:
+            try:
+                conn = self.inst.openConnection()
+                conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE,
+                              'objectclass=*')
+                conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE,
+                              'uid=mark')
+                conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE,
+                              'cn=*')
+                conn.search_s('', ldap.SCOPE_BASE, 'objectclass=*')
+                conn.unbind_s()
+                time.sleep(.2)
+                err_count = 0
+            except ldap.LDAPError as e:
+                err_count += 1
+                if err_count > 3:
+                    log.error('LongConn search exiting thread: %s' %
+                              (str(e)))
+                    return
+                time.sleep(.4)
+            idx += 1
+
+
+def test_connection_load(topology):
+    """Send the server a variety of connections using many threads:
+        - Open, Bind, Close
+        - Open, Bind, Search, wait to trigger idletimeout, Search, Close
+        - Open, Bind, Search, Search, Search, Close
+    """
+
+    # setup the control-C signal handler
+    signal.signal(signal.SIGINT, signalHandler)
+
+    # Set the config and add sample entries
+    log.info('Initializing setup...')
+    init(topology.standalone)
+
+    #
+    # Bind/Unbind Conn Threads
+    #
+    log.info('Launching Bind-Only Connection threads...')
+    threads = []
+    idx = 0
+    while idx < MAX_THREADS:
+        threads.append(BindOnlyConn(topology.standalone))
+        idx += 1
+    for thread in threads:
+        thread.start()
+        time.sleep(0.1)
+
+    #
+    # Idle Conn Threads
+    #
+    log.info('Launching Idle Connection threads...')
+    idx = 0
+    idle_threads = []
+    while idx < MAX_THREADS:
+        idle_threads.append(IdleConn(topology.standalone))
+        idx += 1
+    for thread in idle_threads:
+        thread.start()
+        time.sleep(0.1)
+
+    #
+    # Long Conn Threads
+    #
+    log.info('Launching Long Connection threads...')
+    idx = 0
+    long_threads = []
+    while idx < MAX_THREADS:
+        long_threads.append(LongConn(topology.standalone))
+        idx += 1
+    for thread in long_threads:
+        thread.start()
+        time.sleep(0.1)
+
+    #
+    # Now wait for all the threads to complete
+    #
+    log.info('Waiting for threads to finish...')
+    while threading.active_count() > 0:
+        time.sleep(1)
+
+    log.info('Done')
+
+
+if __name__ == '__main__':
+    # Run isolated
+    # -s for DEBUG mode
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main("-s %s" % CURRENT_FILE)
+
diff --git a/dirsrvtests/tests/stress/replication/mmr_01_4m-2h-4c_test.py b/dirsrvtests/tests/stress/replication/mmr_01_4m-2h-4c_test.py
new file mode 100644
index 0000000..95c0b71
--- /dev/null
+++ b/dirsrvtests/tests/stress/replication/mmr_01_4m-2h-4c_test.py
@@ -0,0 +1,969 @@
+import os
+import sys
+import time
+import datetime
+import ldap
+import logging
+import pytest
+import threading
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from lib389.utils import *
+from lib389.repltools import ReplTools
+
+logging.getLogger(__name__).setLevel(logging.INFO)
+log = logging.getLogger(__name__)
+
+DEBUGGING = False
+ADD_DEL_COUNT = 500
+MAX_LOOPS = 5
+TEST_CONVERGE_LATENCY = True
+CONVERGENCE_TIMEOUT = '60'
+master_list = []
+hub_list = []
+con_list = []
+TEST_START = time.time()
+
+LAST_DN_IDX = ADD_DEL_COUNT - 1
+LAST_DN_M1 = 'DEL dn="uid=master_1-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX)
+LAST_DN_M2 = 'DEL dn="uid=master_2-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX)



More information about the Pkg-fedora-ds-maintainers mailing list