[Pkg-fedora-ds-maintainers] 389-ds-base: Changes to 'master'

Timo Aaltonen tjaalton at moszumanska.debian.org
Wed May 10 06:28:38 UTC 2017


 debian/changelog                       |    7 
 debian/patches/fix-upstream-49245.diff |  544 +++++++++++++++++++++++++++++++++
 debian/patches/series                  |    1 
 3 files changed, 552 insertions(+)

New commits:
commit 7961e81777534d265a11dbd8da1c4539ff132098
Author: Timo Aaltonen <tjaalton at debian.org>
Date:   Wed May 10 09:26:03 2017 +0300

    releasing package 389-ds-base version 1.3.5.17-2

diff --git a/debian/changelog b/debian/changelog
index 78c02c8..19c5b85 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,9 +1,9 @@
-389-ds-base (1.3.5.17-2) UNRELEASED; urgency=medium
+389-ds-base (1.3.5.17-2) unstable; urgency=medium
 
   * fix-upstream-49245.diff: Pull commits from upstream 1.3.5.x, which
     remove the rest of the asm code. (Closes: #862194)
 
- -- Timo Aaltonen <tjaalton at debian.org>  Wed, 10 May 2017 09:23:19 +0300
+ -- Timo Aaltonen <tjaalton at debian.org>  Wed, 10 May 2017 09:25:03 +0300
 
 389-ds-base (1.3.5.17-1) unstable; urgency=medium
 

commit af1ca7c86166ebe1bfe38de3abeddb5a4925cf0c
Author: Timo Aaltonen <tjaalton at debian.org>
Date:   Wed May 10 09:24:58 2017 +0300

    fix-upstream-49245.diff: Pull commits from upstream 1.3.5.x, which remove the rest of the asm code. (Closes: #862194)

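In short, the upstream commits below drop the hand-written SPARC/x86 assembly paths and the per-architecture ATOMIC_64BIT_OPERATIONS defines, and instead probe at configure time for the GCC __atomic_*_8 builtins, with a pthread-mutex fallback on toolchains that lack them. A minimal standalone sketch of that builtin-or-mutex pattern follows; the demo_counter/demo_add names are made up for illustration only, the real code is in ldap/servers/slapd/slapi_counter.c as shown in the patch below.

/* Sketch only: ATOMIC_64BIT_OPERATIONS stands in for the configure result. */
#include <inttypes.h>
#include <stdio.h>
#include <pthread.h>

typedef struct demo_counter {
    uint64_t value;
#ifndef ATOMIC_64BIT_OPERATIONS
    pthread_mutex_t lock;            /* fallback when the builtins are missing */
#endif
} demo_counter;

static void demo_init(demo_counter *c)
{
    c->value = 0;
#ifndef ATOMIC_64BIT_OPERATIONS
    pthread_mutex_init(&c->lock, NULL);
#endif
}

static uint64_t demo_add(demo_counter *c, uint64_t addvalue)
{
#ifdef ATOMIC_64BIT_OPERATIONS
    /* GCC builtin: 64-bit atomic add, sequentially consistent */
    return __atomic_add_fetch_8(&c->value, addvalue, __ATOMIC_SEQ_CST);
#else
    uint64_t newvalue;
    pthread_mutex_lock(&c->lock);
    c->value += addvalue;
    newvalue = c->value;
    pthread_mutex_unlock(&c->lock);
    return newvalue;
#endif
}

int main(void)
{
    demo_counter c;
    demo_init(&c);
    demo_add(&c, 100);
    printf("%" PRIu64 "\n", demo_add(&c, 1));    /* prints 101 */
    return 0;
}

Built with e.g. "gcc -DATOMIC_64BIT_OPERATIONS demo.c" (or without the define, plus -lpthread, for the mutex path) both variants should behave identically.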
diff --git a/debian/changelog b/debian/changelog
index 54db9b0..78c02c8 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,10 @@
+389-ds-base (1.3.5.17-2) UNRELEASED; urgency=medium
+
+  * fix-upstream-49245.diff: Pull commits from upstream 1.3.5.x, which
+    remove the rest of the asm code. (Closes: #862194)
+
+ -- Timo Aaltonen <tjaalton at debian.org>  Wed, 10 May 2017 09:23:19 +0300
+
 389-ds-base (1.3.5.17-1) unstable; urgency=medium
 
   * New upstream bugfix release.
diff --git a/debian/patches/fix-upstream-49245.diff b/debian/patches/fix-upstream-49245.diff
new file mode 100644
index 0000000..9c78400
--- /dev/null
+++ b/debian/patches/fix-upstream-49245.diff
@@ -0,0 +1,544 @@
+diff --git a/Makefile.am b/Makefile.am
+index d54a2cc..f885eea 100644
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -1059,9 +1059,6 @@ libslapd_la_SOURCES = ldap/servers/slapd/add.c \
+ 	$(libavl_a_SOURCES)
+ 
+ libslapd_la_CPPFLAGS = $(PLUGIN_CPPFLAGS) @sasl_inc@ @db_inc@ @svrcore_inc@ @kerberos_inc@ @pcre_inc@
+-if SPARC
+-libslapd_la_SOURCES += ldap/servers/slapd/slapi_counter_sunos_sparcv9.S
+-endif
+ libslapd_la_LIBADD = $(LDAPSDK_LINK) $(SASL_LINK) $(NUNC_STANS_LINK) $(SVRCORE_LINK) $(NSS_LINK) $(NSPR_LINK) $(KERBEROS_LINK) $(PCRE_LINK) $(THREADLIB) $(SYSTEMD_LINK)
+ 
+ 
+diff --git a/configure.ac b/configure.ac
+index 846e3b4..9b6377a 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -519,11 +519,14 @@ case $host in
+     case $host in
+       i*86-*-linux*)
+         AC_DEFINE([CPU_x86], [], [cpu type x86])
+-        AC_DEFINE([ATOMIC_64BIT_OPERATIONS], [1], [enabling atomic counter])
+         ;;
+       x86_64-*-linux*)
+-        AC_DEFINE([CPU_x86_64], [], [cpu type x86_64])
+-        AC_DEFINE([ATOMIC_64BIT_OPERATIONS], [1], [enabling atomic counter])
++        AC_DEFINE([CPU_x86_64], [1], [cpu type x86_64])
++
++        # This turns on and off LFDS inside of libsds
++        # wibrown -- 2017-02-21 disabled temporarily
++        # with_atomic_queue="yes"
++        # AC_DEFINE([ATOMIC_QUEUE_OPERATIONS], [1], [enabling atomic queue operations])
+         ;;
+       aarch64-*-linux*)
+         AC_DEFINE([CPU_arm], [], [cpu type arm])
+@@ -532,16 +535,6 @@ case $host in
+         AC_DEFINE([CPU_arm], [], [cpu type arm])
+         ;;
+     esac
+-    AC_MSG_CHECKING([for GCC provided 64-bit atomic bool cas function ...])
+-    AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],
+-                                    [[long long ptrval = 0, val = 0, newval = 1; (void)__sync_bool_compare_and_swap_8(&ptrval, val, newval);]])],
+-                   [AC_DEFINE([HAVE_64BIT_ATOMIC_CAS_FUNC], [1], [have 64-bit atomic bool compare and swap function provided by gcc])AC_MSG_RESULT([yes])],
+-                   [AC_MSG_RESULT([no])])
+-    AC_MSG_CHECKING([for GCC provided 64-bit atomic ops functions ...])
+-    AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],
+-                                    [[long long ptrval = 0, val = 0; (void)__sync_add_and_fetch_8(&ptrval, val);]])],
+-                   [AC_DEFINE([HAVE_64BIT_ATOMIC_OP_FUNCS], [1], [have 64-bit atomic operation functions provided by gcc])AC_MSG_RESULT([yes])],
+-                   [AC_MSG_RESULT([no])])
+     # some programs use the native thread library directly
+     THREADLIB=-lpthread
+     AC_SUBST([THREADLIB], [$THREADLIB])
+@@ -576,7 +569,6 @@ case $host in
+     AC_DEFINE([_POSIX_C_SOURCE], [199506L], [POSIX revision])
+     AC_DEFINE([_HPUX_SOURCE], [1], [Source namespace])
+     AC_DEFINE([_INCLUDE_STDC__SOURCE_199901], [1], [to pick up all of the printf format macros in inttypes.h])
+-    AC_DEFINE([ATOMIC_64BIT_OPERATIONS], [1], [enabling atomic counter])
+     # assume 64 bit
+     initconfigdir="/$PACKAGE_NAME/config"
+     perlexec='/opt/perl_64/bin/perl'
+@@ -611,12 +603,11 @@ dnl Cstd and Crun are required to link any C++ related code
+     initdir='$(sysconfdir)/init.d'
+     case $host in
+       i?86-*-solaris2.1[[0-9]]*)
+-dnl I dont know why i386 need this explicit
++        dnl I dont know why i386 need this explicit
+         AC_DEFINE([HAVE_GETPEERUCRED], [1], [have getpeerucred])
+         ;;
+       sparc-*-solaris*)
+-dnl includes some assembler stuff in counter.o
+-        AC_DEFINE([ATOMIC_64BIT_OPERATIONS], [1], [enabling atomic counter])
++        dnl includes some assembler stuff in counter.o
+         AC_DEFINE([CPU_sparc], [], [cpu type sparc])
+         TARGET='SPARC'
+         ;;
+@@ -627,6 +618,30 @@ dnl includes some assembler stuff in counter.o
+     ;;
+ esac
+ 
++AC_MSG_CHECKING([for GCC provided 64-bit atomic operations])
++AC_LINK_IFELSE([AC_LANG_PROGRAM([[
++    #include <inttypes.h>
++    ]],
++    [[
++    uint64_t t_counter = 0;
++    uint64_t t_oldval = 0;
++    uint64_t t_newval = 1;
++
++    __atomic_compare_exchange_8(&t_counter, &t_oldval, t_newval, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
++    __atomic_add_fetch_8(&t_counter, t_newval, __ATOMIC_SEQ_CST);
++    __atomic_sub_fetch_8(&t_counter, t_newval, __ATOMIC_SEQ_CST);
++    __atomic_load(&t_counter, &t_oldval, __ATOMIC_SEQ_CST);
++    return 0;
++    ]])],
++    [
++        AC_DEFINE([ATOMIC_64BIT_OPERATIONS], [1], [have 64-bit atomic operation functions provided by gcc])
++        AC_MSG_RESULT([yes])
++    ],
++    [
++        AC_MSG_RESULT([no])
++    ]
++)
++
+ # cmd line overrides default setting above
+ if test -n "$with_initddir" ; then
+    initdir="$with_initddir"
+diff --git a/ldap/servers/slapd/slapi_counter.c b/ldap/servers/slapd/slapi_counter.c
+index d0696eb..9904fe9 100644
+--- a/ldap/servers/slapd/slapi_counter.c
++++ b/ldap/servers/slapd/slapi_counter.c
+@@ -12,17 +12,22 @@
+ 
+ #include "slap.h"
+ 
++#ifndef ATOMIC_64BIT_OPERATIONS
++#include <pthread.h>
++#endif
++
+ #ifdef HPUX
+-#ifdef ATOMIC_64BIT_OPERATIONS
+ #include <machine/sys/inline.h>
+ #endif
+-#endif
+ 
+ /*
+  * Counter Structure
+  */
+ typedef struct slapi_counter {
+     uint64_t value;
++#ifndef ATOMIC_64BIT_OPERATIONS
++    pthread_mutex_t _lock;
++#endif
+ } slapi_counter;
+ 
+ /*
+@@ -53,6 +58,9 @@ void slapi_counter_init(Slapi_Counter *counter)
+     if (counter != NULL) {
+         /* Set the value to 0. */
+         slapi_counter_set_value(counter, 0);
++#ifndef ATOMIC_64BIT_OPERATIONS
++        pthread_mutex_init(&(counter->_lock), NULL);
++#endif
+     }
+ }
+ 
+@@ -65,6 +73,9 @@ void slapi_counter_init(Slapi_Counter *counter)
+ void slapi_counter_destroy(Slapi_Counter **counter)
+ {
+     if ((counter != NULL) && (*counter != NULL)) {
++#ifndef ATOMIC_64BIT_OPERATIONS
++        pthread_mutex_destroy(&((*counter)->_lock));
++#endif
+         slapi_ch_free((void **)counter);
+     }
+ }
+@@ -99,17 +110,15 @@ uint64_t slapi_counter_decrement(Slapi_Counter *counter)
+ uint64_t slapi_counter_add(Slapi_Counter *counter, uint64_t addvalue)
+ {
+     uint64_t newvalue = 0;
+-#ifdef HPUX
+-    uint64_t prev = 0;
+-#endif
+ 
+     if (counter == NULL) {
+         return newvalue;
+     }
+-
+-#ifndef HPUX
++#ifdef ATOMIC_64BIT_OPERATIONS
+     newvalue = __atomic_add_fetch_8(&(counter->value), addvalue, __ATOMIC_SEQ_CST);
+ #else
++#ifdef HPUX
++    uint64_t prev = 0;
+     /* fetchadd only works with values of 1, 4, 8, and 16.  In addition, it requires
+      * it's argument to be an integer constant. */
+     if (addvalue == 1) {
+@@ -133,6 +142,12 @@ uint64_t slapi_counter_add(Slapi_Counter *counter, uint64_t addvalue)
+            _Asm_mov_to_ar(_AREG_CCV, prev);
+         } while (prev != _Asm_cmpxchg(_FASZ_D, _SEM_ACQ, &(counter->value), newvalue, _LDHINT_NONE));
+     }
++#else
++    pthread_mutex_lock(&(counter->_lock));
++    counter->value += addvalue;
++    newvalue = counter->value;
++    pthread_mutex_unlock(&(counter->_lock));
++#endif
+ #endif
+ 
+     return newvalue;
+@@ -147,17 +162,16 @@ uint64_t slapi_counter_add(Slapi_Counter *counter, uint64_t addvalue)
+ uint64_t slapi_counter_subtract(Slapi_Counter *counter, uint64_t subvalue)
+ {
+     uint64_t newvalue = 0;
+-#ifdef HPUX
+-    uint64_t prev = 0;
+-#endif
+ 
+     if (counter == NULL) {
+         return newvalue;
+     }
+ 
+-#ifndef HPUX
++#ifdef ATOMIC_64BIT_OPERATIONS
+     newvalue = __atomic_sub_fetch_8(&(counter->value), subvalue, __ATOMIC_SEQ_CST);
+ #else
++#ifdef HPUX
++    uint64_t prev = 0;
+     /* fetchadd only works with values of -1, -4, -8, and -16.  In addition, it requires
+      * it's argument to be an integer constant. */
+     if (subvalue == 1) {
+@@ -181,6 +195,12 @@ uint64_t slapi_counter_subtract(Slapi_Counter *counter, uint64_t subvalue)
+            _Asm_mov_to_ar(_AREG_CCV, prev);
+         } while (prev != _Asm_cmpxchg(_FASZ_D, _SEM_ACQ, &(counter->value), newvalue, _LDHINT_NONE));
+     }
++#else
++    pthread_mutex_lock(&(counter->_lock));
++    counter->value -= subvalue;
++    newvalue = counter->value;
++    pthread_mutex_unlock(&(counter->_lock));
++#endif
+ #endif
+ 
+     return newvalue;
+@@ -199,58 +219,22 @@ uint64_t slapi_counter_set_value(Slapi_Counter *counter, uint64_t newvalue)
+         return value;
+     }
+ 
+-#ifndef HPUX
+-/* Use our own inline assembly for an atomic set if
+- * the builtins aren't available. */
+-#if !HAVE_64BIT_ATOMIC_CAS_FUNC
+-    /*
+-     * %0 = counter->value
+-     * %1 = newvalue
+-     */
+-    __asm__ __volatile__(
+-#ifdef CPU_x86
+-        /* Save the PIC register */
+-        " pushl %%ebx;"
+-#endif /* CPU_x86 */
+-        /* Put value of counter->value in EDX:EAX */
+-        "retryset: movl %0, %%eax;"
+-        " movl 4%0, %%edx;"
+-        /* Put newval in ECX:EBX */
+-        " movl %1, %%ebx;"
+-        " movl 4+%1, %%ecx;"
+-        /* If EDX:EAX and counter-> are the same,
+-         * replace *ptr with ECX:EBX */
+-        " lock; cmpxchg8b %0;"
+-        " jnz retryset;"
+-#ifdef CPU_x86
+-        /* Restore the PIC register */
+-        " popl %%ebx"
+-#endif /* CPU_x86 */
+-        : "+o" (counter->value)
+-        : "m" (newvalue)
+-#ifdef CPU_x86
+-        : "memory", "eax", "ecx", "edx", "cc");
+-#else
+-        : "memory", "eax", "ebx", "ecx", "edx", "cc");
+-#endif
+-
+-    return newvalue;
+-#else /* HAVE_64BIT_ATOMIC_CAS_FUNC */
+-    while (1) {
+-        value = __atomic_load_8(&(counter->value), __ATOMIC_SEQ_CST);
+-        if (__atomic_compare_exchange_8(&(counter->value), &value, newvalue, PR_FALSE, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)){
+-            return newvalue;
+-        }
+-    }
+-#endif
++#ifdef ATOMIC_64BIT_OPERATIONS
++    __atomic_store_8(&(counter->value), newvalue, __ATOMIC_SEQ_CST);
+ #else /* HPUX */
++#ifdef HPUX
+     do {
+         value = counter->value;
+         /* Put value in a register for cmpxchg to compare against */
+         _Asm_mov_to_ar(_AREG_CCV, value);
+     } while (value != _Asm_cmpxchg(_FASZ_D, _SEM_ACQ, &(counter->value), newvalue, _LDHINT_NONE));
+-    return newvalue;
++#else
++    pthread_mutex_lock(&(counter->_lock));
++    counter->value = newvalue;
++    pthread_mutex_unlock(&(counter->_lock));
++#endif
+ #endif
++    return newvalue;
+ }
+ 
+ /*
+@@ -266,57 +250,20 @@ uint64_t slapi_counter_get_value(Slapi_Counter *counter)
+         return value;
+     }
+ 
+-#ifndef HPUX
+-/* Use our own inline assembly for an atomic get if
+- * the builtins aren't available. */
+-#if !HAVE_64BIT_ATOMIC_CAS_FUNC
+-    /*
+-     * %0 = counter->value
+-     * %1 = value
+-     */
+-    __asm__ __volatile__(
+-#ifdef CPU_x86
+-        /* Save the PIC register */
+-        " pushl %%ebx;"
+-#endif /* CPU_x86 */
+-        /* Put value of counter->value in EDX:EAX */
+-        "retryget: movl %0, %%eax;"
+-        " movl 4%0, %%edx;"
+-        /* Copy EDX:EAX to ECX:EBX */
+-        " movl %%eax, %%ebx;"
+-        " movl %%edx, %%ecx;"
+-        /* If EDX:EAX and counter->value are the same,
+-         * replace *ptr with ECX:EBX */
+-        " lock; cmpxchg8b %0;"
+-        " jnz retryget;"
+-        /* Put retrieved value into value */
+-        " movl %%ebx, %1;"
+-        " movl %%ecx, 4%1;"
+-#ifdef CPU_x86
+-        /* Restore the PIC register */
+-        " popl %%ebx"
+-#endif /* CPU_x86 */
+-        : "+o" (counter->value), "=m" (value)
+-        : 
+-#ifdef CPU_x86
+-        : "memory", "eax", "ecx", "edx", "cc");
+-#else
+-        : "memory", "eax", "ebx", "ecx", "edx", "cc");
+-#endif
+-#else  /* HAVE_64BIT_ATOMIC_CAS_FUNC */
+-    while (1) {
+-        value = __atomic_load_8(&(counter->value), __ATOMIC_SEQ_CST);
+-        if (__atomic_compare_exchange_8(&(counter->value), &value, value, PR_FALSE, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)){
+-            break;
+-        }
+-    }
+-#endif
++#ifdef ATOMIC_64BIT_OPERATIONS
++    value = __atomic_load_8(&(counter->value), __ATOMIC_SEQ_CST);
+ #else  /* HPUX */
++#ifdef HPUX
+     do {
+         value = counter->value;
+         /* Put value in a register for cmpxchg to compare against */
+         _Asm_mov_to_ar(_AREG_CCV, value);
+     } while (value != _Asm_cmpxchg(_FASZ_D, _SEM_ACQ, &(counter->value), value, _LDHINT_NONE));
++#else
++    pthread_mutex_lock(&(counter->_lock));
++    value = counter->value;
++    pthread_mutex_unlock(&(counter->_lock));
++#endif
+ #endif
+ 
+     return value;
+diff --git a/ldap/servers/slapd/slapi_counter_sunos_sparcv9.S b/ldap/servers/slapd/slapi_counter_sunos_sparcv9.S
+deleted file mode 100644
+index e582c2a..0000000
+--- a/ldap/servers/slapd/slapi_counter_sunos_sparcv9.S
++++ /dev/null
+@@ -1,105 +0,0 @@
+-! BEGIN COPYRIGHT BLOCK
+-! The Original Code is the Netscape Portable Runtime (NSPR).
+-!
+-! The Initial Developer of the Original Code is
+-! Netscape Communications Corporation.
+-! Portions created by the Initial Developer are Copyright (C) 1998-2000
+-! the Initial Developer. All Rights Reserved.
+-!
+-! The original code has been modified to support 64-bit atomic increments by
+-! Red Hat, Inc.  These portions are Copyright (C) 2008 Red Hat, Inc. All Rights
+-! reserved.
+-!
+-! License: GPL (version 3 or any later version).
+-! See LICENSE for details. 
+-! END COPYRIGHT BLOCK
+-!
+-
+-#define _ASM 1 /* force to set an assembler code macro _ASM */
+-#include <sys/asm_linkage.h>
+-
+-!  ======================================================================
+-!
+-!  Perform the sequence a = b atomically with respect to other
+-!  fetch-and-stores to location a in a wait-free fashion.
+-!
+-!  usage : old_val = PR_AtomicSet(address, newval)
+-!
+-!  -----------------------
+-!  Note on REGISTER USAGE:
+-!  as this is a LEAF procedure, a new stack frame is not created;
+-!  we use the caller's stack frame so what would normally be %i (input)
+-!  registers are actually %o (output registers).  Also, we must not
+-!  overwrite the contents of %l (local) registers as they are not
+-!  assumed to be volatile during calls.
+-!  -----------------------
+-
+-        ENTRY(_sparcv9_AtomicSet)            ! standard assembler/ELF prologue
+-
+-retryAS:
+-        ldx     [%o0], %o2              ! set o2 to the current value
+-        mov     %o1, %o3                ! set up the new value
+-        casx    [%o0], %o2, %o3         ! atomically set if o0 hasn't changed
+-        cmp     %o2, %o3                ! see if we set the value
+-        bne     retryAS                 ! if not, try again
+-        nop                             ! empty out the branch pipeline
+-        retl                            ! return back to the caller
+-        mov     %o3, %o0                ! set the return code to the prev value
+-
+-        SET_SIZE(_sparcv9_AtomicSet)    ! standard assembler/ELF epilogue
+-
+-!
+-!  end
+-!
+-!  ======================================================================
+-!
+-!  Perform the sequence a = a + b atomically with respect to other
+-!  fetch-and-adds to location a in a wait-free fashion.
+-!
+-!  usage : newval = PR_AtomicAdd(address, val)
+-!  return: the value after addition
+-!
+-        ENTRY(_sparcv9_AtomicAdd)      ! standard assembler/ELF prologue
+-
+-retryAA:
+-        ldx     [%o0], %o2              ! set o2 to the current value
+-        addx    %o2, %o1, %o3           ! calc the new value
+-        mov     %o3, %o4                ! save the return value
+-        casx    [%o0], %o2, %o3         ! atomically set if o0 hasn't changed
+-        cmp     %o2, %o3                ! see if we set the value
+-        bne     retryAA                 ! if not, try again
+-        nop                             ! empty out the branch pipeline
+-        retl                            ! return back to the caller
+-        mov     %o4, %o0                ! set the return code to the new value
+-
+-        SET_SIZE(_sparcv9_AtomicAdd)    ! standard assembler/ELF epilogue
+-
+-!
+-!  end
+-!
+-!  ======================================================================
+-!
+-!  Perform the sequence a = a - b atomically with respect to other
+-!  fetch-and-subs to location a in a wait-free fashion.
+-!
+-!  usage : newval = PR_AtomicSub(address, val)
+-!  return: the value after addition
+-!
+-        ENTRY(_sparcv9_AtomicSub)      ! standard assembler/ELF prologue
+-
+-retryAU:
+-        ldx     [%o0], %o2              ! set o2 to the current value
+-        subx    %o2, %o1, %o3           ! calc the new value
+-        mov     %o3, %o4                ! save the return value
+-        casx    [%o0], %o2, %o3         ! atomically set if o0 hasn't changed
+-        cmp     %o2, %o3                ! see if we set the value
+-        bne     retryAU                 ! if not, try again
+-        nop                             ! empty out the branch pipeline
+-        retl                            ! return back to the caller
+-        mov     %o4, %o0                ! set the return code to the new value
+-
+-        SET_SIZE(_sparcv9_AtomicSub)    ! standard assembler/ELF epilogue
+-
+-!
+-!  end
+-!
+diff --git a/test/libslapd/counters/atomic.c b/test/libslapd/counters/atomic.c
+new file mode 100644
+index 0000000..72fad88
+--- /dev/null
++++ b/test/libslapd/counters/atomic.c
+@@ -0,0 +1,69 @@
++/** BEGIN COPYRIGHT BLOCK
++ * Copyright (C) 2017 Red Hat, Inc.
++ * All rights reserved.
++ *
++ * License: GPL (version 3 or any later version).
++ * See LICENSE for details.
++ * END COPYRIGHT BLOCK **/
++
++#include "../../test_slapd.h"
++
++void
++test_libslapd_counters_atomic_usage(void **state __attribute__((unused))) {
++    Slapi_Counter *tc = slapi_counter_new();
++
++    uint64_t value = 0;
++    /* Check that it starts as 0 */
++    value = slapi_counter_get_value(tc);
++    assert_true(value == 0);
++    /* Increment */
++    slapi_counter_increment(tc);
++    value = slapi_counter_get_value(tc);
++    assert_true(value == 1);
++    /* add */
++    slapi_counter_add(tc, 100);
++    value = slapi_counter_get_value(tc);
++    assert_true(value == 101);
++    /* set */
++    slapi_counter_set_value(tc, 200);
++    value = slapi_counter_get_value(tc);
++    assert_true(value == 200);
++    /* dec */
++    slapi_counter_decrement(tc);
++    value = slapi_counter_get_value(tc);
++    assert_true(value == 199);
++    /* sub */
++    slapi_counter_subtract(tc, 99);
++    value = slapi_counter_get_value(tc);
++    assert_true(value == 100);
++    /* init */
++    slapi_counter_init(tc);
++    value = slapi_counter_get_value(tc);
++    assert_true(value == 0);
++
++
++    slapi_counter_destroy(&tc);
++
++    /* We could attempt a more complex thread test later? */
++
++}
++
++void
++test_libslapd_counters_atomic_overflow(void **state __attribute__((unused))) {
++    Slapi_Counter *tc = slapi_counter_new();
++    /* This is intmax ... */
++    uint32_t value_32 = 0xFFFFFFFF;
++    uint64_t value = 0;
++
++    slapi_counter_set_value(tc, (uint64_t)value_32);
++    value = slapi_counter_get_value(tc);
++    assert_true(value == (uint64_t)value_32);
++
++    slapi_counter_increment(tc);
++    value = slapi_counter_get_value(tc);
++    assert_true(value != 0);
++    assert_true(value > (uint64_t)value_32);
++
++    slapi_counter_destroy(&tc);
++}
++
diff --git a/debian/patches/series b/debian/patches/series
index 9903469..9702f7b 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -8,3 +8,4 @@ fix-saslpath.diff
 reproducible-build.diff
 fix-systemctl-path.diff
 fix-48986-cve-2017-2591.diff
+fix-upstream-49245.diff

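For reference, the configure.ac hunk in the patch replaces the two old __sync_* link tests with a single probe for the __atomic builtins. Roughly the same check can be reproduced by hand with a small program mirroring the AC_LINK_IFELSE body; the file name check_atomics.c is just an example.

/* Standalone approximation of the configure probe added above; if this
 * links cleanly (some targets may additionally need -latomic), the
 * builtin code path in slapi_counter.c should be usable. */
#include <inttypes.h>

int main(void)
{
    uint64_t t_counter = 0;
    uint64_t t_oldval = 0;
    uint64_t t_newval = 1;

    __atomic_compare_exchange_8(&t_counter, &t_oldval, t_newval, 0,
                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    __atomic_add_fetch_8(&t_counter, t_newval, __ATOMIC_SEQ_CST);
    __atomic_sub_fetch_8(&t_counter, t_newval, __ATOMIC_SEQ_CST);
    __atomic_load(&t_counter, &t_oldval, __ATOMIC_SEQ_CST);
    return 0;
}

e.g. "gcc -o check_atomics check_atomics.c && echo ok".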

