Hello community,

here is the log from the commit of package 389-ds for openSUSE:Factory checked 
in at 2020-12-03 18:43:01
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/389-ds (Old)
 and      /work/SRC/openSUSE:Factory/.389-ds.new.5913 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "389-ds"

Thu Dec  3 18:43:01 2020 rev:37 rq:852782 version:2.0.1~git0.b557f5daa

Changes:
--------
--- /work/SRC/openSUSE:Factory/389-ds/389-ds.changes    2020-11-12 
22:49:42.218787656 +0100
+++ /work/SRC/openSUSE:Factory/.389-ds.new.5913/389-ds.changes  2020-12-03 
18:43:56.210253439 +0100
@@ -1,0 +2,17 @@
+Tue Dec 01 22:58:32 UTC 2020 - [email protected]
+
+- Rust is a hard-requirement of 2.0.0 series, so enable-rust flags removed
+- Perl has been completely removed in 2.0.0, enable-perl removed and lib389
+  is the default. Perl tools have not been included in SUSE since 1.4.1.x
+- Update to version 2.0.1~git0.b557f5daa:
+  * Bump version to 2.0.1
+  * Issue 4420 - change NVR to use X.X.X instead of X.X.X.X
+  * Issue 4391 - DSE config modify does not call be_postop (#4394)
+  * Issue 4218 - Verify the new wtime and optime access log keywords (#4397)
+  * Issue 4176 - CL trimming causes high CPU
+  * ticket 2058: Add keep alive entry after on-line initialization - second 
version (#4399)
+  * Issue 4403 RFE - OpenLDAP pw hash migration tests (#4408)
+  * Bump version to 2.0.0
+  * Bump version to 1.4.5.0
+
+-------------------------------------------------------------------

Old:
----
  389-ds-base-1.4.4.8~git0.bf454ad07.tar.bz2

New:
----
  389-ds-base-2.0.1~git0.b557f5daa.tar.bz2

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ 389-ds.spec ++++++
--- /var/tmp/diff_new_pack.YcLlQ7/_old  2020-12-03 18:43:57.382254988 +0100
+++ /var/tmp/diff_new_pack.YcLlQ7/_new  2020-12-03 18:43:57.386254993 +0100
@@ -16,20 +16,6 @@
 #
 
 
-# bcond is confusingly backwards to what you expect - without means
-#  to ENABLE the option, with means to DISABLE it.
-%if (0%{?sle_version} > 150099) || (0%{?suse_version} > 1549)
-%bcond_without lib389
-%else
-%bcond_with    lib389
-%endif
-
-%if (0%{?sle_version} > 150299) || (0%{?suse_version} > 1549)
-%bcond_without rust
-%else
-%bcond_with    rust
-%endif
-
 %define use_python python3
 %define skip_python2 1
 %{?!python_module:%define python_module() python-%{**} python3-%{**}}
@@ -51,7 +37,7 @@
 %define svrcorelib libsvrcore0
 
 Name:           389-ds
-Version:        1.4.4.8~git0.bf454ad07
+Version:        2.0.1~git0.b557f5daa
 Release:        0
 Summary:        389 Directory Server
 License:        GPL-3.0-or-later AND MPL-2.0
@@ -60,10 +46,8 @@
 Source:         389-ds-base-%{version}.tar.bz2
 Source1:        extra-schema.tgz
 Source2:        LICENSE.openldap
-%if %{with rust}
 Source3:        vendor.tar.xz
 # Source4:        cargo_config
-%endif
 Source9:        %{name}-rpmlintrc
 Source10:       %{user_group}-user.conf
 # 389-ds does not support i686
@@ -89,17 +73,15 @@
 BuildRequires:  net-snmp-devel >= 5.1.2
 BuildRequires:  openldap2-devel
 # pam-devel is required by the pam passthru auth plug-in
-BuildRequires:  %{python_module devel}
-BuildRequires:  %{python_module setuptools}
-%if %{with lib389}
 BuildRequires:  %{python_module argcomplete}
 BuildRequires:  %{python_module argparse-manpage}
+BuildRequires:  %{python_module devel}
 BuildRequires:  %{python_module ldap >= 3}
 BuildRequires:  %{python_module pyasn1-modules}
 BuildRequires:  %{python_module pyasn1}
 BuildRequires:  %{python_module python-dateutil}
+BuildRequires:  %{python_module setuptools}
 BuildRequires:  %{python_module six}
-%endif
 BuildRequires:  pam-devel
 BuildRequires:  pkgconfig
 BuildRequires:  python-rpm-macros
@@ -116,11 +98,9 @@
 %if %{use_tcmalloc}
 BuildRequires:  pkgconfig(libtcmalloc)
 %endif
-BuildRequires:  rsync
-%if %{with rust}
 BuildRequires:  cargo
+BuildRequires:  rsync
 BuildRequires:  rust
-%endif
 Requires:       %{_sbindir}/service
 Requires:       acl
 # This is a requirement as it's the only known "safe" method of
@@ -128,18 +108,7 @@
 # ldaps.
 Requires:       cyrus-sasl-plain
 Requires:       db-utils
-%if %{with lib389}
 Requires:       lib389 = %{version}
-%else
-Requires:       bind-utils
-Requires:       perl(Mozilla::LDAP::API)
-Requires:       perl(Mozilla::LDAP::Conn)
-Requires:       perl(Mozilla::LDAP::Entry)
-Requires:       perl(Mozilla::LDAP::LDIF)
-Requires:       perl(Mozilla::LDAP::Utils)
-Requires:       perl(NetAddr::IP)
-Requires:       perl(Socket6)
-%endif
 # Needed for creating the ccache and some GSSAPI steps in sasl
 Requires:       krb5
 %sysusers_requires
@@ -203,7 +172,6 @@
 %description      snmp
 SNMP Agent for the 389 Directory Server base package.
 
-%if %{with lib389}
 %package -n lib389
 Summary:        389 Directory Server administration tools and library
 License:        GPL-3.0-or-later AND MPL-2.0
@@ -234,7 +202,6 @@
 %description -n lib389
 Python library for interacting with and administering 389
 Directory Server instances locally or remotely.
-%endif
 
 %package -n %{svrcorelib}
 Summary:        Secure PIN handling using NSS crypto
@@ -253,11 +220,7 @@
 
 # Extract the vendor.tar.gz. The -D -T here prevents removal of the sources
 # from the previous setup step.
-%if %{with rust}
-# mkdir .cargo
-# cp %{SOURCE4} .cargo/config
 %setup -q -n %{name}-base-%{version} -D -T -a 3
-%endif
 
 %build
 %sysusers_generate_pre %{SOURCE10} %{user_group}
@@ -283,15 +246,8 @@
   --enable-tcmalloc \
   %endif
   --with-selinux \
-  %if %{with rust}
   --enable-rust-offline \
-  %endif
-  %if %{with lib389}
   --disable-perl \
-  %else
-  --enable-perl \
-  --with-perldir=%{_bindir} \
-  %endif
   --libexecdir=%{_prefix}/lib/dirsrv/ \
   --with-pythonexec="%{_bindir}/%{use_python}" \
   --with-systemd \
@@ -304,21 +260,17 @@
 export XCFLAGS="$CFLAGS"
 make %{?_smp_mflags}
 #make setup.py
-%if %{with lib389}
 pushd src/lib389
 %python_build
 popd
-%endif
 
 %install
 %make_install
-%if %{with lib389}
 pushd src/lib389
 %python_install
 mv %{buildroot}/usr/libexec/dirsrv/dscontainer 
%{buildroot}%{_prefix}/lib/dirsrv/
 rmdir %{buildroot}/usr/libexec/dirsrv/
 popd
-%endif
 
 cp -r man/man3 %{buildroot}%{_mandir}/man3
 
@@ -331,11 +283,6 @@
 #remove libtool archives and static libs
 find %{buildroot} -type f -name "*.la" -delete -print
 
-# make sure perl scripts have a proper shebang
-%if ! %{with lib389}
-sed -i -e 's|#{{PERL-EXEC}}|#!%{_bindir}/perl|' 
%{buildroot}%{_datadir}/%{pkgname}/script-templates/template-*.pl
-%endif
-
 # install extra schema files
 cp -R extra-schema "%{buildroot}/%{_datadir}/dirsrv/"
 
@@ -402,111 +349,20 @@
 %dir %{_sysconfdir}/dirsrv/schema
 %{_libdir}/dirsrv/libns-dshttpd-*.so
 %{_libdir}/dirsrv/librewriters.so
-%if ! %{with lib389}
-%{_libdir}/dirsrv/perl/*.pm
-%endif
 %{_libdir}/dirsrv/plugins/*.so
 %{_libdir}/dirsrv/python/*.py
 %{_libdir}/dirsrv/*.so.*
 %exclude %{_mandir}/man1/ldap-agent*
 %{_mandir}/man1/*
 %{_mandir}/man5/*
-%if %{with lib389}
 %{_mandir}/man8/ns-slapd.8.gz
 %{_mandir}/man8/openldap_to_ds.8.gz
-
-# With lib389 we don't package all the man pages for deprecated commands. 
Upstream needs to remove
-# these from the build with --disable-perl flag set.
-# These are excluded now
-%exclude %{_mandir}/man8/bak2db.8.gz
-%exclude %{_mandir}/man8/bak2db.pl.8.gz
-%exclude %{_mandir}/man8/cleanallruv.pl.8.gz
-%exclude %{_mandir}/man8/db2bak.8.gz
-%exclude %{_mandir}/man8/db2bak.pl.8.gz
-%exclude %{_mandir}/man8/db2index.8.gz
-%exclude %{_mandir}/man8/db2index.pl.8.gz
-%exclude %{_mandir}/man8/db2ldif.8.gz
-%exclude %{_mandir}/man8/db2ldif.pl.8.gz
-%exclude %{_mandir}/man8/dbmon.sh.8.gz
-%exclude %{_mandir}/man8/dbverify.8.gz
-%exclude %{_mandir}/man8/dn2rdn.8.gz
-%exclude %{_mandir}/man8/fixup-linkedattrs.pl.8.gz
-%exclude %{_mandir}/man8/fixup-memberof.pl.8.gz
-%exclude %{_mandir}/man8/ldif2db.8.gz
-%exclude %{_mandir}/man8/ldif2db.pl.8.gz
-%exclude %{_mandir}/man8/ldif2ldap.8.gz
-%exclude %{_mandir}/man8/migrate-ds.pl.8.gz
-%exclude %{_mandir}/man8/monitor.8.gz
-%exclude %{_mandir}/man8/ns-accountstatus.pl.8.gz
-%exclude %{_mandir}/man8/ns-activate.pl.8.gz
-%exclude %{_mandir}/man8/ns-inactivate.pl.8.gz
-%exclude %{_mandir}/man8/ns-newpwpolicy.pl.8.gz
-%exclude %{_mandir}/man8/remove-ds.pl.8.gz
-%exclude %{_mandir}/man8/restart-dirsrv.8.gz
-%exclude %{_mandir}/man8/restoreconfig.8.gz
-%exclude %{_mandir}/man8/saveconfig.8.gz
-%exclude %{_mandir}/man8/schema-reload.pl.8.gz
-%exclude %{_mandir}/man8/setup-ds.pl.8.gz
-%exclude %{_mandir}/man8/start-dirsrv.8.gz
-%exclude %{_mandir}/man8/status-dirsrv.8.gz
-%exclude %{_mandir}/man8/stop-dirsrv.8.gz
-%exclude %{_mandir}/man8/suffix2instance.8.gz
-%exclude %{_mandir}/man8/syntax-validate.pl.8.gz
-%exclude %{_mandir}/man8/upgradedb.8.gz
-%exclude %{_mandir}/man8/upgradednformat.8.gz
-%exclude %{_mandir}/man8/usn-tombstone-cleanup.pl.8.gz
-%exclude %{_mandir}/man8/verify-db.pl.8.gz
-%exclude %{_mandir}/man8/vlvindex.8.gz
-%else
-%{_mandir}/man8/*
-%endif
 %{_bindir}/*
 # TODO: audit bug running https://bugzilla.opensuse.org/show_bug.cgi?id=1111564
 # This also needs a lot more work on the service file
 #attr(750,root,dirsrv) #caps(CAP_NET_BIND_SERVICE=pe) #{_sbindir}/ns-slapd
 %verify(not caps) %attr(755,root,root) %{_sbindir}/ns-slapd
 %{_sbindir}/openldap_to_ds
-%if ! %{with lib389}
-%{_sbindir}/bak2db
-%{_sbindir}/bak2db.pl
-%{_sbindir}/cleanallruv.pl
-%{_sbindir}/db2bak
-%{_sbindir}/db2bak.pl
-%{_sbindir}/db2index
-%{_sbindir}/db2index.pl
-%{_sbindir}/db2ldif
-%{_sbindir}/db2ldif.pl
-%{_sbindir}/dbmon.sh
-%{_sbindir}/dbverify
-%{_sbindir}/dn2rdn
-%{_sbindir}/fixup-linkedattrs.pl
-%{_sbindir}/fixup-memberof.pl
-%{_sbindir}/ldif2db
-%{_sbindir}/ldif2db.pl
-%{_sbindir}/ldif2ldap
-%{_sbindir}/migrate-ds.pl
-%{_sbindir}/monitor
-%{_sbindir}/ns-accountstatus.pl
-%{_sbindir}/ns-activate.pl
-%{_sbindir}/ns-inactivate.pl
-%{_sbindir}/ns-newpwpolicy.pl
-%{_sbindir}/remove-ds.pl
-%{_sbindir}/restart-dirsrv
-%{_sbindir}/restoreconfig
-%{_sbindir}/saveconfig
-%{_sbindir}/schema-reload.pl
-%{_sbindir}/setup-ds.pl
-%{_sbindir}/start-dirsrv
-%{_sbindir}/status-dirsrv
-%{_sbindir}/stop-dirsrv
-%{_sbindir}/suffix2instance
-%{_sbindir}/syntax-validate.pl
-%{_sbindir}/upgradedb
-%{_sbindir}/upgradednformat
-%{_sbindir}/usn-tombstone-cleanup.pl
-%{_sbindir}/verify-db.pl
-%{_sbindir}/vlvindex
-%endif
 %{_unitdir}/[email protected]
 %{_unitdir}/dirsrv.target
 %exclude %{_unitdir}/[email protected]/custom.conf
@@ -549,7 +405,6 @@
 %{_mandir}/man1/ldap-agent.1*
 %{_unitdir}/%{pkgname}-snmp.service
 
-%if %{with lib389}
 %files -n lib389
 %defattr(-,root,root,-)
 %license src/lib389/LICENSE
@@ -565,6 +420,5 @@
 %{_mandir}/man8/dsctl.8.gz
 %{_mandir}/man8/dsidm.8.gz
 /usr/lib/python*/site-packages/lib389*
-%endif
 
 %changelog

++++++ 389-ds-base-1.4.4.8~git0.bf454ad07.tar.bz2 -> 
389-ds-base-2.0.1~git0.b557f5daa.tar.bz2 ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/389-ds-base-1.4.4.8~git0.bf454ad07/Makefile.am 
new/389-ds-base-2.0.1~git0.b557f5daa/Makefile.am
--- old/389-ds-base-1.4.4.8~git0.bf454ad07/Makefile.am  2020-11-08 
04:33:31.000000000 +0100
+++ new/389-ds-base-2.0.1~git0.b557f5daa/Makefile.am    2020-11-03 
19:43:44.000000000 +0100
@@ -1205,6 +1205,14 @@
 
 libslapd_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) $(SASL_CFLAGS) 
@db_inc@ $(KERBEROS_CFLAGS) $(PCRE_CFLAGS) $(SDS_CPPFLAGS) $(SVRCORE_INCLUDES)
 libslapd_la_LIBADD = $(LDAPSDK_LINK) $(SASL_LINK) $(NSS_LINK) $(NSPR_LINK) 
$(KERBEROS_LIBS) $(PCRE_LIBS) $(THREADLIB) $(SYSTEMD_LIBS) libsds.la 
libsvrcore.la
+# If asan is enabled, it creates special libcrypt interceptors. However, they 
are
+# detected by the first load of libasan at runtime, and what is in the linked 
lib
+# so we need libcrypt to be present as soon as libasan is loaded for the 
interceptors
+# to function. Since ns-slapd links libslapd, this is pulled at startup, which 
allows
+# pwdstorage to be asan checked with libcrypt.
+if enable_asan
+libslapd_la_LIBADD += $(LIBCRYPT)
+endif
 libslapd_la_LDFLAGS = $(AM_LDFLAGS) $(SLAPD_LDFLAGS)
 
 if RUST_ENABLE
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/389-ds-base-1.4.4.8~git0.bf454ad07/VERSION.sh 
new/389-ds-base-2.0.1~git0.b557f5daa/VERSION.sh
--- old/389-ds-base-1.4.4.8~git0.bf454ad07/VERSION.sh   2020-11-08 
04:33:31.000000000 +0100
+++ new/389-ds-base-2.0.1~git0.b557f5daa/VERSION.sh     2020-11-03 
19:43:44.000000000 +0100
@@ -8,9 +8,9 @@
 vendor="389 Project"
 
 # PACKAGE_VERSION is constructed from these
-VERSION_MAJOR=1
-VERSION_MINOR=4
-VERSION_MAINT=4.8
+VERSION_MAJOR=2
+VERSION_MINOR=0
+VERSION_MAINT=1
 # NOTE: VERSION_PREREL is automatically set for builds made out of a git tree
 VERSION_PREREL=
 VERSION_DATE=$(date -u +%Y%m%d)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/389-ds-base-1.4.4.8~git0.bf454ad07/configure.ac 
new/389-ds-base-2.0.1~git0.b557f5daa/configure.ac
--- old/389-ds-base-1.4.4.8~git0.bf454ad07/configure.ac 2020-11-08 
04:33:31.000000000 +0100
+++ new/389-ds-base-2.0.1~git0.b557f5daa/configure.ac   2020-11-03 
19:43:44.000000000 +0100
@@ -164,7 +164,7 @@
 fi
 AC_SUBST([asan_cflags])
 AC_SUBST([asan_rust_defs])
-AM_CONDITIONAL(enable_asan,test "$enable_asan" = "yes")
+AM_CONDITIONAL(enable_asan,[test "$enable_asan" = yes])
 
 AC_MSG_CHECKING(for --enable-msan)
 AC_ARG_ENABLE(msan, AS_HELP_STRING([--enable-msan], [Enable gcc/clang memory 
sanitizer options (default: no)]),
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-1.4.4.8~git0.bf454ad07/dirsrvtests/tests/suites/clu/schema_test.py
 
new/389-ds-base-2.0.1~git0.b557f5daa/dirsrvtests/tests/suites/clu/schema_test.py
--- 
old/389-ds-base-1.4.4.8~git0.bf454ad07/dirsrvtests/tests/suites/clu/schema_test.py
  2020-11-08 04:33:31.000000000 +0100
+++ 
new/389-ds-base-2.0.1~git0.b557f5daa/dirsrvtests/tests/suites/clu/schema_test.py
    1970-01-01 01:00:00.000000000 +0100
@@ -1,61 +0,0 @@
-import logging
-import pytest
-import os
-from lib389.topologies import topology_st as topo
-from lib389.schema import Schema
-
-log = logging.getLogger(__name__)
-
-
-def test_origins_with_extra_parenthesis(topo):
-    """Test the custom schema with extra parenthesis in X-ORIGIN can be parsed
-    into JSON
-
-    :id: 4230f83b-0dc3-4bc4-a7a8-5ab0826a4f05
-    :setup: Standalone Instance
-    :steps:
-        1. Add attribute with X-ORIGIN that contains extra parenthesis
-        2. Querying for that attribute with JSON flag
-    :expectedresults:
-        1. Success
-        2. Success
-    """
-
-    ATTR_NAME = 'testAttribute'
-    X_ORG_VAL = 'test (TEST)'
-    schema = Schema(topo.standalone)
-
-    # Add new attribute
-    parameters = {
-        'names': [ATTR_NAME],
-        'oid': '1.1.1.1.1.1.1.22222',
-        'desc': 'Test extra parenthesis in X-ORIGIN',
-        'x_origin': X_ORG_VAL,
-        'syntax': '1.3.6.1.4.1.1466.115.121.1.15',
-        'syntax_len': None,
-        'x_ordered': None,
-        'collective': None,
-        'obsolete': None,
-        'single_value': None,
-        'no_user_mod': None,
-        'equality': None,
-        'substr': None,
-        'ordering': None,
-        'usage': None,
-        'sup': None
-    }
-    schema.add_attributetype(parameters)
-
-    # Search for attribute with JSON option
-    attr_result = schema.query_attributetype(ATTR_NAME, json=True)
-
-    # Verify the x-origin value is correct
-    assert attr_result['at']['x_origin'][0] == X_ORG_VAL
-
-
-if __name__ == '__main__':
-    # Run isolated
-    # -s for DEBUG mode
-    CURRENT_FILE = os.path.realpath(__file__)
-    pytest.main(["-s", CURRENT_FILE])
-
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-1.4.4.8~git0.bf454ad07/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
 
new/389-ds-base-2.0.1~git0.b557f5daa/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
--- 
old/389-ds-base-1.4.4.8~git0.bf454ad07/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
     2020-11-08 04:33:31.000000000 +0100
+++ 
new/389-ds-base-2.0.1~git0.b557f5daa/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
       2020-11-03 19:43:44.000000000 +0100
@@ -20,6 +20,7 @@
 from lib389.utils import ds_is_older, ds_is_newer
 import ldap
 import glob
+import re
 
 pytestmark = pytest.mark.tier1
 
@@ -774,7 +775,7 @@
     assert not invalid_etime
 
 
[email protected](ds_is_older('1.3.10.1'), reason="Fail because of bug 
1749236")
[email protected](ds_is_older('1.3.10.1', '1.4.1'), reason="Fail because of 
bug 1749236")
 @pytest.mark.bz1749236
 def test_etime_order_of_magnitude(topology_st, clean_access_logs, 
remove_users, disable_access_log_buffering):
     """Test that the etime reported in the access log has a correct order of 
magnitude
@@ -835,21 +836,101 @@
     assert len(result_str) > 0
 
     # The result_str returned looks like :
-    # [23/Apr/2020:06:06:14.366429900 -0400] conn=1 op=93 RESULT err=0 tag=101 
nentries=30 etime=0.005723017
-
+    # For ds older than 1.4.3.8: [23/Apr/2020:06:06:14.366429900 -0400] conn=1 
op=93 RESULT err=0 tag=101 nentries=30 etime=0.005723017
+    # For ds newer than 1.4.3.8: [21/Oct/2020:09:27:50.095209871 -0400] conn=1 
op=96 RESULT err=0 tag=101 nentries=30 wtime=0.000412584 optime=0.005428971 
etime=0.005836077
+    
     log.info('get the operation end time from the RESULT string')
     # Here we are getting the sec.nanosec part of the date, '14.366429900' in 
the above example
     end_time = (result_str.split()[0]).split(':')[3]
 
     log.info('get the logged etime for the operation from the RESULT string')
     # Here we are getting the etime value, '0.005723017' in the example above
-    etime = result_str.split()[8].split('=')[1][:-3]
+    if ds_is_older('1.4.3.8'):
+        etime = result_str.split()[8].split('=')[1][:-3]
+    else:
+        etime = result_str.split()[10].split('=')[1][:-3]
 
     log.info('Calculate the ratio between logged etime for the operation and 
elapsed time from its start time to its end time - should be around 1')
     etime_ratio = (Decimal(end_time) - Decimal(start_time)) // Decimal(etime)
     assert etime_ratio <= 1
 
 
[email protected](ds_is_older('1.4.3.8'), reason="Fail because of bug 
1850275")
[email protected]
+def test_optime_and_wtime_keywords(topology_st, clean_access_logs, 
remove_users, disable_access_log_buffering):
+    """Test that the new optime and wtime keywords are present in the access 
log and have correct values
+
+    :id: dfb4a49d-1cfc-400e-ba43-c107f58d62cf
+    :setup: Standalone instance
+    :steps:
+         1. Unset log buffering for the access log
+         2. Delete potential existing access logs
+         3. Add users
+         4. Search users
+         5. Parse the access log looking for the SRCH operation log
+         6. From the SRCH string get the op number of the operation
+         7. From the op num find the associated RESULT string in the access log
+         8. Search for the wtime optime keywords in the RESULT string
+         9. From the RESULT string get the wtime, optime and etime values for 
the operation
+         10. Check that optime + wtime is approximately equal to etime
+    :expectedresults:
+         1. access log buffering is off
+         2. Previously existing access logs are deleted
+         3. Users are successfully added
+         4. Search operation is successful
+         5. SRCH operation log string is caught
+         6. op number is collected
+         7. RESULT string is caught from the access log
+         8. wtime and optime keywords are collected
+         9. wtime, optime and etime values are collected
+         10. (optime + wtime) =~ etime
+    """
+
+    log.info('add_users')
+    add_users(topology_st.standalone, 30)
+
+    log.info ('search users')
+    search_users(topology_st.standalone)
+
+    log.info('parse the access logs to get the SRCH string')
+    # Here we are looking at the whole string logged for the search request 
with base ou=People,dc=example,dc=com
+    search_str = str(topology_st.standalone.ds_access_log.match(r'.*SRCH 
base="ou=People,dc=example,dc=com.*'))[1:-1]
+    assert len(search_str) > 0
+
+    # the search_str returned looks like :
+    # [22/Oct/2020:09:47:11.951316798 -0400] conn=1 op=96 SRCH 
base="ou=People,dc=example,dc=com" scope=2 
filter="(&(objectClass=account)(objectClass=posixaccount)(objectClass=inetOrgPerson)(objectClass=organizationalPerson))"
 attrs="distinguishedName"
+
+    log.info('get the OP number from the SRCH string')
+    # Here we are getting the op number, 'op=96' in the above example
+    op_num = search_str.split()[3]
+
+    log.info('get the RESULT string matching the SRCH op number')
+    # Here we are looking at the RESULT string for the above search op, 
'op=96' in this example
+    result_str = str(topology_st.standalone.ds_access_log.match(r'.*{} 
RESULT*'.format(op_num)))[1:-1]
+    assert len(result_str) > 0
+
+    # The result_str returned looks like :
+    # [22/Oct/2020:09:47:11.963276018 -0400] conn=1 op=96 RESULT err=0 tag=101 
nentries=30 wtime=0.000180294 optime=0.011966632 etime=0.012141311
+    log.info('Search for the wtime keyword in the RESULT string')
+    assert re.search('wtime', result_str)
+
+    log.info('get the wtime value from the RESULT string')
+    wtime_value = result_str.split()[8].split('=')[1][:-3]
+
+    log.info('Search for the optime keyword in the RESULT string')
+    assert re.search('optime', result_str)
+
+    log.info('get the optime value from the RESULT string')
+    optime_value = result_str.split()[9].split('=')[1][:-3]
+
+    log.info('get the etime value from the RESULT string')
+    etime_value = result_str.split()[10].split('=')[1][:-3]
+
+    log.info('Check that (wtime + optime) is approximately equal to etime i.e. 
their ratio is 1')
+    etime_ratio = (Decimal(wtime_value) + Decimal(optime_value)) // 
Decimal(etime_value)
+    assert etime_ratio == 1
+
+
 @pytest.mark.xfail(ds_is_older('1.3.10.1'), reason="May fail because of bug 
1662461")
 @pytest.mark.bz1662461
 @pytest.mark.ds50428
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-1.4.4.8~git0.bf454ad07/dirsrvtests/tests/suites/mapping_tree/acceptance_test.py
 
new/389-ds-base-2.0.1~git0.b557f5daa/dirsrvtests/tests/suites/mapping_tree/acceptance_test.py
--- 
old/389-ds-base-1.4.4.8~git0.bf454ad07/dirsrvtests/tests/suites/mapping_tree/acceptance_test.py
     1970-01-01 01:00:00.000000000 +0100
+++ 
new/389-ds-base-2.0.1~git0.b557f5daa/dirsrvtests/tests/suites/mapping_tree/acceptance_test.py
       2020-11-03 19:43:44.000000000 +0100
@@ -0,0 +1,65 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2020 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import ldap
+import logging
+import pytest
+import os
+from lib389._constants import *
+from lib389.topologies import topology_st as topo
+from lib389.mappingTree import MappingTrees
+
+DEBUGGING = os.getenv("DEBUGGING", default=False)
+if DEBUGGING:
+    logging.getLogger(__name__).setLevel(logging.DEBUG)
+else:
+    logging.getLogger(__name__).setLevel(logging.INFO)
+log = logging.getLogger(__name__)
+
+
+def test_invalid_mt(topo):
+    """Test that you can not add a new suffix/mapping tree
+    that does not already have the backend entry created.
+
+    :id: caabd407-f541-4695-b13f-8f92af1112a0
+    :setup: Standalone Instance
+    :steps:
+        1. Create a new suffix that specifies an existing backend which has a
+           different suffix.
+        2. Create a suffix that has no backend entry at all.
+    :expectedresults:
+        1. Should fail with UNWILLING_TO_PERFORM
+        1. Should fail with UNWILLING_TO_PERFORM
+    """
+
+    bad_suffix = 'dc=does,dc=not,dc=exist'
+    mts = MappingTrees(topo.standalone)
+    
+    properties = {
+        'cn': bad_suffix,
+        'nsslapd-state': 'backend',
+        'nsslapd-backend': 'userroot',
+    }
+    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
+        mts.create(properties=properties)
+
+    properties = {
+        'cn': bad_suffix,
+        'nsslapd-state': 'backend',
+        'nsslapd-backend': 'notCreatedRoot',
+    }
+    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
+        mts.create(properties=properties)
+
+
+if __name__ == '__main__':
+    # Run isolated
+    # -s for DEBUG mode
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main(["-s", CURRENT_FILE])
+
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-1.4.4.8~git0.bf454ad07/dirsrvtests/tests/suites/openldap_2_389/password_migrate_test.py
 
new/389-ds-base-2.0.1~git0.b557f5daa/dirsrvtests/tests/suites/openldap_2_389/password_migrate_test.py
--- 
old/389-ds-base-1.4.4.8~git0.bf454ad07/dirsrvtests/tests/suites/openldap_2_389/password_migrate_test.py
     1970-01-01 01:00:00.000000000 +0100
+++ 
new/389-ds-base-2.0.1~git0.b557f5daa/dirsrvtests/tests/suites/openldap_2_389/password_migrate_test.py
       2020-11-03 19:43:44.000000000 +0100
@@ -0,0 +1,73 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2020 William Brown <[email protected]>
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import pytest
+import os
+from lib389.topologies import topology_st
+from lib389.utils import ds_is_older
+from lib389.idm.user import nsUserAccounts
+from lib389._constants import DEFAULT_SUFFIX
+
+pytestmark = pytest.mark.tier1
+
[email protected](ds_is_older('1.4.3'), reason="Not implemented")
+def test_migrate_openldap_password_hash(topology_st):
+    """Test import of an openldap password value into the directory and assert
+    it can bind.
+
+    :id: e4898e0d-5d18-4765-9249-84bcbf862fde
+    :setup: Standalone Instance
+    :steps:
+        1. Import a hash
+        2. Attempt a bind
+        3. Goto 1
+
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Success
+    """
+    inst = topology_st.standalone
+    inst.config.set('nsslapd-allow-hashed-passwords', 'on')
+
+    # You generate these with:
+    # slappasswd -s password -o module-load=/usr/lib64/openldap/pw-argon2.so 
-h {ARGON2}
+    pwds = [
+        '{CRYPT}ZZKRwXSu3tt8s',
+        '{SSHA}jdALDtX0+MVMkRsX0ilHz0O6Uos95D4s',
+        '{MD5}X03MO1qnZdYdgyfeuILPmQ==',
+        '{SMD5}RnexgcsjdBHMQ1yhB7+sD+a+qDI=',
+        '{SHA}W6ph5Mm5Pz8GgiULbPgzG37mj9g=',
+        '{SHA256}XohImNooBHFR0OVvjcYpJ3NgPQ1qq73WKhHvch0VQtg=',
+        '{SSHA256}covFryM35UrKB3gMYxtYpQYTHbTn5kFphjcNHewfj581SLJwjA9jew==',
+        
'{SHA384}qLZLq9CsqRpZvbt3YbQh1PK7OCgNOnW6DyHyvrxFWD1EbFmGYMlM5oDEfRnDB4On',
+        
'{SSHA384}kNjTWdmyy2G1IgJF8WrOpq0N//Yc2Ec5TIQYceuiuHQXRXpC1bfnMqyOx0NxrSREjBWDwUpqXjo=',
+        
'{SHA512}sQnzu7wkTrgkQZF+0G1hi5AI3Qmzvv0bXgc5THBqi7mAsdd4Xll27ASbRt9fEyavWi6m0QP9B8lThf+rDKy8hg==',
+        
'{SSHA512}+7A8kA32q4mCBao4Cbatdyzl5imVwJ62ZAE7UOTP4pfrF90E9R2LabOfJFzx6guaYhTmUEVK2wRKC8bToqspdeTluX2d1BX2',
+        # Need to check --
+        # '{PBKDF2}10000$IlfapjA351LuDSwYC0IQ8Q$saHqQTuYnjJN/tmAndT.8mJt.6w',
+        # 
'{PBKDF2-SHA1}10000$ZBEH6B07rgQpJSikyvMU2w$TAA03a5IYkz1QlPsbJKvUsTqNV',
+        # 
'{PBKDF2-SHA256}10000$henZGfPWw79Cs8ORDeVNrQ$1dTJy73v6n3bnTmTZFghxHXHLsAzKaAy8SksDfZBPIw',
+        # 
'{PBKDF2-SHA512}10000$Je1Uw19Bfv5lArzZ6V3EPw$g4T/1sqBUYWl9o93MVnyQ/8zKGSkPbKaXXsT8WmysXQJhWy8MRP2JFudSL.N9RklQYgDPxPjnfum/F2f/TrppA',
+        # 
'{ARGON2}$argon2id$v=19$m=65536,t=2,p=1$IyTQMsvzB2JHDiWx8fq7Ew$VhYOA7AL0kbRXI5g2kOyyp8St1epkNj7WZyUY4pAIQQ',
+    ]
+
+    accounts = nsUserAccounts(inst, basedn=DEFAULT_SUFFIX)
+    account = accounts.create(properties={
+        'uid': 'pw_migrate_test_user',
+        'cn': 'pw_migrate_test_user',
+        'displayName': 'pw_migrate_test_user',
+        'uidNumber': '12345',
+        'gidNumber': '12345',
+        'homeDirectory': '/var/empty',
+    })
+
+    for pwhash in pwds:
+        inst.log.debug(f"Attempting -> {pwhash}")
+        account.set('userPassword', pwhash)
+        nconn = account.bind('password')
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-1.4.4.8~git0.bf454ad07/dirsrvtests/tests/suites/replication/regression_test.py
 
new/389-ds-base-2.0.1~git0.b557f5daa/dirsrvtests/tests/suites/replication/regression_test.py
--- 
old/389-ds-base-1.4.4.8~git0.bf454ad07/dirsrvtests/tests/suites/replication/regression_test.py
      2020-11-08 04:33:31.000000000 +0100
+++ 
new/389-ds-base-2.0.1~git0.b557f5daa/dirsrvtests/tests/suites/replication/regression_test.py
        2020-11-03 19:43:44.000000000 +0100
@@ -109,6 +109,30 @@
         for dn, entry in ldif_list:
             ldif_writer.unparse(dn, entry)
 
+def _remove_replication_data(ldif_file):
+    """ Remove the replication data from ldif file:
+        db2ldif without -r includes some of the replica data like 
+        - nsUniqueId
+        - keepalive entries
+        This function filters the ldif file to remove this data
+    """
+
+    with open(ldif_file) as f:
+        parser = ldif.LDIFRecordList(f)
+        parser.parse()
+
+        ldif_list = parser.all_records
+        # Iterate on a copy of the ldif entry list
+        for dn, entry in ldif_list[:]:
+            if dn.startswith('cn=repl keep alive'):
+                ldif_list.remove((dn,entry))
+            else:
+                entry.pop('nsUniqueId')
+    with open(ldif_file, 'w') as f:
+        ldif_writer = ldif.LDIFWriter(f)
+        for dn, entry in ldif_list:
+            ldif_writer.unparse(dn, entry)
+
 
 @pytest.fixture(scope="module")
 def topo_with_sigkill(request):
@@ -924,6 +948,112 @@
     assert len(m1entries) == len(m2entries)
 
 
+def get_keepalive_entries(instance,replica):
+    # Returns the keep alive entries that exists with the suffix of the server 
instance
+    try:
+        entries = instance.search_s(replica.get_suffix(), ldap.SCOPE_ONELEVEL,
+                    "(&(objectclass=ldapsubentry)(cn=repl keep alive*))",
+                    ['cn', 'nsUniqueId', 'modifierTimestamp'])
+    except ldap.LDAPError as e:
+        log.fatal('Failed to retrieve keepalive entry (%s) on instance %s: 
error %s' % (dn, instance, str(e)))
+        assert False
+    # No error, so lets log the keepalive entries
+    if log.isEnabledFor(logging.DEBUG):
+        for ret in entries:
+            log.debug("Found keepalive entry:\n"+str(ret));
+    return entries
+
+def verify_keepalive_entries(topo, expected):
+    #Check that keep alive entries exists (or not exists) for every masters on 
every masters
+    #Note: The testing method is quite basic: counting that there is one 
keepalive entry per master.
+    # that is ok for simple test cases like 
test_online_init_should_create_keepalive_entries but
+    # not for the general case as keep alive associated with no more existing 
master may exists
+    # (for example after: db2ldif / demote a master / ldif2db / init other 
masters)
+    # ==> if the function is somehow pushed in lib389, a check better than 
simply counting the entries
+    # should be done.
+    for masterId in topo.ms:
+        master=topo.ms[masterId]
+        for replica in Replicas(master).list():
+            if (replica.get_role() != ReplicaRole.MASTER):
+               continue
+            replica_info = f'master: {masterId} RID: {replica.get_rid()} 
suffix: {replica.get_suffix()}'
+            log.debug(f'Checking keepAliveEntries on {replica_info}')
+            keepaliveEntries = get_keepalive_entries(master, replica);
+            expectedCount = len(topo.ms) if expected else 0
+            foundCount = len(keepaliveEntries)
+            if (foundCount == expectedCount):
+                log.debug(f'Found {foundCount} keepalive entries as expected 
on {replica_info}.')
+            else:
+                log.error(f'{foundCount} Keepalive entries are found '
+                          f'while {expectedCount} were expected on 
{replica_info}.')
+                assert False
+
+
+def test_online_init_should_create_keepalive_entries(topo_m2):
+    """Check that keep alive entries are created when initializing a master 
from another one
+
+    :id: d5940e71-d18a-4b71-aaf7-b9185361fffe
+    :setup: Two masters replication setup
+    :steps:
+        1. Generate ldif without replication data
+        2  Init both masters from that ldif
+        3  Check that keep alive entries does not exists
+        4  Perform on line init of master2 from master1
+        5  Check that keep alive entries exists
+    :expectedresults:
+        1. No error while generating ldif
+        2. No error while importing the ldif file
+        3. No keepalive entrie should exists on any masters
+        4. No error while initializing master2
+        5. All keepalive entries should exist on every masters
+
+    """
+
+    repl = ReplicationManager(DEFAULT_SUFFIX)
+    m1 = topo_m2.ms["master1"]
+    m2 = topo_m2.ms["master2"]
+    # Step 1: Generate ldif without replication data
+    m1.stop()
+    m2.stop()
+    ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir()
+    m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
+               excludeSuffixes=None, repl_data=False,
+               outputfile=ldif_file, encrypt=False)
+    # Remove replication metadata that are still in the ldif
+    _remove_replication_data(ldif_file)
+
+    # Step 2: Init both masters from that ldif
+    m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+    m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
+    m1.start()
+    m2.start()
+
+    """ Replica state is now as if CLI setup has been done using:
+        dsconf master1 replication enable --suffix "${SUFFIX}" --role master
+        dsconf master2 replication enable --suffix "${SUFFIX}" --role master
+        dsconf master1 replication create-manager --name 
"${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}"
+        dsconf master2 replication create-manager --name 
"${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}"
+        dsconf master1 repl-agmt create --suffix "${SUFFIX}"
+        dsconf master2 repl-agmt create --suffix "${SUFFIX}"
+    """
+
+    # Step 3: No keepalive entrie should exists on any masters
+    verify_keepalive_entries(topo_m2, False)
+
+    # Step 4: Perform on line init of master2 from master1
+    agmt = Agreements(m1).list()[0]
+    agmt.begin_reinit()
+    (done, error) = agmt.wait_reinit()
+    assert done is True
+    assert error is False
+
+    # Step 5: All keepalive entries should exists on every masters
+    #  Verify the keep alive entry once replication is in sync
+    # (that is the step that fails when bug is not fixed)
+    repl.wait_for_ruv(m2,m1)
+    verify_keepalive_entries(topo_m2, True);
+
+
 if __name__ == '__main__':
     # Run isolated
     # -s for DEBUG mode
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-1.4.4.8~git0.bf454ad07/ldap/servers/plugins/chainingdb/cb_instance.c
 
new/389-ds-base-2.0.1~git0.b557f5daa/ldap/servers/plugins/chainingdb/cb_instance.c
--- 
old/389-ds-base-1.4.4.8~git0.bf454ad07/ldap/servers/plugins/chainingdb/cb_instance.c
        2020-11-08 04:33:31.000000000 +0100
+++ 
new/389-ds-base-2.0.1~git0.b557f5daa/ldap/servers/plugins/chainingdb/cb_instance.c
  2020-11-03 19:43:44.000000000 +0100
@@ -329,7 +329,7 @@
         }
     }
     *returncode = rc;
-    return ((LDAP_SUCCESS == rc) ? 1 : -1);
+    return ((LDAP_SUCCESS == rc) ? SLAPI_DSE_CALLBACK_OK : 
SLAPI_DSE_CALLBACK_ERROR);
 }
 
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-1.4.4.8~git0.bf454ad07/ldap/servers/plugins/replication/repl5_replica.c
 
new/389-ds-base-2.0.1~git0.b557f5daa/ldap/servers/plugins/replication/repl5_replica.c
--- 
old/389-ds-base-1.4.4.8~git0.bf454ad07/ldap/servers/plugins/replication/repl5_replica.c
     2020-11-08 04:33:31.000000000 +0100
+++ 
new/389-ds-base-2.0.1~git0.b557f5daa/ldap/servers/plugins/replication/repl5_replica.c
       2020-11-03 19:43:44.000000000 +0100
@@ -386,6 +386,20 @@
     slapi_ch_free((void **)arg);
 }
 
+/******************************************************************************
+ ******************** REPLICATION KEEP ALIVE ENTRIES **************************
+ ******************************************************************************
+ * They are subentries of the replicated suffix and there is one per master.  *
+ * These entries exist only to trigger a change that get replicated over the  *
+ * topology.                                                                  *
+ * Their main purpose is to generate records in the changelog and they are    *
+ * updated from time to time by fractional replication to insure that at      *
+ * least a change must be replicated by FR after a great number of not        *
+ * replicated changes are found in the changelog. The interest is that the    *
+ * fractional RUV get then updated so less changes need to be walked in the   *
+ * changelog when searching for the first change to send                      *
+ 
******************************************************************************/
+
 #define KEEP_ALIVE_ATTR "keepalivetimestamp"
 #define KEEP_ALIVE_ENTRY "repl keep alive"
 #define KEEP_ALIVE_DN_FORMAT "cn=%s %d,%s"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-1.4.4.8~git0.bf454ad07/ldap/servers/plugins/replication/repl_extop.c
 
new/389-ds-base-2.0.1~git0.b557f5daa/ldap/servers/plugins/replication/repl_extop.c
--- 
old/389-ds-base-1.4.4.8~git0.bf454ad07/ldap/servers/plugins/replication/repl_extop.c
        2020-11-08 04:33:31.000000000 +0100
+++ 
new/389-ds-base-2.0.1~git0.b557f5daa/ldap/servers/plugins/replication/repl_extop.c
  2020-11-03 19:43:44.000000000 +0100
@@ -1175,6 +1175,10 @@
                 */
                 if (replica_is_flag_set(r, REPLICA_LOG_CHANGES) && 
cldb_is_open(r)) {
                     replica_log_ruv_elements(r);
+                    /* now that the changelog is open and started, we can alos 
cretae the
+                     * keep alive entry without risk that db and cl will not 
match
+                     */
+                    replica_subentry_check(replica_get_root(r), 
replica_get_rid(r));
                 }
 
                 /* ONREPL code that dealt with new RUV, etc was moved into the 
code
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-1.4.4.8~git0.bf454ad07/ldap/servers/slapd/configdse.c 
new/389-ds-base-2.0.1~git0.b557f5daa/ldap/servers/slapd/configdse.c
--- old/389-ds-base-1.4.4.8~git0.bf454ad07/ldap/servers/slapd/configdse.c       
2020-11-08 04:33:31.000000000 +0100
+++ new/389-ds-base-2.0.1~git0.b557f5daa/ldap/servers/slapd/configdse.c 
2020-11-03 19:43:44.000000000 +0100
@@ -536,7 +536,7 @@
     }
 
     *returncode = LDAP_SUCCESS;
-    return *returncode;
+    return SLAPI_DSE_CALLBACK_OK;
 }
 
 static int
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-1.4.4.8~git0.bf454ad07/ldap/servers/slapd/dse.c 
new/389-ds-base-2.0.1~git0.b557f5daa/ldap/servers/slapd/dse.c
--- old/389-ds-base-1.4.4.8~git0.bf454ad07/ldap/servers/slapd/dse.c     
2020-11-08 04:33:31.000000000 +0100
+++ new/389-ds-base-2.0.1~git0.b557f5daa/ldap/servers/slapd/dse.c       
2020-11-03 19:43:44.000000000 +0100
@@ -1866,31 +1866,33 @@
     /* give the dse callbacks the first crack at the modify */
     rc = dse_call_callback(pdse, pb, SLAPI_OPERATION_MODIFY, DSE_FLAG_PREOP, 
ec, ecc, &returncode, returntext);
     if (SLAPI_DSE_CALLBACK_OK == rc) {
+        int plugin_rc;
+
         /* next, give the be plugins a crack at it */
         slapi_pblock_set(pb, SLAPI_RESULT_CODE, &returncode);
         slapi_pblock_set(pb, SLAPI_MODIFY_EXISTING_ENTRY, ecc);
-        rc = plugin_call_plugins(pb, SLAPI_PLUGIN_BE_PRE_MODIFY_FN);
+        plugin_rc = plugin_call_plugins(pb, SLAPI_PLUGIN_BE_PRE_MODIFY_FN);
         need_be_postop = 1; /* if the be preops were called, have to call the 
be postops too */
         if (!returncode) {
             slapi_pblock_get(pb, SLAPI_RESULT_CODE, &returncode);
         }
-        if (!rc && !returncode) {
+        if (!plugin_rc && !returncode) {
             /* finally, give the betxn plugins a crack at it */
-            rc = plugin_call_plugins(pb, SLAPI_PLUGIN_BE_TXN_PRE_MODIFY_FN);
+            plugin_rc = plugin_call_plugins(pb, 
SLAPI_PLUGIN_BE_TXN_PRE_MODIFY_FN);
             if (!returncode) {
                 slapi_pblock_get(pb, SLAPI_RESULT_CODE, &returncode);
             }
-            if (rc || returncode) {
+            if (plugin_rc || returncode) {
                 slapi_log_err(SLAPI_DSE_TRACELEVEL,
                               "dse_modify", "SLAPI_PLUGIN_BE_TXN_PRE_MODIFY_FN 
failed - rc %d LDAP error %d:%s\n",
-                              rc, returncode, ldap_err2string(returncode));
+                              plugin_rc, returncode, 
ldap_err2string(returncode));
             }
         } else {
             slapi_log_err(SLAPI_DSE_TRACELEVEL,
                           "dse_modify", "SLAPI_PLUGIN_BE_PRE_MODIFY_FN failed 
- rc %d LDAP error %d:%s\n",
                           rc, returncode, ldap_err2string(returncode));
         }
-        if (rc || returncode) {
+        if (plugin_rc || returncode) {
             char *ldap_result_message = NULL;
             rc = SLAPI_DSE_CALLBACK_ERROR;
             if (!returncode) {
@@ -2060,7 +2062,23 @@
                 slapi_pblock_get(pb, SLAPI_RESULT_CODE, &returncode);
             }
         }
+    } else {
+        /* It should not happen but just be paranoiac, do not
+         * forget to call the postop if needed
+         */
+        if (need_be_postop) {
+            plugin_call_plugins(pb, SLAPI_PLUGIN_BE_TXN_POST_MODIFY_FN);
+            if (!returncode) {
+                slapi_pblock_get(pb, SLAPI_RESULT_CODE, &returncode);
+            }
+
+            plugin_call_plugins(pb, SLAPI_PLUGIN_BE_POST_MODIFY_FN);
+            if (!returncode) {
+                slapi_pblock_get(pb, SLAPI_RESULT_CODE, &returncode);
+            }
+        }
     }
+
     /* time to restore original mods */
     if (original_mods) {
         LDAPMod **mods_from_callback;
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-1.4.4.8~git0.bf454ad07/ldap/servers/slapd/mapping_tree.c 
new/389-ds-base-2.0.1~git0.b557f5daa/ldap/servers/slapd/mapping_tree.c
--- old/389-ds-base-1.4.4.8~git0.bf454ad07/ldap/servers/slapd/mapping_tree.c    
2020-11-08 04:33:31.000000000 +0100
+++ new/389-ds-base-2.0.1~git0.b557f5daa/ldap/servers/slapd/mapping_tree.c      
2020-11-03 19:43:44.000000000 +0100
@@ -569,7 +569,7 @@
  * tree node (guaranteed to be non-NULL).
  */
 static int
-mapping_tree_entry_add(Slapi_Entry *entry, mapping_tree_node **newnodep)
+mapping_tree_entry_add(Slapi_Entry *entry, mapping_tree_node **newnodep, 
PRBool check_be)
 {
     Slapi_DN *subtree = NULL;
     const char *tmp_ndn;
@@ -605,6 +605,37 @@
         return lderr;
     }
 
+    /* Verify there is a matching backend for this suffix */
+    if (check_be) {
+        const char *mt_be_name;
+        char *cookie = NULL;
+        int32_t found_be = 0;
+
+        /* get the backend name for this mapping tree node */
+        mt_be_name = slapi_entry_attr_get_ref(entry, "nsslapd-backend");
+
+        be = slapi_get_first_backend(&cookie);
+        while (be) {
+            char *be_name = slapi_be_get_name(be);
+            if (mt_be_name && be_name &&
+                strcasecmp(be_name, mt_be_name) == 0 &&
+                slapi_sdn_compare(subtree, be->be_suffix) == 0)
+            {
+                found_be = 1;
+                break;
+            }
+            be = (backend *)slapi_get_next_backend(cookie);
+        }
+        slapi_ch_free((void **)&cookie);
+        if (!found_be) {
+            slapi_log_err(SLAPI_LOG_ERR, "mapping_tree_entry_add",
+                     "The subtree %s does not match any existing backends, and 
will not be created.\n",
+                     slapi_sdn_get_dn(subtree));
+            slapi_sdn_free(&subtree);
+            return LDAP_UNWILLING_TO_PERFORM;
+        }
+    }
+
     tmp_ndn = slapi_sdn_get_ndn(subtree);
     if (tmp_ndn && ('\0' == *tmp_ndn)) {
 
@@ -922,7 +953,7 @@
 
     for (x = 0; entries[x] != NULL; x++) {
         mapping_tree_node *child = NULL;
-        if (LDAP_SUCCESS != mapping_tree_entry_add(entries[x], &child)) {
+        if (LDAP_SUCCESS != mapping_tree_entry_add(entries[x], &child, 
PR_FALSE)) {
             slapi_log_err(SLAPI_LOG_ERR, "mapping_tree_node_get_children",
                           "Could not add mapping tree node %s\n",
                           slapi_entry_get_dn(entries[x]));
@@ -1331,7 +1362,7 @@
      * Should the mapping tree stucture change, this  would have to
      * be checked again
      */
-    *returncode = mapping_tree_entry_add(entryBefore, &node);
+    *returncode = mapping_tree_entry_add(entryBefore, &node, PR_TRUE /* Check 
be exists */);
     if (LDAP_SUCCESS != *returncode || !node) {
         return SLAPI_DSE_CALLBACK_ERROR;
     }
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-1.4.4.8~git0.bf454ad07/src/lib389/lib389/schema.py 
new/389-ds-base-2.0.1~git0.b557f5daa/src/lib389/lib389/schema.py
--- old/389-ds-base-1.4.4.8~git0.bf454ad07/src/lib389/lib389/schema.py  
2020-11-08 04:33:31.000000000 +0100
+++ new/389-ds-base-2.0.1~git0.b557f5daa/src/lib389/lib389/schema.py    
2020-11-03 19:43:44.000000000 +0100
@@ -142,7 +142,7 @@
                     remainder = obj.split(" X-ORIGIN ")[1]
                     if remainder[:1] == "(":
                         # Have multiple values
-                        end = remainder.rfind(')')
+                        end = remainder.find(')')
                         vals = remainder[1:end]
                         vals = re.findall(X_ORIGIN_REGEX, vals)
                         # For now use the first value, but this should be a 
set (another bug in python-ldap)

++++++ 389-ds-base.obsinfo ++++++
--- /var/tmp/diff_new_pack.YcLlQ7/_old  2020-12-03 18:43:59.922258343 +0100
+++ /var/tmp/diff_new_pack.YcLlQ7/_new  2020-12-03 18:43:59.922258343 +0100
@@ -1,5 +1,5 @@
 name: 389-ds-base
-version: 1.4.4.8~git0.bf454ad07
-mtime: 1604806411
-commit: bf454ad070199d5e8c0a03b5e2505e6f2750e998
+version: 2.0.1~git0.b557f5daa
+mtime: 1604429024
+commit: b557f5daa833b7c43b582c445480e896429f33dd
 

++++++ _service ++++++
--- /var/tmp/diff_new_pack.YcLlQ7/_old  2020-12-03 18:43:59.962258396 +0100
+++ /var/tmp/diff_new_pack.YcLlQ7/_new  2020-12-03 18:43:59.962258396 +0100
@@ -3,8 +3,8 @@
     <param name="url">https://github.com/389ds/389-ds-base.git</param>
     <param name="versionformat">@PARENT_TAG@~git@TAG_OFFSET@.%h</param>
     <param name="scm">git</param>
-    <param name="revision">389-ds-base-1.4.4.8</param>
-    <param name="match-tag">389-ds-base-1.4.4.8</param>
+    <param name="revision">389-ds-base-2.0.1</param>
+    <param name="match-tag">389-ds-base-2.0.1</param>
     <param name="versionrewrite-pattern">389dsbase(.*)</param>
     <param name="versionrewrite-replacement">\1</param>
     <param name="changesgenerate">enable</param>

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.YcLlQ7/_old  2020-12-03 18:43:59.978258417 +0100
+++ /var/tmp/diff_new_pack.YcLlQ7/_new  2020-12-03 18:43:59.978258417 +0100
@@ -5,4 +5,4 @@
                 <param 
name="url">https://pagure.io/forks/firstyear/389-ds-base.git</param>
               <param 
name="changesrevision">52fa3f10591fc102b1e08def13e1e2bf48ecfd2e</param></service><service
 name="tar_scm">
                 <param 
name="url">https://github.com/389ds/389-ds-base.git</param>
-              <param 
name="changesrevision">bf454ad070199d5e8c0a03b5e2505e6f2750e998</param></service></servicedata>
\ No newline at end of file
+              <param 
name="changesrevision">b557f5daa833b7c43b582c445480e896429f33dd</param></service></servicedata>
\ No newline at end of file

++++++ vendor.tar.xz ++++++
_______________________________________________
openSUSE Commits mailing list -- [email protected]
To unsubscribe, email [email protected]
List Netiquette: https://en.opensuse.org/openSUSE:Mailing_list_netiquette
List Archives: 
https://lists.opensuse.org/archives/list/[email protected]

Reply via email to